/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

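/*
 * Worked example (illustrative sketch only; not used by the translator):
 * an ISSInfo value packs the Rt register number into its low 5 bits
 * (ISSRegMask) and ORs the flag bits defined above on top, so a 16-bit
 * load into register rt would be described as follows.
 */
static inline ISSInfo example_iss_for_16bit_load(int rt)
{
    /* Rt in ISSRegMask, flags in the higher bits */
    return (rt & ISSRegMask) | ISSIs16Bit;
}
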
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

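/*
 * As a plain-C sketch (illustrative only; not used by the translator),
 * the mask-and-shift sequence above computes the usual branch-free
 * per-halfword byteswap:
 */
static inline uint32_t example_rev16(uint32_t x)
{
    /* swap the two bytes within each 16-bit half independently */
    return ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
}
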
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

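/*
 * Plain-C sketch (illustrative only; not used by the translator) of what
 * the two helpers above compute: tcg_gen_mulu2/muls2 produce the low and
 * high 32-bit halves of the full product, and tcg_gen_concat_i32_i64
 * glues them back together.
 */
static inline uint64_t example_mulu_i64_i32(uint32_t a, uint32_t b)
{
    uint64_t prod = (uint64_t)a * b;
    uint32_t lo = (uint32_t)prod;          /* tcg_gen_mulu2_i32: lo */
    uint32_t hi = (uint32_t)(prod >> 32);  /* tcg_gen_mulu2_i32: hi */
    return ((uint64_t)hi << 32) | lo;      /* tcg_gen_concat_i32_i64 */
}
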
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

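/*
 * Worked example (illustrative only; not used by the translator) of the
 * carry-isolation trick from the comment above, on host integers:
 * clearing bit 15 in both inputs cuts the carry chain between the two
 * halfwords, and the final XOR splices the true bit-15 sum back in
 * without generating a carry-out into the high halfword.
 */
static inline uint32_t example_add16(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;  /* carry-free sum of the bit-15s */
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    return (t0 + t1) ^ tmp;
}
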
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

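/*
 * Plain-C sketch (illustrative only; not used by the translator) of the
 * V-flag computation above: signed overflow occurred iff the operands
 * agreed in sign and the result disagrees, i.e. (res ^ t0) & ~(t0 ^ t1)
 * has its sign bit set, which is exactly the xor/andc pair applied to
 * cpu_VF in the generated code.
 */
static inline uint32_t example_add_overflow(uint32_t t0, uint32_t t1)
{
    uint32_t res = t0 + t1;
    return ((res ^ t0) & ~(t0 ^ t1)) >> 31;   /* V flag as 0 or 1 */
}
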
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

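/*
 * Plain-C sketch (illustrative only; not used by the translator) of the
 * RRX case above (ROR with shift == 0): a 33-bit rotate right by one
 * through the carry flag.  The old bit 0 becomes the new carry, written
 * to CF by shifter_out_im(var, 0) in the generated code.
 */
static inline uint32_t example_rrx(uint32_t x, uint32_t carry_in)
{
    return (carry_in << 31) | (x >> 1);
}
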
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

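/*
 * Worked example (illustrative only; not used by the translator) of the
 * branch-free "hi" condition built in case 8/9 above: CF is stored as
 * 0 or 1, so negating it yields an all-zeroes or all-ones mask, and ZF
 * is non-zero exactly when Z is clear.
 */
static inline uint32_t example_cond_hi(uint32_t cf, uint32_t zf)
{
    return (0 - cf) & zf;   /* non-zero iff C && !Z */
}
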
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

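/*
 * Worked example (illustrative only; not used by the translator) of the
 * BE32 fixup above: with SCTLR.B set, a sub-word access is redirected
 * within its naturally aligned word by XORing the low address bits, so
 * a byte access XORs the address with 3 and a halfword access with 2;
 * word-sized and larger accesses are left alone.
 */
static inline uint32_t example_be32_addr(uint32_t addr, unsigned size_log2)
{
    return (size_log2 < 2) ? (addr ^ (4 - (1u << size_log2))) : addr;
}
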
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

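/*
 * For reference (illustrative expansion only, not additional code):
 * DO_GEN_LD(16u, MO_UW) above defines
 *
 *   static inline void gen_aa32_ld16u(DisasContext *s, TCGv_i32 val,
 *                                     TCGv_i32 a32, int index)
 *   {
 *       gen_aa32_ld_i32(s, val, a32, index, MO_UW | s->be_data);
 *   }
 *
 * plus a gen_aa32_ld16u_iss() variant that additionally records the
 * ISS syndrome via disas_set_da_iss().
 */
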
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * the architecture requires when halting debug is disabled: it
     * will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                                      \
static inline void gen_vfp_##name(int dp, int shift, int neon)        \
{                                                                     \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                        \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,  \
                                        statusptr);                   \
    } else {                                                          \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,  \
                                        statusptr);                   \
    }                                                                 \
    tcg_temp_free_i32(tmp_shift);                                     \
    tcg_temp_free_ptr(statusptr);                                     \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}

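/*
 * Worked instances of the layout above (illustrative only; not used by
 * the translator): each zregs[] element holds two doubles, and four
 * singles pack into the same storage.
 */
static inline unsigned example_dreg_zreg_index(unsigned dreg)
{
    return dreg >> 1;   /* e.g. D5 -> zregs[2], slot d[1] */
}

static inline unsigned example_sreg_zreg_index(unsigned sreg)
{
    return sreg >> 2;   /* e.g. S5 -> zregs[1], upper half of d[0] */
}
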
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}

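/*
 * Worked example (illustrative only; not used by the translator) of the
 * big-endian fixup above: for MO_16 elements, element_size == 2, so
 * element 0 sits at byte offset 0 on a little-endian host but at offset
 * 0 ^ (8 - 2) == 6 within the host-order 8-byte unit on a big-endian
 * host.
 */
static inline int example_neon_elt_offset_be(int element, int size_log2)
{
    int element_size = 1 << size_log2;
    int ofs = element * element_size;
    /* mirror within the 8-byte unit, as neon_element_offset() does */
    return (element_size < 8) ? (ofs ^ (8 - element_size)) : ofs;
}
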
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

a7812ae4 1740static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1741{
0ecb72a5 1742 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1743}
1744
a7812ae4 1745static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1746{
0ecb72a5 1747 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1748}
1749
39d5492a 1750static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1751{
39d5492a 1752 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1753 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1754 return var;
e677137d
PB
1755}
1756
39d5492a 1757static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1758{
0ecb72a5 1759 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1760 tcg_temp_free_i32(var);
e677137d
PB
1761}
1762
1763static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1764{
1765 iwmmxt_store_reg(cpu_M0, rn);
1766}
1767
1768static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1769{
1770 iwmmxt_load_reg(cpu_M0, rn);
1771}
1772
1773static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1774{
1775 iwmmxt_load_reg(cpu_V1, rn);
1776 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1777}
1778
1779static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1780{
1781 iwmmxt_load_reg(cpu_V1, rn);
1782 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1783}
1784
1785static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1786{
1787 iwmmxt_load_reg(cpu_V1, rn);
1788 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1789}
1790
1791#define IWMMXT_OP(name) \
1792static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1793{ \
1794 iwmmxt_load_reg(cpu_V1, rn); \
1795 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1796}
1797
477955bd
PM
1798#define IWMMXT_OP_ENV(name) \
1799static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1800{ \
1801 iwmmxt_load_reg(cpu_V1, rn); \
1802 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1803}
1804
1805#define IWMMXT_OP_ENV_SIZE(name) \
1806IWMMXT_OP_ENV(name##b) \
1807IWMMXT_OP_ENV(name##w) \
1808IWMMXT_OP_ENV(name##l)
e677137d 1809
477955bd 1810#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1811static inline void gen_op_iwmmxt_##name##_M0(void) \
1812{ \
477955bd 1813 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1814}
1815
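/* Editorial note: as an example of the macros above, the
 * IWMMXT_OP_ENV_SIZE(cmpeq) instantiation below expands to the three
 * wrappers gen_op_iwmmxt_cmpeqb_M0_wRn(), gen_op_iwmmxt_cmpeqw_M0_wRn()
 * and gen_op_iwmmxt_cmpeql_M0_wRn(), each of which loads wRn into
 * cpu_V1 and calls the matching gen_helper_iwmmxt_cmpeq{b,w,l}() with
 * cpu_env and cpu_M0.
 */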
1816IWMMXT_OP(maddsq)
1817IWMMXT_OP(madduq)
1818IWMMXT_OP(sadb)
1819IWMMXT_OP(sadw)
1820IWMMXT_OP(mulslw)
1821IWMMXT_OP(mulshw)
1822IWMMXT_OP(mululw)
1823IWMMXT_OP(muluhw)
1824IWMMXT_OP(macsw)
1825IWMMXT_OP(macuw)
1826
477955bd
PM
1827IWMMXT_OP_ENV_SIZE(unpackl)
1828IWMMXT_OP_ENV_SIZE(unpackh)
1829
1830IWMMXT_OP_ENV1(unpacklub)
1831IWMMXT_OP_ENV1(unpackluw)
1832IWMMXT_OP_ENV1(unpacklul)
1833IWMMXT_OP_ENV1(unpackhub)
1834IWMMXT_OP_ENV1(unpackhuw)
1835IWMMXT_OP_ENV1(unpackhul)
1836IWMMXT_OP_ENV1(unpacklsb)
1837IWMMXT_OP_ENV1(unpacklsw)
1838IWMMXT_OP_ENV1(unpacklsl)
1839IWMMXT_OP_ENV1(unpackhsb)
1840IWMMXT_OP_ENV1(unpackhsw)
1841IWMMXT_OP_ENV1(unpackhsl)
1842
1843IWMMXT_OP_ENV_SIZE(cmpeq)
1844IWMMXT_OP_ENV_SIZE(cmpgtu)
1845IWMMXT_OP_ENV_SIZE(cmpgts)
1846
1847IWMMXT_OP_ENV_SIZE(mins)
1848IWMMXT_OP_ENV_SIZE(minu)
1849IWMMXT_OP_ENV_SIZE(maxs)
1850IWMMXT_OP_ENV_SIZE(maxu)
1851
1852IWMMXT_OP_ENV_SIZE(subn)
1853IWMMXT_OP_ENV_SIZE(addn)
1854IWMMXT_OP_ENV_SIZE(subu)
1855IWMMXT_OP_ENV_SIZE(addu)
1856IWMMXT_OP_ENV_SIZE(subs)
1857IWMMXT_OP_ENV_SIZE(adds)
1858
1859IWMMXT_OP_ENV(avgb0)
1860IWMMXT_OP_ENV(avgb1)
1861IWMMXT_OP_ENV(avgw0)
1862IWMMXT_OP_ENV(avgw1)
e677137d 1863
477955bd
PM
1864IWMMXT_OP_ENV(packuw)
1865IWMMXT_OP_ENV(packul)
1866IWMMXT_OP_ENV(packuq)
1867IWMMXT_OP_ENV(packsw)
1868IWMMXT_OP_ENV(packsl)
1869IWMMXT_OP_ENV(packsq)
e677137d 1870
e677137d
PB
1871static void gen_op_iwmmxt_set_mup(void)
1872{
39d5492a 1873 TCGv_i32 tmp;
e677137d
PB
1874 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1875 tcg_gen_ori_i32(tmp, tmp, 2);
1876 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1877}
1878
1879static void gen_op_iwmmxt_set_cup(void)
1880{
39d5492a 1881 TCGv_i32 tmp;
e677137d
PB
1882 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1883 tcg_gen_ori_i32(tmp, tmp, 1);
1884 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1885}
1886
1887static void gen_op_iwmmxt_setpsr_nz(void)
1888{
39d5492a 1889 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1890 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1891 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1892}
1893
1894static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1895{
1896 iwmmxt_load_reg(cpu_V1, rn);
86831435 1897 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1898 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1899}
1900
39d5492a
PM
1901static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1902 TCGv_i32 dest)
18c9b560
AZ
1903{
1904 int rd;
1905 uint32_t offset;
39d5492a 1906 TCGv_i32 tmp;
18c9b560
AZ
1907
1908 rd = (insn >> 16) & 0xf;
da6b5335 1909 tmp = load_reg(s, rd);
18c9b560
AZ
1910
1911 offset = (insn & 0xff) << ((insn >> 7) & 2);
1912 if (insn & (1 << 24)) {
1913 /* Pre-indexed */
1914 if (insn & (1 << 23))
da6b5335 1915 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1916 else
da6b5335
FN
1917 tcg_gen_addi_i32(tmp, tmp, -offset);
1918 tcg_gen_mov_i32(dest, tmp);
18c9b560 1919 if (insn & (1 << 21))
da6b5335
FN
1920 store_reg(s, rd, tmp);
1921 else
7d1b0095 1922 tcg_temp_free_i32(tmp);
18c9b560
AZ
1923 } else if (insn & (1 << 21)) {
1924 /* Post-indexed */
da6b5335 1925 tcg_gen_mov_i32(dest, tmp);
18c9b560 1926 if (insn & (1 << 23))
da6b5335 1927 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1928 else
da6b5335
FN
1929 tcg_gen_addi_i32(tmp, tmp, -offset);
1930 store_reg(s, rd, tmp);
18c9b560
AZ
1931 } else if (!(insn & (1 << 23)))
1932 return 1;
1933 return 0;
1934}
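/* Editorial worked example: (insn >> 7) & 2 evaluates to 2 exactly when
 * bit 8 of the instruction is set, so the word/doubleword forms scale
 * the 8-bit immediate by 4 (offset = imm8 << 2, at most 0x3fc) while
 * the byte/halfword forms use imm8 unscaled.
 */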
1935
39d5492a 1936static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1937{
1938 int rd = (insn >> 0) & 0xf;
39d5492a 1939 TCGv_i32 tmp;
18c9b560 1940
da6b5335
FN
1941 if (insn & (1 << 8)) {
1942 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1943 return 1;
da6b5335
FN
1944 } else {
1945 tmp = iwmmxt_load_creg(rd);
1946 }
1947 } else {
7d1b0095 1948 tmp = tcg_temp_new_i32();
da6b5335 1949 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1950 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1951 }
1952 tcg_gen_andi_i32(tmp, tmp, mask);
1953 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1954 tcg_temp_free_i32(tmp);
18c9b560
AZ
1955 return 0;
1956}
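/* Editorial note: the shift amount produced here comes either from one
 * of the wCGR0..wCGR3 control registers (bit 8 set) or from the low
 * word of a wR data register, masked with the width the caller passes
 * in: 0xff for WSRL/WSRA/WSLL, or 0xf/0x1f/0x3f per element size for
 * WROR.
 */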
1957
a1c7273b 1958/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1959 (i.e. an undefined instruction). */
7dcc1f89 1960static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1961{
1962 int rd, wrd;
1963 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1964 TCGv_i32 addr;
1965 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1966
1967 if ((insn & 0x0e000e00) == 0x0c000000) {
1968 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1969 wrd = insn & 0xf;
1970 rdlo = (insn >> 12) & 0xf;
1971 rdhi = (insn >> 16) & 0xf;
d00584b7 1972 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1973 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1974 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1975 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1976 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
d00584b7 1977 } else { /* TMCRR */
da6b5335
FN
1978 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1979 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1980 gen_op_iwmmxt_set_mup();
1981 }
1982 return 0;
1983 }
1984
1985 wrd = (insn >> 12) & 0xf;
7d1b0095 1986 addr = tcg_temp_new_i32();
da6b5335 1987 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1988 tcg_temp_free_i32(addr);
18c9b560 1989 return 1;
da6b5335 1990 }
18c9b560 1991 if (insn & ARM_CP_RW_BIT) {
d00584b7 1992 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1993 tmp = tcg_temp_new_i32();
12dcc321 1994 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1995 iwmmxt_store_creg(wrd, tmp);
18c9b560 1996 } else {
e677137d
PB
1997 i = 1;
1998 if (insn & (1 << 8)) {
d00584b7 1999 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 2000 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d 2001 i = 0;
d00584b7 2002 } else { /* WLDRW wRd */
29531141 2003 tmp = tcg_temp_new_i32();
12dcc321 2004 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
2005 }
2006 } else {
29531141 2007 tmp = tcg_temp_new_i32();
d00584b7 2008 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 2009 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
d00584b7 2010 } else { /* WLDRB */
12dcc321 2011 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
2012 }
2013 }
2014 if (i) {
2015 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 2016 tcg_temp_free_i32(tmp);
e677137d 2017 }
18c9b560
AZ
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 }
2020 } else {
d00584b7 2021 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 2022 tmp = iwmmxt_load_creg(wrd);
12dcc321 2023 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
2024 } else {
2025 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2026 tmp = tcg_temp_new_i32();
e677137d 2027 if (insn & (1 << 8)) {
d00584b7 2028 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 2029 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
d00584b7 2030 } else { /* WSTRW wRd */
ecc7b3aa 2031 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2032 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
2033 }
2034 } else {
d00584b7 2035 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 2036 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2037 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
d00584b7 2038 } else { /* WSTRB */
ecc7b3aa 2039 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 2040 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
2041 }
2042 }
18c9b560 2043 }
29531141 2044 tcg_temp_free_i32(tmp);
18c9b560 2045 }
7d1b0095 2046 tcg_temp_free_i32(addr);
18c9b560
AZ
2047 return 0;
2048 }
2049
2050 if ((insn & 0x0f000000) != 0x0e000000)
2051 return 1;
2052
2053 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
d00584b7 2054 case 0x000: /* WOR */
18c9b560
AZ
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 0) & 0xf;
2057 rd1 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 gen_op_iwmmxt_orq_M0_wRn(rd1);
2060 gen_op_iwmmxt_setpsr_nz();
2061 gen_op_iwmmxt_movq_wRn_M0(wrd);
2062 gen_op_iwmmxt_set_mup();
2063 gen_op_iwmmxt_set_cup();
2064 break;
d00584b7 2065 case 0x011: /* TMCR */
18c9b560
AZ
2066 if (insn & 0xf)
2067 return 1;
2068 rd = (insn >> 12) & 0xf;
2069 wrd = (insn >> 16) & 0xf;
2070 switch (wrd) {
2071 case ARM_IWMMXT_wCID:
2072 case ARM_IWMMXT_wCASF:
2073 break;
2074 case ARM_IWMMXT_wCon:
2075 gen_op_iwmmxt_set_cup();
2076 /* Fall through. */
2077 case ARM_IWMMXT_wCSSF:
da6b5335
FN
2078 tmp = iwmmxt_load_creg(wrd);
2079 tmp2 = load_reg(s, rd);
f669df27 2080 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 2081 tcg_temp_free_i32(tmp2);
da6b5335 2082 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
2083 break;
2084 case ARM_IWMMXT_wCGR0:
2085 case ARM_IWMMXT_wCGR1:
2086 case ARM_IWMMXT_wCGR2:
2087 case ARM_IWMMXT_wCGR3:
2088 gen_op_iwmmxt_set_cup();
da6b5335
FN
2089 tmp = load_reg(s, rd);
2090 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
2091 break;
2092 default:
2093 return 1;
2094 }
2095 break;
d00584b7 2096 case 0x100: /* WXOR */
18c9b560
AZ
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 0) & 0xf;
2099 rd1 = (insn >> 16) & 0xf;
2100 gen_op_iwmmxt_movq_M0_wRn(rd0);
2101 gen_op_iwmmxt_xorq_M0_wRn(rd1);
2102 gen_op_iwmmxt_setpsr_nz();
2103 gen_op_iwmmxt_movq_wRn_M0(wrd);
2104 gen_op_iwmmxt_set_mup();
2105 gen_op_iwmmxt_set_cup();
2106 break;
d00584b7 2107 case 0x111: /* TMRC */
18c9b560
AZ
2108 if (insn & 0xf)
2109 return 1;
2110 rd = (insn >> 12) & 0xf;
2111 wrd = (insn >> 16) & 0xf;
da6b5335
FN
2112 tmp = iwmmxt_load_creg(wrd);
2113 store_reg(s, rd, tmp);
18c9b560 2114 break;
d00584b7 2115 case 0x300: /* WANDN */
18c9b560
AZ
2116 wrd = (insn >> 12) & 0xf;
2117 rd0 = (insn >> 0) & 0xf;
2118 rd1 = (insn >> 16) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 2120 tcg_gen_not_i64(cpu_M0, cpu_M0); /* WANDN: bitwise complement, not arithmetic negation */
18c9b560
AZ
2121 gen_op_iwmmxt_andq_M0_wRn(rd1);
2122 gen_op_iwmmxt_setpsr_nz();
2123 gen_op_iwmmxt_movq_wRn_M0(wrd);
2124 gen_op_iwmmxt_set_mup();
2125 gen_op_iwmmxt_set_cup();
2126 break;
d00584b7 2127 case 0x200: /* WAND */
18c9b560
AZ
2128 wrd = (insn >> 12) & 0xf;
2129 rd0 = (insn >> 0) & 0xf;
2130 rd1 = (insn >> 16) & 0xf;
2131 gen_op_iwmmxt_movq_M0_wRn(rd0);
2132 gen_op_iwmmxt_andq_M0_wRn(rd1);
2133 gen_op_iwmmxt_setpsr_nz();
2134 gen_op_iwmmxt_movq_wRn_M0(wrd);
2135 gen_op_iwmmxt_set_mup();
2136 gen_op_iwmmxt_set_cup();
2137 break;
d00584b7 2138 case 0x810: case 0xa10: /* WMADD */
18c9b560
AZ
2139 wrd = (insn >> 12) & 0xf;
2140 rd0 = (insn >> 0) & 0xf;
2141 rd1 = (insn >> 16) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0);
2143 if (insn & (1 << 21))
2144 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2145 else
2146 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
d00584b7 2150 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
18c9b560
AZ
2151 wrd = (insn >> 12) & 0xf;
2152 rd0 = (insn >> 16) & 0xf;
2153 rd1 = (insn >> 0) & 0xf;
2154 gen_op_iwmmxt_movq_M0_wRn(rd0);
2155 switch ((insn >> 22) & 3) {
2156 case 0:
2157 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2158 break;
2159 case 1:
2160 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2161 break;
2162 case 2:
2163 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2164 break;
2165 case 3:
2166 return 1;
2167 }
2168 gen_op_iwmmxt_movq_wRn_M0(wrd);
2169 gen_op_iwmmxt_set_mup();
2170 gen_op_iwmmxt_set_cup();
2171 break;
d00584b7 2172 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
18c9b560
AZ
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 rd1 = (insn >> 0) & 0xf;
2176 gen_op_iwmmxt_movq_M0_wRn(rd0);
2177 switch ((insn >> 22) & 3) {
2178 case 0:
2179 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2180 break;
2181 case 1:
2182 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2183 break;
2184 case 2:
2185 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2186 break;
2187 case 3:
2188 return 1;
2189 }
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2193 break;
d00584b7 2194 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
18c9b560
AZ
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 rd1 = (insn >> 0) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
2199 if (insn & (1 << 22))
2200 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2201 else
2202 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2203 if (!(insn & (1 << 20)))
2204 gen_op_iwmmxt_addl_M0_wRn(wrd);
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 break;
d00584b7 2208 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
18c9b560
AZ
2209 wrd = (insn >> 12) & 0xf;
2210 rd0 = (insn >> 16) & 0xf;
2211 rd1 = (insn >> 0) & 0xf;
2212 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2213 if (insn & (1 << 21)) {
2214 if (insn & (1 << 20))
2215 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2216 else
2217 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2218 } else {
2219 if (insn & (1 << 20))
2220 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2221 else
2222 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2223 }
18c9b560
AZ
2224 gen_op_iwmmxt_movq_wRn_M0(wrd);
2225 gen_op_iwmmxt_set_mup();
2226 break;
d00584b7 2227 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
18c9b560
AZ
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 rd1 = (insn >> 0) & 0xf;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0);
2232 if (insn & (1 << 21))
2233 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2234 else
2235 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2236 if (!(insn & (1 << 20))) {
e677137d
PB
2237 iwmmxt_load_reg(cpu_V1, wrd);
2238 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2239 }
2240 gen_op_iwmmxt_movq_wRn_M0(wrd);
2241 gen_op_iwmmxt_set_mup();
2242 break;
d00584b7 2243 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
18c9b560
AZ
2244 wrd = (insn >> 12) & 0xf;
2245 rd0 = (insn >> 16) & 0xf;
2246 rd1 = (insn >> 0) & 0xf;
2247 gen_op_iwmmxt_movq_M0_wRn(rd0);
2248 switch ((insn >> 22) & 3) {
2249 case 0:
2250 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2251 break;
2252 case 1:
2253 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2254 break;
2255 case 2:
2256 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2257 break;
2258 case 3:
2259 return 1;
2260 }
2261 gen_op_iwmmxt_movq_wRn_M0(wrd);
2262 gen_op_iwmmxt_set_mup();
2263 gen_op_iwmmxt_set_cup();
2264 break;
d00584b7 2265 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
18c9b560
AZ
2266 wrd = (insn >> 12) & 0xf;
2267 rd0 = (insn >> 16) & 0xf;
2268 rd1 = (insn >> 0) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2270 if (insn & (1 << 22)) {
2271 if (insn & (1 << 20))
2272 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2273 else
2274 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2275 } else {
2276 if (insn & (1 << 20))
2277 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2278 else
2279 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2280 }
18c9b560
AZ
2281 gen_op_iwmmxt_movq_wRn_M0(wrd);
2282 gen_op_iwmmxt_set_mup();
2283 gen_op_iwmmxt_set_cup();
2284 break;
d00584b7 2285 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
18c9b560
AZ
2286 wrd = (insn >> 12) & 0xf;
2287 rd0 = (insn >> 16) & 0xf;
2288 rd1 = (insn >> 0) & 0xf;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2290 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2291 tcg_gen_andi_i32(tmp, tmp, 7);
2292 iwmmxt_load_reg(cpu_V1, rd1);
2293 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2294 tcg_temp_free_i32(tmp);
18c9b560
AZ
2295 gen_op_iwmmxt_movq_wRn_M0(wrd);
2296 gen_op_iwmmxt_set_mup();
2297 break;
d00584b7 2298 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2299 if (((insn >> 6) & 3) == 3)
2300 return 1;
18c9b560
AZ
2301 rd = (insn >> 12) & 0xf;
2302 wrd = (insn >> 16) & 0xf;
da6b5335 2303 tmp = load_reg(s, rd);
18c9b560
AZ
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
2305 switch ((insn >> 6) & 3) {
2306 case 0:
da6b5335
FN
2307 tmp2 = tcg_const_i32(0xff);
2308 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2309 break;
2310 case 1:
da6b5335
FN
2311 tmp2 = tcg_const_i32(0xffff);
2312 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2313 break;
2314 case 2:
da6b5335
FN
2315 tmp2 = tcg_const_i32(0xffffffff);
2316 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2317 break;
da6b5335 2318 default:
f764718d
RH
2319 tmp2 = NULL;
2320 tmp3 = NULL;
18c9b560 2321 }
da6b5335 2322 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2323 tcg_temp_free_i32(tmp3);
2324 tcg_temp_free_i32(tmp2);
7d1b0095 2325 tcg_temp_free_i32(tmp);
18c9b560
AZ
2326 gen_op_iwmmxt_movq_wRn_M0(wrd);
2327 gen_op_iwmmxt_set_mup();
2328 break;
d00584b7 2329 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
18c9b560
AZ
2330 rd = (insn >> 12) & 0xf;
2331 wrd = (insn >> 16) & 0xf;
da6b5335 2332 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2333 return 1;
2334 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2335 tmp = tcg_temp_new_i32();
18c9b560
AZ
2336 switch ((insn >> 22) & 3) {
2337 case 0:
da6b5335 2338 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2339 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2340 if (insn & 8) {
2341 tcg_gen_ext8s_i32(tmp, tmp);
2342 } else {
2343 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2344 }
2345 break;
2346 case 1:
da6b5335 2347 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2348 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2349 if (insn & 8) {
2350 tcg_gen_ext16s_i32(tmp, tmp);
2351 } else {
2352 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2353 }
2354 break;
2355 case 2:
da6b5335 2356 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2357 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2358 break;
18c9b560 2359 }
da6b5335 2360 store_reg(s, rd, tmp);
18c9b560 2361 break;
d00584b7 2362 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2363 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2364 return 1;
da6b5335 2365 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2366 switch ((insn >> 22) & 3) {
2367 case 0:
da6b5335 2368 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2369 break;
2370 case 1:
da6b5335 2371 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2372 break;
2373 case 2:
da6b5335 2374 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2375 break;
18c9b560 2376 }
da6b5335
FN
2377 tcg_gen_shli_i32(tmp, tmp, 28);
2378 gen_set_nzcv(tmp);
7d1b0095 2379 tcg_temp_free_i32(tmp);
18c9b560 2380 break;
d00584b7 2381 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2382 if (((insn >> 6) & 3) == 3)
2383 return 1;
18c9b560
AZ
2384 rd = (insn >> 12) & 0xf;
2385 wrd = (insn >> 16) & 0xf;
da6b5335 2386 tmp = load_reg(s, rd);
18c9b560
AZ
2387 switch ((insn >> 6) & 3) {
2388 case 0:
da6b5335 2389 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2390 break;
2391 case 1:
da6b5335 2392 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2393 break;
2394 case 2:
da6b5335 2395 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2396 break;
18c9b560 2397 }
7d1b0095 2398 tcg_temp_free_i32(tmp);
18c9b560
AZ
2399 gen_op_iwmmxt_movq_wRn_M0(wrd);
2400 gen_op_iwmmxt_set_mup();
2401 break;
d00584b7 2402 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2403 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2404 return 1;
da6b5335 2405 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2406 tmp2 = tcg_temp_new_i32();
da6b5335 2407 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2408 switch ((insn >> 22) & 3) {
2409 case 0:
2410 for (i = 0; i < 7; i ++) {
da6b5335
FN
2411 tcg_gen_shli_i32(tmp2, tmp2, 4);
2412 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2413 }
2414 break;
2415 case 1:
2416 for (i = 0; i < 3; i ++) {
da6b5335
FN
2417 tcg_gen_shli_i32(tmp2, tmp2, 8);
2418 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2419 }
2420 break;
2421 case 2:
da6b5335
FN
2422 tcg_gen_shli_i32(tmp2, tmp2, 16);
2423 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2424 break;
18c9b560 2425 }
da6b5335 2426 gen_set_nzcv(tmp);
7d1b0095
PM
2427 tcg_temp_free_i32(tmp2);
2428 tcg_temp_free_i32(tmp);
18c9b560 2429 break;
d00584b7 2430 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
18c9b560
AZ
2431 wrd = (insn >> 12) & 0xf;
2432 rd0 = (insn >> 16) & 0xf;
2433 gen_op_iwmmxt_movq_M0_wRn(rd0);
2434 switch ((insn >> 22) & 3) {
2435 case 0:
e677137d 2436 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2437 break;
2438 case 1:
e677137d 2439 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2440 break;
2441 case 2:
e677137d 2442 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2443 break;
2444 case 3:
2445 return 1;
2446 }
2447 gen_op_iwmmxt_movq_wRn_M0(wrd);
2448 gen_op_iwmmxt_set_mup();
2449 break;
d00584b7 2450 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2451 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2452 return 1;
da6b5335 2453 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2454 tmp2 = tcg_temp_new_i32();
da6b5335 2455 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2456 switch ((insn >> 22) & 3) {
2457 case 0:
2458 for (i = 0; i < 7; i ++) {
da6b5335
FN
2459 tcg_gen_shli_i32(tmp2, tmp2, 4);
2460 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2461 }
2462 break;
2463 case 1:
2464 for (i = 0; i < 3; i ++) {
da6b5335
FN
2465 tcg_gen_shli_i32(tmp2, tmp2, 8);
2466 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2467 }
2468 break;
2469 case 2:
da6b5335
FN
2470 tcg_gen_shli_i32(tmp2, tmp2, 16);
2471 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2472 break;
18c9b560 2473 }
da6b5335 2474 gen_set_nzcv(tmp);
7d1b0095
PM
2475 tcg_temp_free_i32(tmp2);
2476 tcg_temp_free_i32(tmp);
18c9b560 2477 break;
d00584b7 2478 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
18c9b560
AZ
2479 rd = (insn >> 12) & 0xf;
2480 rd0 = (insn >> 16) & 0xf;
da6b5335 2481 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2482 return 1;
2483 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2484 tmp = tcg_temp_new_i32();
18c9b560
AZ
2485 switch ((insn >> 22) & 3) {
2486 case 0:
da6b5335 2487 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2488 break;
2489 case 1:
da6b5335 2490 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2491 break;
2492 case 2:
da6b5335 2493 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2494 break;
18c9b560 2495 }
da6b5335 2496 store_reg(s, rd, tmp);
18c9b560 2497 break;
d00584b7 2498 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
18c9b560
AZ
2499 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2500 wrd = (insn >> 12) & 0xf;
2501 rd0 = (insn >> 16) & 0xf;
2502 rd1 = (insn >> 0) & 0xf;
2503 gen_op_iwmmxt_movq_M0_wRn(rd0);
2504 switch ((insn >> 22) & 3) {
2505 case 0:
2506 if (insn & (1 << 21))
2507 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2508 else
2509 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2510 break;
2511 case 1:
2512 if (insn & (1 << 21))
2513 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2514 else
2515 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2516 break;
2517 case 2:
2518 if (insn & (1 << 21))
2519 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2520 else
2521 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2522 break;
2523 case 3:
2524 return 1;
2525 }
2526 gen_op_iwmmxt_movq_wRn_M0(wrd);
2527 gen_op_iwmmxt_set_mup();
2528 gen_op_iwmmxt_set_cup();
2529 break;
d00584b7 2530 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
18c9b560
AZ
2531 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2532 wrd = (insn >> 12) & 0xf;
2533 rd0 = (insn >> 16) & 0xf;
2534 gen_op_iwmmxt_movq_M0_wRn(rd0);
2535 switch ((insn >> 22) & 3) {
2536 case 0:
2537 if (insn & (1 << 21))
2538 gen_op_iwmmxt_unpacklsb_M0();
2539 else
2540 gen_op_iwmmxt_unpacklub_M0();
2541 break;
2542 case 1:
2543 if (insn & (1 << 21))
2544 gen_op_iwmmxt_unpacklsw_M0();
2545 else
2546 gen_op_iwmmxt_unpackluw_M0();
2547 break;
2548 case 2:
2549 if (insn & (1 << 21))
2550 gen_op_iwmmxt_unpacklsl_M0();
2551 else
2552 gen_op_iwmmxt_unpacklul_M0();
2553 break;
2554 case 3:
2555 return 1;
2556 }
2557 gen_op_iwmmxt_movq_wRn_M0(wrd);
2558 gen_op_iwmmxt_set_mup();
2559 gen_op_iwmmxt_set_cup();
2560 break;
d00584b7 2561 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
18c9b560
AZ
2562 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2563 wrd = (insn >> 12) & 0xf;
2564 rd0 = (insn >> 16) & 0xf;
2565 gen_op_iwmmxt_movq_M0_wRn(rd0);
2566 switch ((insn >> 22) & 3) {
2567 case 0:
2568 if (insn & (1 << 21))
2569 gen_op_iwmmxt_unpackhsb_M0();
2570 else
2571 gen_op_iwmmxt_unpackhub_M0();
2572 break;
2573 case 1:
2574 if (insn & (1 << 21))
2575 gen_op_iwmmxt_unpackhsw_M0();
2576 else
2577 gen_op_iwmmxt_unpackhuw_M0();
2578 break;
2579 case 2:
2580 if (insn & (1 << 21))
2581 gen_op_iwmmxt_unpackhsl_M0();
2582 else
2583 gen_op_iwmmxt_unpackhul_M0();
2584 break;
2585 case 3:
2586 return 1;
2587 }
2588 gen_op_iwmmxt_movq_wRn_M0(wrd);
2589 gen_op_iwmmxt_set_mup();
2590 gen_op_iwmmxt_set_cup();
2591 break;
d00584b7 2592 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
18c9b560 2593 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2594 if (((insn >> 22) & 3) == 0)
2595 return 1;
18c9b560
AZ
2596 wrd = (insn >> 12) & 0xf;
2597 rd0 = (insn >> 16) & 0xf;
2598 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2599 tmp = tcg_temp_new_i32();
da6b5335 2600 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2601 tcg_temp_free_i32(tmp);
18c9b560 2602 return 1;
da6b5335 2603 }
18c9b560 2604 switch ((insn >> 22) & 3) {
18c9b560 2605 case 1:
477955bd 2606 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2607 break;
2608 case 2:
477955bd 2609 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2610 break;
2611 case 3:
477955bd 2612 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2613 break;
2614 }
7d1b0095 2615 tcg_temp_free_i32(tmp);
18c9b560
AZ
2616 gen_op_iwmmxt_movq_wRn_M0(wrd);
2617 gen_op_iwmmxt_set_mup();
2618 gen_op_iwmmxt_set_cup();
2619 break;
d00584b7 2620 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
18c9b560 2621 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2622 if (((insn >> 22) & 3) == 0)
2623 return 1;
18c9b560
AZ
2624 wrd = (insn >> 12) & 0xf;
2625 rd0 = (insn >> 16) & 0xf;
2626 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2627 tmp = tcg_temp_new_i32();
da6b5335 2628 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2629 tcg_temp_free_i32(tmp);
18c9b560 2630 return 1;
da6b5335 2631 }
18c9b560 2632 switch ((insn >> 22) & 3) {
18c9b560 2633 case 1:
477955bd 2634 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2635 break;
2636 case 2:
477955bd 2637 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2638 break;
2639 case 3:
477955bd 2640 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2641 break;
2642 }
7d1b0095 2643 tcg_temp_free_i32(tmp);
18c9b560
AZ
2644 gen_op_iwmmxt_movq_wRn_M0(wrd);
2645 gen_op_iwmmxt_set_mup();
2646 gen_op_iwmmxt_set_cup();
2647 break;
d00584b7 2648 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
18c9b560 2649 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2650 if (((insn >> 22) & 3) == 0)
2651 return 1;
18c9b560
AZ
2652 wrd = (insn >> 12) & 0xf;
2653 rd0 = (insn >> 16) & 0xf;
2654 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2655 tmp = tcg_temp_new_i32();
da6b5335 2656 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2657 tcg_temp_free_i32(tmp);
18c9b560 2658 return 1;
da6b5335 2659 }
18c9b560 2660 switch ((insn >> 22) & 3) {
18c9b560 2661 case 1:
477955bd 2662 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2663 break;
2664 case 2:
477955bd 2665 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2666 break;
2667 case 3:
477955bd 2668 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2669 break;
2670 }
7d1b0095 2671 tcg_temp_free_i32(tmp);
18c9b560
AZ
2672 gen_op_iwmmxt_movq_wRn_M0(wrd);
2673 gen_op_iwmmxt_set_mup();
2674 gen_op_iwmmxt_set_cup();
2675 break;
d00584b7 2676 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
18c9b560 2677 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2678 if (((insn >> 22) & 3) == 0)
2679 return 1;
18c9b560
AZ
2680 wrd = (insn >> 12) & 0xf;
2681 rd0 = (insn >> 16) & 0xf;
2682 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2683 tmp = tcg_temp_new_i32();
18c9b560 2684 switch ((insn >> 22) & 3) {
18c9b560 2685 case 1:
da6b5335 2686 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2687 tcg_temp_free_i32(tmp);
18c9b560 2688 return 1;
da6b5335 2689 }
477955bd 2690 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2691 break;
2692 case 2:
da6b5335 2693 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2694 tcg_temp_free_i32(tmp);
18c9b560 2695 return 1;
da6b5335 2696 }
477955bd 2697 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2698 break;
2699 case 3:
da6b5335 2700 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2701 tcg_temp_free_i32(tmp);
18c9b560 2702 return 1;
da6b5335 2703 }
477955bd 2704 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2705 break;
2706 }
7d1b0095 2707 tcg_temp_free_i32(tmp);
18c9b560
AZ
2708 gen_op_iwmmxt_movq_wRn_M0(wrd);
2709 gen_op_iwmmxt_set_mup();
2710 gen_op_iwmmxt_set_cup();
2711 break;
d00584b7 2712 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
18c9b560
AZ
2713 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2714 wrd = (insn >> 12) & 0xf;
2715 rd0 = (insn >> 16) & 0xf;
2716 rd1 = (insn >> 0) & 0xf;
2717 gen_op_iwmmxt_movq_M0_wRn(rd0);
2718 switch ((insn >> 22) & 3) {
2719 case 0:
2720 if (insn & (1 << 21))
2721 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2722 else
2723 gen_op_iwmmxt_minub_M0_wRn(rd1);
2724 break;
2725 case 1:
2726 if (insn & (1 << 21))
2727 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2728 else
2729 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2730 break;
2731 case 2:
2732 if (insn & (1 << 21))
2733 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2734 else
2735 gen_op_iwmmxt_minul_M0_wRn(rd1);
2736 break;
2737 case 3:
2738 return 1;
2739 }
2740 gen_op_iwmmxt_movq_wRn_M0(wrd);
2741 gen_op_iwmmxt_set_mup();
2742 break;
d00584b7 2743 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
18c9b560
AZ
2744 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2745 wrd = (insn >> 12) & 0xf;
2746 rd0 = (insn >> 16) & 0xf;
2747 rd1 = (insn >> 0) & 0xf;
2748 gen_op_iwmmxt_movq_M0_wRn(rd0);
2749 switch ((insn >> 22) & 3) {
2750 case 0:
2751 if (insn & (1 << 21))
2752 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2753 else
2754 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2755 break;
2756 case 1:
2757 if (insn & (1 << 21))
2758 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2759 else
2760 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2761 break;
2762 case 2:
2763 if (insn & (1 << 21))
2764 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2765 else
2766 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2767 break;
2768 case 3:
2769 return 1;
2770 }
2771 gen_op_iwmmxt_movq_wRn_M0(wrd);
2772 gen_op_iwmmxt_set_mup();
2773 break;
d00584b7 2774 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
18c9b560
AZ
2775 case 0x402: case 0x502: case 0x602: case 0x702:
2776 wrd = (insn >> 12) & 0xf;
2777 rd0 = (insn >> 16) & 0xf;
2778 rd1 = (insn >> 0) & 0xf;
2779 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2780 tmp = tcg_const_i32((insn >> 20) & 3);
2781 iwmmxt_load_reg(cpu_V1, rd1);
2782 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2783 tcg_temp_free_i32(tmp);
18c9b560
AZ
2784 gen_op_iwmmxt_movq_wRn_M0(wrd);
2785 gen_op_iwmmxt_set_mup();
2786 break;
d00584b7 2787 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
18c9b560
AZ
2788 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2789 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2790 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2791 wrd = (insn >> 12) & 0xf;
2792 rd0 = (insn >> 16) & 0xf;
2793 rd1 = (insn >> 0) & 0xf;
2794 gen_op_iwmmxt_movq_M0_wRn(rd0);
2795 switch ((insn >> 20) & 0xf) {
2796 case 0x0:
2797 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2798 break;
2799 case 0x1:
2800 gen_op_iwmmxt_subub_M0_wRn(rd1);
2801 break;
2802 case 0x3:
2803 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2804 break;
2805 case 0x4:
2806 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2807 break;
2808 case 0x5:
2809 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2810 break;
2811 case 0x7:
2812 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2813 break;
2814 case 0x8:
2815 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2816 break;
2817 case 0x9:
2818 gen_op_iwmmxt_subul_M0_wRn(rd1);
2819 break;
2820 case 0xb:
2821 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2822 break;
2823 default:
2824 return 1;
2825 }
2826 gen_op_iwmmxt_movq_wRn_M0(wrd);
2827 gen_op_iwmmxt_set_mup();
2828 gen_op_iwmmxt_set_cup();
2829 break;
d00584b7 2830 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
18c9b560
AZ
2831 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2832 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2833 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2834 wrd = (insn >> 12) & 0xf;
2835 rd0 = (insn >> 16) & 0xf;
2836 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2837 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2838 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2839 tcg_temp_free_i32(tmp);
18c9b560
AZ
2840 gen_op_iwmmxt_movq_wRn_M0(wrd);
2841 gen_op_iwmmxt_set_mup();
2842 gen_op_iwmmxt_set_cup();
2843 break;
d00584b7 2844 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
18c9b560
AZ
2845 case 0x418: case 0x518: case 0x618: case 0x718:
2846 case 0x818: case 0x918: case 0xa18: case 0xb18:
2847 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2848 wrd = (insn >> 12) & 0xf;
2849 rd0 = (insn >> 16) & 0xf;
2850 rd1 = (insn >> 0) & 0xf;
2851 gen_op_iwmmxt_movq_M0_wRn(rd0);
2852 switch ((insn >> 20) & 0xf) {
2853 case 0x0:
2854 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2855 break;
2856 case 0x1:
2857 gen_op_iwmmxt_addub_M0_wRn(rd1);
2858 break;
2859 case 0x3:
2860 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2861 break;
2862 case 0x4:
2863 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2864 break;
2865 case 0x5:
2866 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2867 break;
2868 case 0x7:
2869 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2870 break;
2871 case 0x8:
2872 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2873 break;
2874 case 0x9:
2875 gen_op_iwmmxt_addul_M0_wRn(rd1);
2876 break;
2877 case 0xb:
2878 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2879 break;
2880 default:
2881 return 1;
2882 }
2883 gen_op_iwmmxt_movq_wRn_M0(wrd);
2884 gen_op_iwmmxt_set_mup();
2885 gen_op_iwmmxt_set_cup();
2886 break;
d00584b7 2887 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
18c9b560
AZ
2888 case 0x408: case 0x508: case 0x608: case 0x708:
2889 case 0x808: case 0x908: case 0xa08: case 0xb08:
2890 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2891 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2892 return 1;
18c9b560
AZ
2893 wrd = (insn >> 12) & 0xf;
2894 rd0 = (insn >> 16) & 0xf;
2895 rd1 = (insn >> 0) & 0xf;
2896 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2897 switch ((insn >> 22) & 3) {
18c9b560
AZ
2898 case 1:
2899 if (insn & (1 << 21))
2900 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2901 else
2902 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2903 break;
2904 case 2:
2905 if (insn & (1 << 21))
2906 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2907 else
2908 gen_op_iwmmxt_packul_M0_wRn(rd1);
2909 break;
2910 case 3:
2911 if (insn & (1 << 21))
2912 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2913 else
2914 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2915 break;
2916 }
2917 gen_op_iwmmxt_movq_wRn_M0(wrd);
2918 gen_op_iwmmxt_set_mup();
2919 gen_op_iwmmxt_set_cup();
2920 break;
2921 case 0x201: case 0x203: case 0x205: case 0x207:
2922 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2923 case 0x211: case 0x213: case 0x215: case 0x217:
2924 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2925 wrd = (insn >> 5) & 0xf;
2926 rd0 = (insn >> 12) & 0xf;
2927 rd1 = (insn >> 0) & 0xf;
2928 if (rd0 == 0xf || rd1 == 0xf)
2929 return 1;
2930 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2931 tmp = load_reg(s, rd0);
2932 tmp2 = load_reg(s, rd1);
18c9b560 2933 switch ((insn >> 16) & 0xf) {
d00584b7 2934 case 0x0: /* TMIA */
da6b5335 2935 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2936 break;
d00584b7 2937 case 0x8: /* TMIAPH */
da6b5335 2938 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2939 break;
d00584b7 2940 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2941 if (insn & (1 << 16))
da6b5335 2942 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2943 if (insn & (1 << 17))
da6b5335
FN
2944 tcg_gen_shri_i32(tmp2, tmp2, 16);
2945 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2946 break;
2947 default:
7d1b0095
PM
2948 tcg_temp_free_i32(tmp2);
2949 tcg_temp_free_i32(tmp);
18c9b560
AZ
2950 return 1;
2951 }
7d1b0095
PM
2952 tcg_temp_free_i32(tmp2);
2953 tcg_temp_free_i32(tmp);
18c9b560
AZ
2954 gen_op_iwmmxt_movq_wRn_M0(wrd);
2955 gen_op_iwmmxt_set_mup();
2956 break;
2957 default:
2958 return 1;
2959 }
2960
2961 return 0;
2962}
2963
a1c7273b 2964/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2965 (i.e. an undefined instruction). */
7dcc1f89 2966static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2967{
2968 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2969 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2970
2971 if ((insn & 0x0ff00f10) == 0x0e200010) {
2972 /* Multiply with Internal Accumulate Format */
2973 rd0 = (insn >> 12) & 0xf;
2974 rd1 = insn & 0xf;
2975 acc = (insn >> 5) & 7;
2976
2977 if (acc != 0)
2978 return 1;
2979
3a554c0f
FN
2980 tmp = load_reg(s, rd0);
2981 tmp2 = load_reg(s, rd1);
18c9b560 2982 switch ((insn >> 16) & 0xf) {
d00584b7 2983 case 0x0: /* MIA */
3a554c0f 2984 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2985 break;
d00584b7 2986 case 0x8: /* MIAPH */
3a554c0f 2987 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560 2988 break;
d00584b7
PM
2989 case 0xc: /* MIABB */
2990 case 0xd: /* MIABT */
2991 case 0xe: /* MIATB */
2992 case 0xf: /* MIATT */
18c9b560 2993 if (insn & (1 << 16))
3a554c0f 2994 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2995 if (insn & (1 << 17))
3a554c0f
FN
2996 tcg_gen_shri_i32(tmp2, tmp2, 16);
2997 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2998 break;
2999 default:
3000 return 1;
3001 }
7d1b0095
PM
3002 tcg_temp_free_i32(tmp2);
3003 tcg_temp_free_i32(tmp);
18c9b560
AZ
3004
3005 gen_op_iwmmxt_movq_wRn_M0(acc);
3006 return 0;
3007 }
3008
3009 if ((insn & 0x0fe00ff8) == 0x0c400000) {
3010 /* Internal Accumulator Access Format */
3011 rdhi = (insn >> 16) & 0xf;
3012 rdlo = (insn >> 12) & 0xf;
3013 acc = insn & 7;
3014
3015 if (acc != 0)
3016 return 1;
3017
d00584b7 3018 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 3019 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 3020 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 3021 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 3022 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 3023 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
d00584b7 3024 } else { /* MAR */
3a554c0f
FN
3025 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
3026 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
3027 }
3028 return 0;
3029 }
3030
3031 return 1;
3032}
3033
9ee6e8bb
PB
3034#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
3035#define VFP_SREG(insn, bigbit, smallbit) \
3036 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
3037#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
d614a513 3038 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
9ee6e8bb
PB
3039 reg = (((insn) >> (bigbit)) & 0x0f) \
3040 | (((insn) >> ((smallbit) - 4)) & 0x10); \
3041 } else { \
3042 if (insn & (1 << (smallbit))) \
3043 return 1; \
3044 reg = ((insn) >> (bigbit)) & 0x0f; \
3045 }} while (0)
3046
3047#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
3048#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
3049#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
3050#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
3051#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
3052#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
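/* Editorial example: on a VFP3-capable core, VFP_DREG_D(reg, insn)
 * builds D0..D31 from bits [15:12] (the low four bits of the register
 * number) and bit 22 (bit 4).  Without VFP3 only D0..D15 exist, so a
 * set bit 22 makes the macro UNDEF via "return 1".
 */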
3053
4373f3ce 3054/* Move between integer and VFP cores. */
39d5492a 3055static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 3056{
39d5492a 3057 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
3058 tcg_gen_mov_i32(tmp, cpu_F0s);
3059 return tmp;
3060}
3061
39d5492a 3062static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
3063{
3064 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 3065 tcg_temp_free_i32(tmp);
4373f3ce
PB
3066}
3067
39d5492a 3068static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 3069{
39d5492a 3070 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 3071 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
3072 tcg_gen_shli_i32(tmp, var, 16);
3073 tcg_gen_or_i32(var, var, tmp);
7d1b0095 3074 tcg_temp_free_i32(tmp);
ad69471c
PB
3075}
3076
39d5492a 3077static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 3078{
39d5492a 3079 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
3080 tcg_gen_andi_i32(var, var, 0xffff0000);
3081 tcg_gen_shri_i32(tmp, var, 16);
3082 tcg_gen_or_i32(var, var, tmp);
7d1b0095 3083 tcg_temp_free_i32(tmp);
ad69471c
PB
3084}
3085
04731fb5
WN
3086static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3087 uint32_t dp)
3088{
3089 uint32_t cc = extract32(insn, 20, 2);
3090
3091 if (dp) {
3092 TCGv_i64 frn, frm, dest;
3093 TCGv_i64 tmp, zero, zf, nf, vf;
3094
3095 zero = tcg_const_i64(0);
3096
3097 frn = tcg_temp_new_i64();
3098 frm = tcg_temp_new_i64();
3099 dest = tcg_temp_new_i64();
3100
3101 zf = tcg_temp_new_i64();
3102 nf = tcg_temp_new_i64();
3103 vf = tcg_temp_new_i64();
3104
3105 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3106 tcg_gen_ext_i32_i64(nf, cpu_NF);
3107 tcg_gen_ext_i32_i64(vf, cpu_VF);
3108
3109 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3110 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3111 switch (cc) {
3112 case 0: /* eq: Z */
3113 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3114 frn, frm);
3115 break;
3116 case 1: /* vs: V */
3117 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3118 frn, frm);
3119 break;
3120 case 2: /* ge: N == V -> N ^ V == 0 */
3121 tmp = tcg_temp_new_i64();
3122 tcg_gen_xor_i64(tmp, vf, nf);
3123 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3124 frn, frm);
3125 tcg_temp_free_i64(tmp);
3126 break;
3127 case 3: /* gt: !Z && N == V */
3128 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3129 frn, frm);
3130 tmp = tcg_temp_new_i64();
3131 tcg_gen_xor_i64(tmp, vf, nf);
3132 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3133 dest, frm);
3134 tcg_temp_free_i64(tmp);
3135 break;
3136 }
3137 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3138 tcg_temp_free_i64(frn);
3139 tcg_temp_free_i64(frm);
3140 tcg_temp_free_i64(dest);
3141
3142 tcg_temp_free_i64(zf);
3143 tcg_temp_free_i64(nf);
3144 tcg_temp_free_i64(vf);
3145
3146 tcg_temp_free_i64(zero);
3147 } else {
3148 TCGv_i32 frn, frm, dest;
3149 TCGv_i32 tmp, zero;
3150
3151 zero = tcg_const_i32(0);
3152
3153 frn = tcg_temp_new_i32();
3154 frm = tcg_temp_new_i32();
3155 dest = tcg_temp_new_i32();
3156 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3157 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3158 switch (cc) {
3159 case 0: /* eq: Z */
3160 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3161 frn, frm);
3162 break;
3163 case 1: /* vs: V */
3164 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3165 frn, frm);
3166 break;
3167 case 2: /* ge: N == V -> N ^ V == 0 */
3168 tmp = tcg_temp_new_i32();
3169 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3170 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3171 frn, frm);
3172 tcg_temp_free_i32(tmp);
3173 break;
3174 case 3: /* gt: !Z && N == V */
3175 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3176 frn, frm);
3177 tmp = tcg_temp_new_i32();
3178 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3179 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3180 dest, frm);
3181 tcg_temp_free_i32(tmp);
3182 break;
3183 }
3184 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3185 tcg_temp_free_i32(frn);
3186 tcg_temp_free_i32(frm);
3187 tcg_temp_free_i32(dest);
3188
3189 tcg_temp_free_i32(zero);
3190 }
3191
3192 return 0;
3193}
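/* Editorial note: the "gt" case above composes two movconds: the first
 * keeps frn only when Z is clear (cpu_ZF != 0 in QEMU's flag
 * representation), the second keeps that result only when N == V (the
 * sign of NF ^ VF is non-negative) and otherwise selects frm.  Together
 * that is the signed "greater than" condition.
 */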
3194
40cfacdd
WN
3195static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3196 uint32_t rm, uint32_t dp)
3197{
3198 uint32_t vmin = extract32(insn, 6, 1);
3199 TCGv_ptr fpst = get_fpstatus_ptr(0);
3200
3201 if (dp) {
3202 TCGv_i64 frn, frm, dest;
3203
3204 frn = tcg_temp_new_i64();
3205 frm = tcg_temp_new_i64();
3206 dest = tcg_temp_new_i64();
3207
3208 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3209 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3210 if (vmin) {
f71a2ae5 3211 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3212 } else {
f71a2ae5 3213 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3214 }
3215 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3216 tcg_temp_free_i64(frn);
3217 tcg_temp_free_i64(frm);
3218 tcg_temp_free_i64(dest);
3219 } else {
3220 TCGv_i32 frn, frm, dest;
3221
3222 frn = tcg_temp_new_i32();
3223 frm = tcg_temp_new_i32();
3224 dest = tcg_temp_new_i32();
3225
3226 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3227 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3228 if (vmin) {
f71a2ae5 3229 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3230 } else {
f71a2ae5 3231 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3232 }
3233 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3234 tcg_temp_free_i32(frn);
3235 tcg_temp_free_i32(frm);
3236 tcg_temp_free_i32(dest);
3237 }
3238
3239 tcg_temp_free_ptr(fpst);
3240 return 0;
3241}
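/* Editorial note: the vfp_minnum and vfp_maxnum helpers used above are
 * understood to follow the IEEE 754-2008 minNum/maxNum semantics that
 * VMINNM/VMAXNM require, where a quiet NaN in one operand yields the
 * other, numeric, operand.
 */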
3242
7655f39b
WN
3243static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3244 int rounding)
3245{
3246 TCGv_ptr fpst = get_fpstatus_ptr(0);
3247 TCGv_i32 tcg_rmode;
3248
3249 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3250 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3251
3252 if (dp) {
3253 TCGv_i64 tcg_op;
3254 TCGv_i64 tcg_res;
3255 tcg_op = tcg_temp_new_i64();
3256 tcg_res = tcg_temp_new_i64();
3257 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3258 gen_helper_rintd(tcg_res, tcg_op, fpst);
3259 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3260 tcg_temp_free_i64(tcg_op);
3261 tcg_temp_free_i64(tcg_res);
3262 } else {
3263 TCGv_i32 tcg_op;
3264 TCGv_i32 tcg_res;
3265 tcg_op = tcg_temp_new_i32();
3266 tcg_res = tcg_temp_new_i32();
3267 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3268 gen_helper_rints(tcg_res, tcg_op, fpst);
3269 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3270 tcg_temp_free_i32(tcg_op);
3271 tcg_temp_free_i32(tcg_res);
3272 }
3273
9b049916 3274 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3275 tcg_temp_free_i32(tcg_rmode);
3276
3277 tcg_temp_free_ptr(fpst);
3278 return 0;
3279}
3280
c9975a83
WN
3281static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3282 int rounding)
3283{
3284 bool is_signed = extract32(insn, 7, 1);
3285 TCGv_ptr fpst = get_fpstatus_ptr(0);
3286 TCGv_i32 tcg_rmode, tcg_shift;
3287
3288 tcg_shift = tcg_const_i32(0);
3289
3290 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3291 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3292
3293 if (dp) {
3294 TCGv_i64 tcg_double, tcg_res;
3295 TCGv_i32 tcg_tmp;
3296 /* Rd is encoded as a single precision register even when the source
3297 * is double precision.
3298 */
3299 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3300 tcg_double = tcg_temp_new_i64();
3301 tcg_res = tcg_temp_new_i64();
3302 tcg_tmp = tcg_temp_new_i32();
3303 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3304 if (is_signed) {
3305 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3306 } else {
3307 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3308 }
ecc7b3aa 3309 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3310 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3311 tcg_temp_free_i32(tcg_tmp);
3312 tcg_temp_free_i64(tcg_res);
3313 tcg_temp_free_i64(tcg_double);
3314 } else {
3315 TCGv_i32 tcg_single, tcg_res;
3316 tcg_single = tcg_temp_new_i32();
3317 tcg_res = tcg_temp_new_i32();
3318 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3319 if (is_signed) {
3320 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3321 } else {
3322 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3323 }
3324 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3325 tcg_temp_free_i32(tcg_res);
3326 tcg_temp_free_i32(tcg_single);
3327 }
3328
9b049916 3329 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3330 tcg_temp_free_i32(tcg_rmode);
3331
3332 tcg_temp_free_i32(tcg_shift);
3333
3334 tcg_temp_free_ptr(fpst);
3335
3336 return 0;
3337}
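/* Editorial note: the third argument of the tosl/toul conversion
 * helpers is a fixed-point shift (number of fraction bits); passing the
 * constant 0 here selects a plain float-to-integer conversion whose
 * rounding mode comes from tcg_rmode.
 */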
7655f39b
WN
3338
3339/* Table for converting the most common AArch32 encoding of
3340 * rounding mode to arm_fprounding order (which matches the
3341 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3342 */
3343static const uint8_t fp_decode_rm[] = {
3344 FPROUNDING_TIEAWAY,
3345 FPROUNDING_TIEEVEN,
3346 FPROUNDING_POSINF,
3347 FPROUNDING_NEGINF,
3348};
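/* Editorial example: insn bits [17:16] index this table, so e.g. VRINTP
 * and VCVTP encode the value 2, which maps to FPROUNDING_POSINF.
 */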
3349
c0c760af 3350static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3351{
3352 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3353
04731fb5
WN
3354 if (dp) {
3355 VFP_DREG_D(rd, insn);
3356 VFP_DREG_N(rn, insn);
3357 VFP_DREG_M(rm, insn);
3358 } else {
3359 rd = VFP_SREG_D(insn);
3360 rn = VFP_SREG_N(insn);
3361 rm = VFP_SREG_M(insn);
3362 }
3363
c0c760af 3364 if ((insn & 0x0f800e50) == 0x0e000a00 && dc_isar_feature(aa32_vsel, s)) {
04731fb5 3365 return handle_vsel(insn, rd, rn, rm, dp);
c0c760af
PM
3366 } else if ((insn & 0x0fb00e10) == 0x0e800a00 &&
3367 dc_isar_feature(aa32_vminmaxnm, s)) {
40cfacdd 3368 return handle_vminmaxnm(insn, rd, rn, rm, dp);
c0c760af
PM
3369 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40 &&
3370 dc_isar_feature(aa32_vrint, s)) {
7655f39b
WN
3371 /* VRINTA, VRINTN, VRINTP, VRINTM */
3372 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3373 return handle_vrint(insn, rd, rm, dp, rounding);
c0c760af
PM
3374 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40 &&
3375 dc_isar_feature(aa32_vcvt_dr, s)) {
c9975a83
WN
3376 /* VCVTA, VCVTN, VCVTP, VCVTM */
3377 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3378 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3379 }
3380 return 1;
3381}
3382
a1c7273b 3383/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3384 (i.e. an undefined instruction). */
7dcc1f89 3385static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3386{
3387 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3388 int dp, veclen;
39d5492a
PM
3389 TCGv_i32 addr;
3390 TCGv_i32 tmp;
3391 TCGv_i32 tmp2;
b7bcbe95 3392
d614a513 3393 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3394 return 1;
d614a513 3395 }
40f137e1 3396
2c7ffc41
PM
3397 /* FIXME: this access check should not take precedence over UNDEF
3398 * for invalid encodings; we will generate incorrect syndrome information
3399 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3400 */
9dbbc748 3401 if (s->fp_excp_el) {
2c7ffc41 3402 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3403 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3404 return 0;
3405 }
3406
5df8bac1 3407 if (!s->vfp_enabled) {
9ee6e8bb 3408 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3409 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3410 return 1;
3411 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3412 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3413 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3414 return 1;
a50c0f51 3415 }
40f137e1 3416 }
6a57f3eb
WN
3417
3418 if (extract32(insn, 28, 4) == 0xf) {
c0c760af
PM
3419 /*
3420 * Encodings with T=1 (Thumb) or unconditional (ARM):
3421 * only used for the "miscellaneous VFP features" added in v8A
3422 * and v7M (and gated on the MVFR2.FPMisc field).
6a57f3eb 3423 */
c0c760af 3424 return disas_vfp_misc_insn(s, insn);
6a57f3eb
WN
3425 }
3426
b7bcbe95
FB
3427 dp = ((insn & 0xf00) == 0xb00);
3428 switch ((insn >> 24) & 0xf) {
3429 case 0xe:
3430 if (insn & (1 << 4)) {
3431 /* single register transfer */
b7bcbe95
FB
3432 rd = (insn >> 12) & 0xf;
3433 if (dp) {
9ee6e8bb
PB
3434 int size;
3435 int pass;
3436
3437 VFP_DREG_N(rn, insn);
3438 if (insn & 0xf)
b7bcbe95 3439 return 1;
9ee6e8bb 3440 if (insn & 0x00c00060
d614a513 3441 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3442 return 1;
d614a513 3443 }
9ee6e8bb
PB
3444
3445 pass = (insn >> 21) & 1;
3446 if (insn & (1 << 22)) {
3447 size = 0;
3448 offset = ((insn >> 5) & 3) * 8;
3449 } else if (insn & (1 << 5)) {
3450 size = 1;
3451 offset = (insn & (1 << 6)) ? 16 : 0;
3452 } else {
3453 size = 2;
3454 offset = 0;
3455 }
18c9b560 3456 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3457 /* vfp->arm */
ad69471c 3458 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3459 switch (size) {
3460 case 0:
9ee6e8bb 3461 if (offset)
ad69471c 3462 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3463 if (insn & (1 << 23))
ad69471c 3464 gen_uxtb(tmp);
9ee6e8bb 3465 else
ad69471c 3466 gen_sxtb(tmp);
9ee6e8bb
PB
3467 break;
3468 case 1:
9ee6e8bb
PB
3469 if (insn & (1 << 23)) {
3470 if (offset) {
ad69471c 3471 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3472 } else {
ad69471c 3473 gen_uxth(tmp);
9ee6e8bb
PB
3474 }
3475 } else {
3476 if (offset) {
ad69471c 3477 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3478 } else {
ad69471c 3479 gen_sxth(tmp);
9ee6e8bb
PB
3480 }
3481 }
3482 break;
3483 case 2:
9ee6e8bb
PB
3484 break;
3485 }
ad69471c 3486 store_reg(s, rd, tmp);
b7bcbe95
FB
3487 } else {
3488 /* arm->vfp */
ad69471c 3489 tmp = load_reg(s, rd);
9ee6e8bb
PB
3490 if (insn & (1 << 23)) {
3491 /* VDUP */
32f91fb7
RH
3492 int vec_size = pass ? 16 : 8;
3493 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
3494 vec_size, vec_size, tmp);
3495 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
3496 } else {
3497 /* VMOV */
3498 switch (size) {
3499 case 0:
ad69471c 3500 tmp2 = neon_load_reg(rn, pass);
d593c48e 3501 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3502 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3503 break;
3504 case 1:
ad69471c 3505 tmp2 = neon_load_reg(rn, pass);
d593c48e 3506 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3507 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3508 break;
3509 case 2:
9ee6e8bb
PB
3510 break;
3511 }
ad69471c 3512 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3513 }
b7bcbe95 3514 }
9ee6e8bb
PB
3515 } else { /* !dp */
3516 if ((insn & 0x6f) != 0x00)
3517 return 1;
3518 rn = VFP_SREG_N(insn);
18c9b560 3519 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3520 /* vfp->arm */
3521 if (insn & (1 << 21)) {
3522 /* system register */
40f137e1 3523 rn >>= 1;
9ee6e8bb 3524
b7bcbe95 3525 switch (rn) {
40f137e1 3526 case ARM_VFP_FPSID:
4373f3ce 3527 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
3528 VFP3 restricts all id registers to privileged
3529 accesses. */
3530 if (IS_USER(s)
d614a513 3531 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3532 return 1;
d614a513 3533 }
4373f3ce 3534 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3535 break;
40f137e1 3536 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3537 if (IS_USER(s))
3538 return 1;
4373f3ce 3539 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3540 break;
40f137e1
PB
3541 case ARM_VFP_FPINST:
3542 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3543 /* Not present in VFP3. */
3544 if (IS_USER(s)
d614a513 3545 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3546 return 1;
d614a513 3547 }
4373f3ce 3548 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3549 break;
40f137e1 3550 case ARM_VFP_FPSCR:
601d70b9 3551 if (rd == 15) {
4373f3ce
PB
3552 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3553 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3554 } else {
7d1b0095 3555 tmp = tcg_temp_new_i32();
4373f3ce
PB
3556 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3557 }
b7bcbe95 3558 break;
a50c0f51 3559 case ARM_VFP_MVFR2:
d614a513 3560 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3561 return 1;
3562 }
3563 /* fall through */
9ee6e8bb
PB
3564 case ARM_VFP_MVFR0:
3565 case ARM_VFP_MVFR1:
3566 if (IS_USER(s)
d614a513 3567 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3568 return 1;
d614a513 3569 }
4373f3ce 3570 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3571 break;
b7bcbe95
FB
3572 default:
3573 return 1;
3574 }
3575 } else {
3576 gen_mov_F0_vreg(0, rn);
4373f3ce 3577 tmp = gen_vfp_mrs();
b7bcbe95
FB
3578 }
3579 if (rd == 15) {
b5ff1b31 3580 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3581 gen_set_nzcv(tmp);
7d1b0095 3582 tcg_temp_free_i32(tmp);
4373f3ce
PB
3583 } else {
3584 store_reg(s, rd, tmp);
3585 }
b7bcbe95
FB
3586 } else {
3587 /* arm->vfp */
b7bcbe95 3588 if (insn & (1 << 21)) {
40f137e1 3589 rn >>= 1;
b7bcbe95
FB
3590 /* system register */
3591 switch (rn) {
40f137e1 3592 case ARM_VFP_FPSID:
9ee6e8bb
PB
3593 case ARM_VFP_MVFR0:
3594 case ARM_VFP_MVFR1:
b7bcbe95
FB
3595 /* Writes are ignored. */
3596 break;
40f137e1 3597 case ARM_VFP_FPSCR:
e4c1cfa5 3598 tmp = load_reg(s, rd);
4373f3ce 3599 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3600 tcg_temp_free_i32(tmp);
b5ff1b31 3601 gen_lookup_tb(s);
b7bcbe95 3602 break;
40f137e1 3603 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3604 if (IS_USER(s))
3605 return 1;
71b3c3de
JR
 3606 /* TODO: VFP subarchitecture support.
 3607 * For now, keep only the EN bit. */
e4c1cfa5 3608 tmp = load_reg(s, rd);
71b3c3de 3609 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3610 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3611 gen_lookup_tb(s);
3612 break;
3613 case ARM_VFP_FPINST:
3614 case ARM_VFP_FPINST2:
23adb861
PM
3615 if (IS_USER(s)) {
3616 return 1;
3617 }
e4c1cfa5 3618 tmp = load_reg(s, rd);
4373f3ce 3619 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3620 break;
b7bcbe95
FB
3621 default:
3622 return 1;
3623 }
3624 } else {
e4c1cfa5 3625 tmp = load_reg(s, rd);
4373f3ce 3626 gen_vfp_msr(tmp);
b7bcbe95
FB
3627 gen_mov_vreg_F0(0, rn);
3628 }
3629 }
3630 }
3631 } else {
3632 /* data processing */
e80941bd
RH
3633 bool rd_is_dp = dp;
3634 bool rm_is_dp = dp;
3635 bool no_output = false;
3636
b7bcbe95
FB
3637 /* The opcode is in bits 23, 21, 20 and 6. */
3638 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
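 /* i.e. op[3] = insn[23], op[2:1] = insn[21:20], op[0] = insn[6];
 * for example VADD (insn[23,21,20] = 0,1,1 and insn[6] = 0)
 * gives op = 6.
 */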
e80941bd 3639 rn = VFP_SREG_N(insn);
b7bcbe95 3640
e80941bd
RH
3641 if (op == 15) {
3642 /* rn is opcode, encoded as per VFP_SREG_N. */
3643 switch (rn) {
3644 case 0x00: /* vmov */
3645 case 0x01: /* vabs */
3646 case 0x02: /* vneg */
3647 case 0x03: /* vsqrt */
3648 break;
3649
3650 case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
3651 case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
3652 /*
 3653 * VCVTB, VCVTT: only present with the halfprec extension;
 3654 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3655 * (we choose to UNDEF)
04595bf6 3656 */
602f6e42
PM
3657 if (dp) {
3658 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
3659 return 1;
3660 }
3661 } else {
3662 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
3663 return 1;
3664 }
e80941bd
RH
3665 }
3666 rm_is_dp = false;
3667 break;
3668 case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3669 case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
602f6e42
PM
3670 if (dp) {
3671 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
3672 return 1;
3673 }
3674 } else {
3675 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
3676 return 1;
3677 }
e80941bd
RH
3678 }
3679 rd_is_dp = false;
3680 break;
3681
3682 case 0x08: case 0x0a: /* vcmp, vcmpz */
3683 case 0x09: case 0x0b: /* vcmpe, vcmpez */
3684 no_output = true;
3685 break;
3686
3687 case 0x0c: /* vrintr */
3688 case 0x0d: /* vrintz */
3689 case 0x0e: /* vrintx */
3690 break;
3691
3692 case 0x0f: /* vcvt double<->single */
3693 rd_is_dp = !dp;
3694 break;
3695
3696 case 0x10: /* vcvt.fxx.u32 */
3697 case 0x11: /* vcvt.fxx.s32 */
3698 rm_is_dp = false;
3699 break;
3700 case 0x18: /* vcvtr.u32.fxx */
3701 case 0x19: /* vcvtz.u32.fxx */
3702 case 0x1a: /* vcvtr.s32.fxx */
3703 case 0x1b: /* vcvtz.s32.fxx */
3704 rd_is_dp = false;
3705 break;
3706
3707 case 0x14: /* vcvt fp <-> fixed */
3708 case 0x15:
3709 case 0x16:
3710 case 0x17:
3711 case 0x1c:
3712 case 0x1d:
3713 case 0x1e:
3714 case 0x1f:
3715 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3716 return 1;
3717 }
3718 /* Immediate frac_bits has same format as SREG_M. */
3719 rm_is_dp = false;
3720 break;
3721
6c1f6f27
RH
3722 case 0x13: /* vjcvt */
3723 if (!dp || !dc_isar_feature(aa32_jscvt, s)) {
3724 return 1;
3725 }
3726 rd_is_dp = false;
3727 break;
3728
e80941bd
RH
3729 default:
3730 return 1;
b7bcbe95 3731 }
e80941bd
RH
3732 } else if (dp) {
3733 /* rn is register number */
3734 VFP_DREG_N(rn, insn);
3735 }
3736
3737 if (rd_is_dp) {
3738 VFP_DREG_D(rd, insn);
3739 } else {
3740 rd = VFP_SREG_D(insn);
3741 }
3742 if (rm_is_dp) {
3743 VFP_DREG_M(rm, insn);
b7bcbe95 3744 } else {
9ee6e8bb 3745 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3746 }
3747
69d1fc22 3748 veclen = s->vec_len;
e80941bd 3749 if (op == 15 && rn > 3) {
b7bcbe95 3750 veclen = 0;
e80941bd 3751 }
b7bcbe95
FB
3752
3753 /* Shut up compiler warnings. */
3754 delta_m = 0;
3755 delta_d = 0;
3756 bank_mask = 0;
3b46e624 3757
b7bcbe95
FB
3758 if (veclen > 0) {
3759 if (dp)
3760 bank_mask = 0xc;
3761 else
3762 bank_mask = 0x18;
3763
3764 /* Figure out what type of vector operation this is. */
3765 if ((rd & bank_mask) == 0) {
3766 /* scalar */
3767 veclen = 0;
3768 } else {
3769 if (dp)
69d1fc22 3770 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3771 else
69d1fc22 3772 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3773
3774 if ((rm & bank_mask) == 0) {
3775 /* mixed scalar/vector */
3776 delta_m = 0;
3777 } else {
3778 /* vector */
3779 delta_m = delta_d;
3780 }
3781 }
3782 }
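 /*
 * Short-vector worked example: with FPSCR.LEN = 3 and
 * FPSCR.STRIDE = 0 in single precision, veclen = 3 and
 * delta_d = 1, so e.g. "fadds s16, s8, s24" performs four adds,
 * s16-s19 = s8-s11 + s24-s27, with register numbers wrapping
 * inside their 8-register bank (bank_mask = 0x18).
 */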
3783
3784 /* Load the initial operands. */
3785 if (op == 15) {
3786 switch (rn) {
e80941bd 3787 case 0x08: case 0x09: /* Compare */
b7bcbe95
FB
3788 gen_mov_F0_vreg(dp, rd);
3789 gen_mov_F1_vreg(dp, rm);
3790 break;
e80941bd 3791 case 0x0a: case 0x0b: /* Compare with zero */
b7bcbe95
FB
3792 gen_mov_F0_vreg(dp, rd);
3793 gen_vfp_F1_ld0(dp);
3794 break;
e80941bd
RH
3795 case 0x14: /* vcvt fp <-> fixed */
3796 case 0x15:
3797 case 0x16:
3798 case 0x17:
3799 case 0x1c:
3800 case 0x1d:
3801 case 0x1e:
3802 case 0x1f:
9ee6e8bb
PB
3803 /* Source and destination the same. */
3804 gen_mov_F0_vreg(dp, rd);
3805 break;
b7bcbe95
FB
3806 default:
3807 /* One source operand. */
e80941bd 3808 gen_mov_F0_vreg(rm_is_dp, rm);
9ee6e8bb 3809 break;
b7bcbe95
FB
3810 }
3811 } else {
3812 /* Two source operands. */
3813 gen_mov_F0_vreg(dp, rn);
3814 gen_mov_F1_vreg(dp, rm);
3815 }
3816
3817 for (;;) {
3818 /* Perform the calculation. */
3819 switch (op) {
605a6aed
PM
3820 case 0: /* VMLA: fd + (fn * fm) */
3821 /* Note that order of inputs to the add matters for NaNs */
3822 gen_vfp_F1_mul(dp);
3823 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3824 gen_vfp_add(dp);
3825 break;
605a6aed 3826 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3827 gen_vfp_mul(dp);
605a6aed
PM
3828 gen_vfp_F1_neg(dp);
3829 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3830 gen_vfp_add(dp);
3831 break;
605a6aed
PM
3832 case 2: /* VNMLS: -fd + (fn * fm) */
3833 /* Note that it isn't valid to replace (-A + B) with (B - A)
 3834 * or similar plausible-looking simplifications
3835 * because this will give wrong results for NaNs.
3836 */
3837 gen_vfp_F1_mul(dp);
3838 gen_mov_F0_vreg(dp, rd);
3839 gen_vfp_neg(dp);
3840 gen_vfp_add(dp);
b7bcbe95 3841 break;
605a6aed 3842 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3843 gen_vfp_mul(dp);
605a6aed
PM
3844 gen_vfp_F1_neg(dp);
3845 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3846 gen_vfp_neg(dp);
605a6aed 3847 gen_vfp_add(dp);
b7bcbe95
FB
3848 break;
3849 case 4: /* mul: fn * fm */
3850 gen_vfp_mul(dp);
3851 break;
3852 case 5: /* nmul: -(fn * fm) */
3853 gen_vfp_mul(dp);
3854 gen_vfp_neg(dp);
3855 break;
3856 case 6: /* add: fn + fm */
3857 gen_vfp_add(dp);
3858 break;
3859 case 7: /* sub: fn - fm */
3860 gen_vfp_sub(dp);
3861 break;
3862 case 8: /* div: fn / fm */
3863 gen_vfp_div(dp);
3864 break;
da97f52c
PM
3865 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3866 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3867 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3868 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3869 /* These are fused multiply-add, and must be done as one
3870 * floating point operation with no rounding between the
3871 * multiplication and addition steps.
 3872 * NB that doing the negations here as separate steps is
 3873 * correct: an input NaN should come out with its sign bit
 3874 * flipped if it is a negated input.
3875 */
d614a513 3876 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3877 return 1;
3878 }
3879 if (dp) {
3880 TCGv_ptr fpst;
3881 TCGv_i64 frd;
3882 if (op & 1) {
3883 /* VFNMS, VFMS */
3884 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3885 }
3886 frd = tcg_temp_new_i64();
3887 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3888 if (op & 2) {
3889 /* VFNMA, VFNMS */
3890 gen_helper_vfp_negd(frd, frd);
3891 }
3892 fpst = get_fpstatus_ptr(0);
3893 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3894 cpu_F1d, frd, fpst);
3895 tcg_temp_free_ptr(fpst);
3896 tcg_temp_free_i64(frd);
3897 } else {
3898 TCGv_ptr fpst;
3899 TCGv_i32 frd;
3900 if (op & 1) {
3901 /* VFNMS, VFMS */
3902 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3903 }
3904 frd = tcg_temp_new_i32();
3905 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3906 if (op & 2) {
3907 gen_helper_vfp_negs(frd, frd);
3908 }
3909 fpst = get_fpstatus_ptr(0);
3910 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3911 cpu_F1s, frd, fpst);
3912 tcg_temp_free_ptr(fpst);
3913 tcg_temp_free_i32(frd);
3914 }
3915 break;
9ee6e8bb 3916 case 14: /* fconst */
d614a513
PM
3917 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3918 return 1;
3919 }
9ee6e8bb
PB
3920
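 /*
 * This is the VFPExpandImm() pattern from the ARM ARM: the
 * abcdefgh immediate becomes sign = a, exponent =
 * NOT(b):Replicate(b):cd, fraction = efgh000... For example
 * imm8 = 0x70 expands to the single-precision constant
 * 0x3f800000 (1.0).
 */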
3921 n = (insn << 12) & 0x80000000;
3922 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3923 if (dp) {
3924 if (i & 0x40)
3925 i |= 0x3f80;
3926 else
3927 i |= 0x4000;
3928 n |= i << 16;
4373f3ce 3929 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3930 } else {
3931 if (i & 0x40)
3932 i |= 0x780;
3933 else
3934 i |= 0x800;
3935 n |= i << 19;
5b340b51 3936 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3937 }
9ee6e8bb 3938 break;
b7bcbe95
FB
3939 case 15: /* extension space */
3940 switch (rn) {
3941 case 0: /* cpy */
3942 /* no-op */
3943 break;
3944 case 1: /* abs */
3945 gen_vfp_abs(dp);
3946 break;
3947 case 2: /* neg */
3948 gen_vfp_neg(dp);
3949 break;
3950 case 3: /* sqrt */
3951 gen_vfp_sqrt(dp);
3952 break;
239c20c7 3953 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
486624fc
AB
3954 {
3955 TCGv_ptr fpst = get_fpstatus_ptr(false);
3956 TCGv_i32 ahp_mode = get_ahp_flag();
60011498
PB
3957 tmp = gen_vfp_mrs();
3958 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3959 if (dp) {
3960 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3961 fpst, ahp_mode);
239c20c7
WN
3962 } else {
3963 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3964 fpst, ahp_mode);
239c20c7 3965 }
486624fc
AB
3966 tcg_temp_free_i32(ahp_mode);
3967 tcg_temp_free_ptr(fpst);
7d1b0095 3968 tcg_temp_free_i32(tmp);
60011498 3969 break;
486624fc 3970 }
239c20c7 3971 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
486624fc
AB
3972 {
3973 TCGv_ptr fpst = get_fpstatus_ptr(false);
3974 TCGv_i32 ahp = get_ahp_flag();
60011498
PB
3975 tmp = gen_vfp_mrs();
3976 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3977 if (dp) {
3978 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3979 fpst, ahp);
239c20c7
WN
3980 } else {
3981 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3982 fpst, ahp);
239c20c7 3983 }
7d1b0095 3984 tcg_temp_free_i32(tmp);
486624fc
AB
3985 tcg_temp_free_i32(ahp);
3986 tcg_temp_free_ptr(fpst);
60011498 3987 break;
486624fc 3988 }
239c20c7 3989 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
486624fc
AB
3990 {
3991 TCGv_ptr fpst = get_fpstatus_ptr(false);
3992 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3993 tmp = tcg_temp_new_i32();
486624fc 3994
239c20c7
WN
3995 if (dp) {
3996 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3997 fpst, ahp);
239c20c7
WN
3998 } else {
3999 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 4000 fpst, ahp);
239c20c7 4001 }
486624fc
AB
4002 tcg_temp_free_i32(ahp);
4003 tcg_temp_free_ptr(fpst);
60011498
PB
4004 gen_mov_F0_vreg(0, rd);
4005 tmp2 = gen_vfp_mrs();
4006 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
4007 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4008 tcg_temp_free_i32(tmp2);
60011498
PB
4009 gen_vfp_msr(tmp);
4010 break;
486624fc 4011 }
239c20c7 4012 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
486624fc
AB
4013 {
4014 TCGv_ptr fpst = get_fpstatus_ptr(false);
4015 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 4016 tmp = tcg_temp_new_i32();
239c20c7
WN
4017 if (dp) {
4018 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 4019 fpst, ahp);
239c20c7
WN
4020 } else {
4021 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 4022 fpst, ahp);
239c20c7 4023 }
486624fc
AB
4024 tcg_temp_free_i32(ahp);
4025 tcg_temp_free_ptr(fpst);
60011498
PB
4026 tcg_gen_shli_i32(tmp, tmp, 16);
4027 gen_mov_F0_vreg(0, rd);
4028 tmp2 = gen_vfp_mrs();
4029 tcg_gen_ext16u_i32(tmp2, tmp2);
4030 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4031 tcg_temp_free_i32(tmp2);
60011498
PB
4032 gen_vfp_msr(tmp);
4033 break;
486624fc 4034 }
b7bcbe95
FB
4035 case 8: /* cmp */
4036 gen_vfp_cmp(dp);
4037 break;
4038 case 9: /* cmpe */
4039 gen_vfp_cmpe(dp);
4040 break;
4041 case 10: /* cmpz */
4042 gen_vfp_cmp(dp);
4043 break;
4044 case 11: /* cmpez */
4045 gen_vfp_F1_ld0(dp);
4046 gen_vfp_cmpe(dp);
4047 break;
664c6733
WN
4048 case 12: /* vrintr */
4049 {
4050 TCGv_ptr fpst = get_fpstatus_ptr(0);
4051 if (dp) {
4052 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4053 } else {
4054 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4055 }
4056 tcg_temp_free_ptr(fpst);
4057 break;
4058 }
a290c62a
WN
4059 case 13: /* vrintz */
4060 {
4061 TCGv_ptr fpst = get_fpstatus_ptr(0);
4062 TCGv_i32 tcg_rmode;
4063 tcg_rmode = tcg_const_i32(float_round_to_zero);
9b049916 4064 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
4065 if (dp) {
4066 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4067 } else {
4068 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4069 }
9b049916 4070 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
4071 tcg_temp_free_i32(tcg_rmode);
4072 tcg_temp_free_ptr(fpst);
4073 break;
4074 }
4e82bc01
WN
4075 case 14: /* vrintx */
4076 {
4077 TCGv_ptr fpst = get_fpstatus_ptr(0);
4078 if (dp) {
4079 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
4080 } else {
4081 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
4082 }
4083 tcg_temp_free_ptr(fpst);
4084 break;
4085 }
b7bcbe95 4086 case 15: /* single<->double conversion */
e80941bd 4087 if (dp) {
4373f3ce 4088 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
e80941bd 4089 } else {
4373f3ce 4090 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
e80941bd 4091 }
b7bcbe95
FB
4092 break;
4093 case 16: /* fuito */
5500b06c 4094 gen_vfp_uito(dp, 0);
b7bcbe95
FB
4095 break;
4096 case 17: /* fsito */
5500b06c 4097 gen_vfp_sito(dp, 0);
b7bcbe95 4098 break;
6c1f6f27
RH
4099 case 19: /* vjcvt */
4100 gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env);
4101 break;
9ee6e8bb 4102 case 20: /* fshto */
5500b06c 4103 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
4104 break;
4105 case 21: /* fslto */
5500b06c 4106 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
4107 break;
4108 case 22: /* fuhto */
5500b06c 4109 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
4110 break;
4111 case 23: /* fulto */
5500b06c 4112 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 4113 break;
b7bcbe95 4114 case 24: /* ftoui */
5500b06c 4115 gen_vfp_toui(dp, 0);
b7bcbe95
FB
4116 break;
4117 case 25: /* ftouiz */
5500b06c 4118 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
4119 break;
4120 case 26: /* ftosi */
5500b06c 4121 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
4122 break;
4123 case 27: /* ftosiz */
5500b06c 4124 gen_vfp_tosiz(dp, 0);
b7bcbe95 4125 break;
9ee6e8bb 4126 case 28: /* ftosh */
5500b06c 4127 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
4128 break;
4129 case 29: /* ftosl */
5500b06c 4130 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
4131 break;
4132 case 30: /* ftouh */
5500b06c 4133 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
4134 break;
4135 case 31: /* ftoul */
5500b06c 4136 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 4137 break;
b7bcbe95 4138 default: /* undefined */
e80941bd 4139 g_assert_not_reached();
b7bcbe95
FB
4140 }
4141 break;
4142 default: /* undefined */
b7bcbe95
FB
4143 return 1;
4144 }
4145
e80941bd
RH
4146 /* Write back the result, if any. */
4147 if (!no_output) {
4148 gen_mov_vreg_F0(rd_is_dp, rd);
239c20c7 4149 }
b7bcbe95
FB
4150
4151 /* break out of the loop if we have finished */
e80941bd 4152 if (veclen == 0) {
b7bcbe95 4153 break;
e80941bd 4154 }
b7bcbe95
FB
4155
4156 if (op == 15 && delta_m == 0) {
4157 /* single source one-many */
4158 while (veclen--) {
4159 rd = ((rd + delta_d) & (bank_mask - 1))
4160 | (rd & bank_mask);
4161 gen_mov_vreg_F0(dp, rd);
4162 }
4163 break;
4164 }
4165 /* Setup the next operands. */
4166 veclen--;
4167 rd = ((rd + delta_d) & (bank_mask - 1))
4168 | (rd & bank_mask);
4169
4170 if (op == 15) {
4171 /* One source operand. */
4172 rm = ((rm + delta_m) & (bank_mask - 1))
4173 | (rm & bank_mask);
4174 gen_mov_F0_vreg(dp, rm);
4175 } else {
4176 /* Two source operands. */
4177 rn = ((rn + delta_d) & (bank_mask - 1))
4178 | (rn & bank_mask);
4179 gen_mov_F0_vreg(dp, rn);
4180 if (delta_m) {
4181 rm = ((rm + delta_m) & (bank_mask - 1))
4182 | (rm & bank_mask);
4183 gen_mov_F1_vreg(dp, rm);
4184 }
4185 }
4186 }
4187 }
4188 break;
4189 case 0xc:
4190 case 0xd:
8387da81 4191 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4192 /* two-register transfer */
4193 rn = (insn >> 16) & 0xf;
4194 rd = (insn >> 12) & 0xf;
4195 if (dp) {
9ee6e8bb
PB
4196 VFP_DREG_M(rm, insn);
4197 } else {
4198 rm = VFP_SREG_M(insn);
4199 }
b7bcbe95 4200
18c9b560 4201 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4202 /* vfp->arm */
4203 if (dp) {
4373f3ce
PB
4204 gen_mov_F0_vreg(0, rm * 2);
4205 tmp = gen_vfp_mrs();
4206 store_reg(s, rd, tmp);
4207 gen_mov_F0_vreg(0, rm * 2 + 1);
4208 tmp = gen_vfp_mrs();
4209 store_reg(s, rn, tmp);
b7bcbe95
FB
4210 } else {
4211 gen_mov_F0_vreg(0, rm);
4373f3ce 4212 tmp = gen_vfp_mrs();
8387da81 4213 store_reg(s, rd, tmp);
b7bcbe95 4214 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4215 tmp = gen_vfp_mrs();
8387da81 4216 store_reg(s, rn, tmp);
b7bcbe95
FB
4217 }
4218 } else {
4219 /* arm->vfp */
4220 if (dp) {
4373f3ce
PB
4221 tmp = load_reg(s, rd);
4222 gen_vfp_msr(tmp);
4223 gen_mov_vreg_F0(0, rm * 2);
4224 tmp = load_reg(s, rn);
4225 gen_vfp_msr(tmp);
4226 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4227 } else {
8387da81 4228 tmp = load_reg(s, rd);
4373f3ce 4229 gen_vfp_msr(tmp);
b7bcbe95 4230 gen_mov_vreg_F0(0, rm);
8387da81 4231 tmp = load_reg(s, rn);
4373f3ce 4232 gen_vfp_msr(tmp);
b7bcbe95
FB
4233 gen_mov_vreg_F0(0, rm + 1);
4234 }
4235 }
4236 } else {
4237 /* Load/store */
4238 rn = (insn >> 16) & 0xf;
4239 if (dp)
9ee6e8bb 4240 VFP_DREG_D(rd, insn);
b7bcbe95 4241 else
9ee6e8bb 4242 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4243 if ((insn & 0x01200000) == 0x01000000) {
4244 /* Single load/store */
4245 offset = (insn & 0xff) << 2;
4246 if ((insn & (1 << 23)) == 0)
4247 offset = -offset;
934814f1
PM
4248 if (s->thumb && rn == 15) {
4249 /* This is actually UNPREDICTABLE */
4250 addr = tcg_temp_new_i32();
4251 tcg_gen_movi_i32(addr, s->pc & ~2);
4252 } else {
4253 addr = load_reg(s, rn);
4254 }
312eea9f 4255 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4256 if (insn & (1 << 20)) {
312eea9f 4257 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4258 gen_mov_vreg_F0(dp, rd);
4259 } else {
4260 gen_mov_F0_vreg(dp, rd);
312eea9f 4261 gen_vfp_st(s, dp, addr);
b7bcbe95 4262 }
7d1b0095 4263 tcg_temp_free_i32(addr);
b7bcbe95
FB
4264 } else {
4265 /* load/store multiple */
934814f1 4266 int w = insn & (1 << 21);
b7bcbe95
FB
4267 if (dp)
4268 n = (insn >> 1) & 0x7f;
4269 else
4270 n = insn & 0xff;
4271
934814f1
PM
4272 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4273 /* P == U , W == 1 => UNDEF */
4274 return 1;
4275 }
4276 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4277 /* UNPREDICTABLE cases for bad immediates: we choose to
4278 * UNDEF to avoid generating huge numbers of TCG ops
4279 */
4280 return 1;
4281 }
4282 if (rn == 15 && w) {
4283 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4284 return 1;
4285 }
4286
4287 if (s->thumb && rn == 15) {
4288 /* This is actually UNPREDICTABLE */
4289 addr = tcg_temp_new_i32();
4290 tcg_gen_movi_i32(addr, s->pc & ~2);
4291 } else {
4292 addr = load_reg(s, rn);
4293 }
b7bcbe95 4294 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4295 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95 4296
8a954faf
PM
4297 if (s->v8m_stackcheck && rn == 13 && w) {
4298 /*
4299 * Here 'addr' is the lowest address we will store to,
4300 * and is either the old SP (if post-increment) or
4301 * the new SP (if pre-decrement). For post-increment
4302 * where the old value is below the limit and the new
4303 * value is above, it is UNKNOWN whether the limit check
4304 * triggers; we choose to trigger.
4305 */
4306 gen_helper_v8m_stackcheck(cpu_env, addr);
4307 }
4308
b7bcbe95
FB
4309 if (dp)
4310 offset = 8;
4311 else
4312 offset = 4;
4313 for (i = 0; i < n; i++) {
18c9b560 4314 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4315 /* load */
312eea9f 4316 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4317 gen_mov_vreg_F0(dp, rd + i);
4318 } else {
4319 /* store */
4320 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4321 gen_vfp_st(s, dp, addr);
b7bcbe95 4322 }
312eea9f 4323 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4324 }
934814f1 4325 if (w) {
b7bcbe95
FB
4326 /* writeback */
4327 if (insn & (1 << 24))
4328 offset = -offset * n;
4329 else if (dp && (insn & 1))
4330 offset = 4;
4331 else
4332 offset = 0;
4333
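 /* The "dp && (insn & 1)" case is the FLDMX/FSTMX form, whose
 * odd immediate (2n+1 words) leaves one extra word to account
 * for in the increment-after writeback.
 */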
4334 if (offset != 0)
312eea9f
FN
4335 tcg_gen_addi_i32(addr, addr, offset);
4336 store_reg(s, rn, addr);
4337 } else {
7d1b0095 4338 tcg_temp_free_i32(addr);
b7bcbe95
FB
4339 }
4340 }
4341 }
4342 break;
4343 default:
4344 /* Should never happen. */
4345 return 1;
4346 }
4347 return 0;
4348}
4349
90aa39a1 4350static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4351{
90aa39a1 4352#ifndef CONFIG_USER_ONLY
dcba3a8d 4353 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4354 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4355#else
4356 return true;
4357#endif
4358}
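/*
 * Direct block chaining with goto_tb is only safe while the target
 * stays within the guest page(s) this TB already depends on; a
 * cross-page target must take the indirect lookup path so the jump
 * is revalidated if that page is remapped. E.g. with 4K pages, a
 * TB at 0x8004 may chain straight to 0x8f00 but not to 0x9000.
 */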
6e256c93 4359
8a6b28c7
EC
4360static void gen_goto_ptr(void)
4361{
7f11636d 4362 tcg_gen_lookup_and_goto_ptr();
8a6b28c7
EC
4363}
4364
4cae8f56
AB
4365/* This will end the TB but doesn't guarantee we'll return to
4366 * cpu_loop_exec. Any live exit_requests will be processed as we
4367 * enter the next TB.
4368 */
8a6b28c7 4369static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
4370{
4371 if (use_goto_tb(s, dest)) {
57fec1fe 4372 tcg_gen_goto_tb(n);
eaed129d 4373 gen_set_pc_im(s, dest);
07ea28b4 4374 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 4375 } else {
eaed129d 4376 gen_set_pc_im(s, dest);
8a6b28c7 4377 gen_goto_ptr();
6e256c93 4378 }
dcba3a8d 4379 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
4380}
4381
8aaca4c0
FB
4382static inline void gen_jmp (DisasContext *s, uint32_t dest)
4383{
b636649f 4384 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4385 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4386 if (s->thumb)
d9ba4830
PB
4387 dest |= 1;
4388 gen_bx_im(s, dest);
8aaca4c0 4389 } else {
6e256c93 4390 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4391 }
4392}
4393
39d5492a 4394static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4395{
ee097184 4396 if (x)
d9ba4830 4397 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4398 else
d9ba4830 4399 gen_sxth(t0);
ee097184 4400 if (y)
d9ba4830 4401 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4402 else
d9ba4830
PB
4403 gen_sxth(t1);
4404 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4405}
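/*
 * Worked example: with x = 1, y = 0, t0 = 0x12345678 and
 * t1 = 0x0000ffff, t0 becomes 0x1234 (top halfword) and t1 becomes
 * -1 (sign-extended bottom halfword), so t0 ends up as -0x1234.
 */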
4406
4407/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4408static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4409{
b5ff1b31
FB
4410 uint32_t mask;
4411
4412 mask = 0;
4413 if (flags & (1 << 0))
4414 mask |= 0xff;
4415 if (flags & (1 << 1))
4416 mask |= 0xff00;
4417 if (flags & (1 << 2))
4418 mask |= 0xff0000;
4419 if (flags & (1 << 3))
4420 mask |= 0xff000000;
9ee6e8bb 4421
2ae23e75 4422 /* Mask out undefined bits. */
9ee6e8bb 4423 mask &= ~CPSR_RESERVED;
d614a513 4424 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4425 mask &= ~CPSR_T;
d614a513
PM
4426 }
4427 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4428 mask &= ~CPSR_Q; /* V5TE in reality */
d614a513
PM
4429 }
4430 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4431 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4432 }
4433 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4434 mask &= ~CPSR_IT;
d614a513 4435 }
4051e12c
PM
4436 /* Mask out execution state and reserved bits. */
4437 if (!spsr) {
4438 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4439 }
b5ff1b31
FB
4440 /* Mask out privileged bits. */
4441 if (IS_USER(s))
9ee6e8bb 4442 mask &= CPSR_USER;
b5ff1b31
FB
4443 return mask;
4444}
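/*
 * Worked example: "msr cpsr_fc, r0" has flags = 0b1001, so the
 * initial mask is 0xff0000ff; the reserved, execution-state and
 * (in user mode) privileged bits are then cleared as above.
 */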
4445
2fbac54b 4446/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4447static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4448{
39d5492a 4449 TCGv_i32 tmp;
b5ff1b31
FB
4450 if (spsr) {
4451 /* ??? This is also undefined in system mode. */
4452 if (IS_USER(s))
4453 return 1;
d9ba4830
PB
4454
4455 tmp = load_cpu_field(spsr);
4456 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4457 tcg_gen_andi_i32(t0, t0, mask);
4458 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4459 store_cpu_field(tmp, spsr);
b5ff1b31 4460 } else {
2fbac54b 4461 gen_set_cpsr(t0, mask);
b5ff1b31 4462 }
7d1b0095 4463 tcg_temp_free_i32(t0);
b5ff1b31
FB
4464 gen_lookup_tb(s);
4465 return 0;
4466}
4467
2fbac54b
FN
4468/* Returns nonzero if access to the PSR is not permitted. */
4469static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4470{
39d5492a 4471 TCGv_i32 tmp;
7d1b0095 4472 tmp = tcg_temp_new_i32();
2fbac54b
FN
4473 tcg_gen_movi_i32(tmp, val);
4474 return gen_set_psr(s, mask, spsr, tmp);
4475}
4476
8bfd0550
PM
4477static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4478 int *tgtmode, int *regno)
4479{
4480 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4481 * the target mode and register number, and identify the various
4482 * unpredictable cases.
4483 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4484 * + executed in user mode
4485 * + using R15 as the src/dest register
4486 * + accessing an unimplemented register
 4487 * + accessing a register that's inaccessible at current PL/security state
4488 * + accessing a register that you could access with a different insn
4489 * We choose to UNDEF in all these cases.
4490 * Since we don't know which of the various AArch32 modes we are in
4491 * we have to defer some checks to runtime.
4492 * Accesses to Monitor mode registers from Secure EL1 (which implies
4493 * that EL3 is AArch64) must trap to EL3.
4494 *
4495 * If the access checks fail this function will emit code to take
4496 * an exception and return false. Otherwise it will return true,
4497 * and set *tgtmode and *regno appropriately.
4498 */
4499 int exc_target = default_exception_el(s);
4500
4501 /* These instructions are present only in ARMv8, or in ARMv7 with the
4502 * Virtualization Extensions.
4503 */
4504 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4505 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4506 goto undef;
4507 }
4508
4509 if (IS_USER(s) || rn == 15) {
4510 goto undef;
4511 }
4512
4513 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4514 * of registers into (r, sysm).
4515 */
4516 if (r) {
4517 /* SPSRs for other modes */
4518 switch (sysm) {
4519 case 0xe: /* SPSR_fiq */
4520 *tgtmode = ARM_CPU_MODE_FIQ;
4521 break;
4522 case 0x10: /* SPSR_irq */
4523 *tgtmode = ARM_CPU_MODE_IRQ;
4524 break;
4525 case 0x12: /* SPSR_svc */
4526 *tgtmode = ARM_CPU_MODE_SVC;
4527 break;
4528 case 0x14: /* SPSR_abt */
4529 *tgtmode = ARM_CPU_MODE_ABT;
4530 break;
4531 case 0x16: /* SPSR_und */
4532 *tgtmode = ARM_CPU_MODE_UND;
4533 break;
4534 case 0x1c: /* SPSR_mon */
4535 *tgtmode = ARM_CPU_MODE_MON;
4536 break;
4537 case 0x1e: /* SPSR_hyp */
4538 *tgtmode = ARM_CPU_MODE_HYP;
4539 break;
4540 default: /* unallocated */
4541 goto undef;
4542 }
4543 /* We arbitrarily assign SPSR a register number of 16. */
4544 *regno = 16;
4545 } else {
4546 /* general purpose registers for other modes */
4547 switch (sysm) {
4548 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4549 *tgtmode = ARM_CPU_MODE_USR;
4550 *regno = sysm + 8;
4551 break;
4552 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4553 *tgtmode = ARM_CPU_MODE_FIQ;
4554 *regno = sysm;
4555 break;
4556 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4557 *tgtmode = ARM_CPU_MODE_IRQ;
4558 *regno = sysm & 1 ? 13 : 14;
4559 break;
4560 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4561 *tgtmode = ARM_CPU_MODE_SVC;
4562 *regno = sysm & 1 ? 13 : 14;
4563 break;
4564 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4565 *tgtmode = ARM_CPU_MODE_ABT;
4566 *regno = sysm & 1 ? 13 : 14;
4567 break;
4568 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4569 *tgtmode = ARM_CPU_MODE_UND;
4570 *regno = sysm & 1 ? 13 : 14;
4571 break;
4572 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4573 *tgtmode = ARM_CPU_MODE_MON;
4574 *regno = sysm & 1 ? 13 : 14;
4575 break;
4576 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4577 *tgtmode = ARM_CPU_MODE_HYP;
4578 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4579 *regno = sysm & 1 ? 13 : 17;
4580 break;
4581 default: /* unallocated */
4582 goto undef;
4583 }
4584 }
4585
4586 /* Catch the 'accessing inaccessible register' cases we can detect
4587 * at translate time.
4588 */
4589 switch (*tgtmode) {
4590 case ARM_CPU_MODE_MON:
4591 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4592 goto undef;
4593 }
4594 if (s->current_el == 1) {
4595 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4596 * then accesses to Mon registers trap to EL3
4597 */
4598 exc_target = 3;
4599 goto undef;
4600 }
4601 break;
4602 case ARM_CPU_MODE_HYP:
aec4dd09
PM
4603 /*
4604 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
4605 * (and so we can forbid accesses from EL2 or below). elr_hyp
4606 * can be accessed also from Hyp mode, so forbid accesses from
4607 * EL0 or EL1.
8bfd0550 4608 */
aec4dd09
PM
4609 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
4610 (s->current_el < 3 && *regno != 17)) {
8bfd0550
PM
4611 goto undef;
4612 }
4613 break;
4614 default:
4615 break;
4616 }
4617
4618 return true;
4619
4620undef:
4621 /* If we get here then some access check did not pass */
4622 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4623 return false;
4624}
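/*
 * Example decodes: "mrs r0, SPSR_irq" has r = 1, sysm = 0x10, which
 * maps to tgtmode = ARM_CPU_MODE_IRQ with our arbitrary SPSR regno
 * of 16; "mrs r0, r13_fiq" has r = 0, sysm = 0xd, giving
 * tgtmode = ARM_CPU_MODE_FIQ, regno = 13.
 */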
4625
4626static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4627{
4628 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4629 int tgtmode = 0, regno = 0;
4630
4631 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4632 return;
4633 }
4634
4635 /* Sync state because msr_banked() can raise exceptions */
4636 gen_set_condexec(s);
4637 gen_set_pc_im(s, s->pc - 4);
4638 tcg_reg = load_reg(s, rn);
4639 tcg_tgtmode = tcg_const_i32(tgtmode);
4640 tcg_regno = tcg_const_i32(regno);
4641 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4642 tcg_temp_free_i32(tcg_tgtmode);
4643 tcg_temp_free_i32(tcg_regno);
4644 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4645 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4646}
4647
4648static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4649{
4650 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4651 int tgtmode = 0, regno = 0;
4652
4653 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4654 return;
4655 }
4656
4657 /* Sync state because mrs_banked() can raise exceptions */
4658 gen_set_condexec(s);
4659 gen_set_pc_im(s, s->pc - 4);
4660 tcg_reg = tcg_temp_new_i32();
4661 tcg_tgtmode = tcg_const_i32(tgtmode);
4662 tcg_regno = tcg_const_i32(regno);
4663 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4664 tcg_temp_free_i32(tcg_tgtmode);
4665 tcg_temp_free_i32(tcg_regno);
4666 store_reg(s, rn, tcg_reg);
dcba3a8d 4667 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4668}
4669
fb0e8e79
PM
4670/* Store value to PC as for an exception return (i.e. don't
4671 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4672 * will do the masking based on the new value of the Thumb bit.
4673 */
4674static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4675{
fb0e8e79
PM
4676 tcg_gen_mov_i32(cpu_R[15], pc);
4677 tcg_temp_free_i32(pc);
b5ff1b31
FB
4678}
4679
b0109805 4680/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 4681static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 4682{
fb0e8e79
PM
4683 store_pc_exc_ret(s, pc);
4684 /* The cpsr_write_eret helper will mask the low bits of PC
4685 * appropriately depending on the new Thumb bit, so it must
4686 * be called after storing the new PC.
4687 */
e69ad9df
AL
4688 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4689 gen_io_start();
4690 }
235ea1f5 4691 gen_helper_cpsr_write_eret(cpu_env, cpsr);
e69ad9df
AL
4692 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4693 gen_io_end();
4694 }
7d1b0095 4695 tcg_temp_free_i32(cpsr);
b29fd33d 4696 /* Must exit loop to check un-masked IRQs */
dcba3a8d 4697 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb 4698}
3b46e624 4699
fb0e8e79
PM
4700/* Generate an old-style exception return. Marks pc as dead. */
4701static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4702{
4703 gen_rfe(s, pc, load_cpu_field(spsr));
4704}
4705
c22edfeb
AB
4706/*
4707 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 4708 * only call the helper when running single-threaded TCG code to ensure
4709 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4710 * just skip this instruction. Currently the SEV/SEVL instructions
4711 * which are *one* of many ways to wake the CPU from WFE are not
4712 * implemented so we can't sleep like WFI does.
4713 */
9ee6e8bb
PB
4714static void gen_nop_hint(DisasContext *s, int val)
4715{
4716 switch (val) {
2399d4e7
EC
4717 /* When running in MTTCG we don't generate jumps to the yield and
4718 * WFE helpers as it won't affect the scheduling of other vCPUs.
4719 * If we wanted to more completely model WFE/SEV so we don't busy
4720 * spin unnecessarily we would need to do something more involved.
4721 */
c87e5a61 4722 case 1: /* yield */
2399d4e7 4723 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4724 gen_set_pc_im(s, s->pc);
dcba3a8d 4725 s->base.is_jmp = DISAS_YIELD;
c22edfeb 4726 }
c87e5a61 4727 break;
9ee6e8bb 4728 case 3: /* wfi */
eaed129d 4729 gen_set_pc_im(s, s->pc);
dcba3a8d 4730 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
4731 break;
4732 case 2: /* wfe */
2399d4e7 4733 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4734 gen_set_pc_im(s, s->pc);
dcba3a8d 4735 s->base.is_jmp = DISAS_WFE;
c22edfeb 4736 }
72c1d3af 4737 break;
9ee6e8bb 4738 case 4: /* sev */
12b10571
MR
4739 case 5: /* sevl */
4740 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4741 default: /* nop */
4742 break;
4743 }
4744}
99c475ab 4745
ad69471c 4746#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4747
39d5492a 4748static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4749{
4750 switch (size) {
dd8fbd78
FN
4751 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4752 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4753 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4754 default: abort();
9ee6e8bb 4755 }
9ee6e8bb
PB
4756}
4757
39d5492a 4758static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4759{
4760 switch (size) {
dd8fbd78
FN
4761 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4762 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4763 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4764 default: return;
4765 }
4766}
4767
4768/* 32-bit pairwise ops end up the same as the elementwise versions. */
9ecd3c5c
RH
4769#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
4770#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
4771#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
4772#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
ad69471c 4773
ad69471c
PB
4774#define GEN_NEON_INTEGER_OP_ENV(name) do { \
4775 switch ((size << 1) | u) { \
4776 case 0: \
dd8fbd78 4777 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4778 break; \
4779 case 1: \
dd8fbd78 4780 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4781 break; \
4782 case 2: \
dd8fbd78 4783 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4784 break; \
4785 case 3: \
dd8fbd78 4786 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4787 break; \
4788 case 4: \
dd8fbd78 4789 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4790 break; \
4791 case 5: \
dd8fbd78 4792 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4793 break; \
4794 default: return 1; \
4795 }} while (0)
9ee6e8bb
PB
4796
4797#define GEN_NEON_INTEGER_OP(name) do { \
4798 switch ((size << 1) | u) { \
ad69471c 4799 case 0: \
dd8fbd78 4800 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
4801 break; \
4802 case 1: \
dd8fbd78 4803 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
4804 break; \
4805 case 2: \
dd8fbd78 4806 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
4807 break; \
4808 case 3: \
dd8fbd78 4809 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
4810 break; \
4811 case 4: \
dd8fbd78 4812 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
4813 break; \
4814 case 5: \
dd8fbd78 4815 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 4816 break; \
9ee6e8bb
PB
4817 default: return 1; \
4818 }} while (0)
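/*
 * For instance GEN_NEON_INTEGER_OP(hadd) with size = 1, u = 0
 * expands to gen_helper_neon_hadd_s16(tmp, tmp, tmp2); size/u
 * combinations outside the table make the translator return 1
 * (UNDEF).
 */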
4819
39d5492a 4820static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4821{
39d5492a 4822 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4823 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4824 return tmp;
9ee6e8bb
PB
4825}
4826
39d5492a 4827static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4828{
dd8fbd78 4829 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4830 tcg_temp_free_i32(var);
9ee6e8bb
PB
4831}
4832
39d5492a 4833static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4834{
39d5492a 4835 TCGv_i32 tmp;
9ee6e8bb 4836 if (size == 1) {
0fad6efc
PM
4837 tmp = neon_load_reg(reg & 7, reg >> 4);
4838 if (reg & 8) {
dd8fbd78 4839 gen_neon_dup_high16(tmp);
0fad6efc
PM
4840 } else {
4841 gen_neon_dup_low16(tmp);
dd8fbd78 4842 }
0fad6efc
PM
4843 } else {
4844 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4845 }
dd8fbd78 4846 return tmp;
9ee6e8bb
PB
4847}
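/*
 * Scalar addressing example: for 16-bit elements, "reg" packs the
 * D register in bits [2:0], the halfword-within-word in bit 3 and
 * the word-within-register in bit 4, so reg = 0x1a selects D2[3]
 * (word 1, high halfword) and duplicates it across the 32-bit temp.
 */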
4848
02acedf9 4849static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4850{
b13708bb
RH
4851 TCGv_ptr pd, pm;
4852
600b828c 4853 if (!q && size == 2) {
02acedf9
PM
4854 return 1;
4855 }
b13708bb
RH
4856 pd = vfp_reg_ptr(true, rd);
4857 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
4858 if (q) {
4859 switch (size) {
4860 case 0:
b13708bb 4861 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
4862 break;
4863 case 1:
b13708bb 4864 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
4865 break;
4866 case 2:
b13708bb 4867 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
4868 break;
4869 default:
4870 abort();
4871 }
4872 } else {
4873 switch (size) {
4874 case 0:
b13708bb 4875 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
4876 break;
4877 case 1:
b13708bb 4878 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
4879 break;
4880 default:
4881 abort();
4882 }
4883 }
b13708bb
RH
4884 tcg_temp_free_ptr(pd);
4885 tcg_temp_free_ptr(pm);
02acedf9 4886 return 0;
19457615
FN
4887}
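/*
 * VUZP deinterleaves: for example VUZP.8 d0, d1 leaves the
 * even-indexed bytes of the d0:d1 pair in d0 and the odd-indexed
 * bytes in d1 (the q variants do the same on 128-bit pairs).
 */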
4888
d68a6f3a 4889static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4890{
b13708bb
RH
4891 TCGv_ptr pd, pm;
4892
600b828c 4893 if (!q && size == 2) {
d68a6f3a
PM
4894 return 1;
4895 }
b13708bb
RH
4896 pd = vfp_reg_ptr(true, rd);
4897 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4898 if (q) {
4899 switch (size) {
4900 case 0:
b13708bb 4901 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4902 break;
4903 case 1:
b13708bb 4904 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4905 break;
4906 case 2:
b13708bb 4907 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4908 break;
4909 default:
4910 abort();
4911 }
4912 } else {
4913 switch (size) {
4914 case 0:
b13708bb 4915 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4916 break;
4917 case 1:
b13708bb 4918 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4919 break;
4920 default:
4921 abort();
4922 }
4923 }
b13708bb
RH
4924 tcg_temp_free_ptr(pd);
4925 tcg_temp_free_ptr(pm);
d68a6f3a 4926 return 0;
19457615
FN
4927}
4928
39d5492a 4929static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4930{
39d5492a 4931 TCGv_i32 rd, tmp;
19457615 4932
7d1b0095
PM
4933 rd = tcg_temp_new_i32();
4934 tmp = tcg_temp_new_i32();
19457615
FN
4935
4936 tcg_gen_shli_i32(rd, t0, 8);
4937 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4938 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4939 tcg_gen_or_i32(rd, rd, tmp);
4940
4941 tcg_gen_shri_i32(t1, t1, 8);
4942 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4943 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4944 tcg_gen_or_i32(t1, t1, tmp);
4945 tcg_gen_mov_i32(t0, rd);
4946
7d1b0095
PM
4947 tcg_temp_free_i32(tmp);
4948 tcg_temp_free_i32(rd);
19457615
FN
4949}
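/*
 * Worked example: with t0 = 0x44332211 and t1 = 0x88776655 the
 * bytes are exchanged pairwise, giving t0 = 0x33771155 and
 * t1 = 0x44882266.
 */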
4950
39d5492a 4951static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4952{
39d5492a 4953 TCGv_i32 rd, tmp;
19457615 4954
7d1b0095
PM
4955 rd = tcg_temp_new_i32();
4956 tmp = tcg_temp_new_i32();
19457615
FN
4957
4958 tcg_gen_shli_i32(rd, t0, 16);
4959 tcg_gen_andi_i32(tmp, t1, 0xffff);
4960 tcg_gen_or_i32(rd, rd, tmp);
4961 tcg_gen_shri_i32(t1, t1, 16);
4962 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4963 tcg_gen_or_i32(t1, t1, tmp);
4964 tcg_gen_mov_i32(t0, rd);
4965
7d1b0095
PM
4966 tcg_temp_free_i32(tmp);
4967 tcg_temp_free_i32(rd);
19457615
FN
4968}
4969
4970
9ee6e8bb
PB
4971static struct {
4972 int nregs;
4973 int interleave;
4974 int spacing;
308e5636 4975} const neon_ls_element_type[11] = {
ac55d007
RH
4976 {1, 4, 1},
4977 {1, 4, 2},
9ee6e8bb 4978 {4, 1, 1},
ac55d007
RH
4979 {2, 2, 2},
4980 {1, 3, 1},
4981 {1, 3, 2},
9ee6e8bb
PB
4982 {3, 1, 1},
4983 {1, 1, 1},
ac55d007
RH
4984 {1, 2, 1},
4985 {1, 2, 2},
9ee6e8bb
PB
4986 {2, 1, 1}
4987};
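/*
 * The table is indexed by op (insn bits 11:8): "interleave" is the
 * number of structure elements, "nregs" the number of repetitions
 * and "spacing" the register step between elements. E.g. op = 0
 * (VLD4/VST4) is {1, 4, 1}: each of the 8 >> size elements touches
 * four adjacent D registers, for a total stride of 32 bytes.
 */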
4988
4989/* Translate a NEON load/store element instruction. Return nonzero if the
4990 instruction is invalid. */
7dcc1f89 4991static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4992{
4993 int rd, rn, rm;
4994 int op;
4995 int nregs;
4996 int interleave;
84496233 4997 int spacing;
9ee6e8bb
PB
4998 int stride;
4999 int size;
5000 int reg;
9ee6e8bb 5001 int load;
9ee6e8bb 5002 int n;
7377c2c9 5003 int vec_size;
ac55d007
RH
5004 int mmu_idx;
5005 TCGMemOp endian;
39d5492a
PM
5006 TCGv_i32 addr;
5007 TCGv_i32 tmp;
5008 TCGv_i32 tmp2;
84496233 5009 TCGv_i64 tmp64;
9ee6e8bb 5010
2c7ffc41
PM
5011 /* FIXME: this access check should not take precedence over UNDEF
5012 * for invalid encodings; we will generate incorrect syndrome information
5013 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5014 */
9dbbc748 5015 if (s->fp_excp_el) {
2c7ffc41 5016 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 5017 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5018 return 0;
5019 }
5020
5df8bac1 5021 if (!s->vfp_enabled)
9ee6e8bb
PB
5022 return 1;
5023 VFP_DREG_D(rd, insn);
5024 rn = (insn >> 16) & 0xf;
5025 rm = insn & 0xf;
5026 load = (insn & (1 << 21)) != 0;
ac55d007
RH
5027 endian = s->be_data;
5028 mmu_idx = get_mem_index(s);
9ee6e8bb
PB
5029 if ((insn & (1 << 23)) == 0) {
5030 /* Load store all elements. */
5031 op = (insn >> 8) & 0xf;
5032 size = (insn >> 6) & 3;
84496233 5033 if (op > 10)
9ee6e8bb 5034 return 1;
f2dd89d0
PM
5035 /* Catch UNDEF cases for bad values of align field */
5036 switch (op & 0xc) {
5037 case 4:
5038 if (((insn >> 5) & 1) == 1) {
5039 return 1;
5040 }
5041 break;
5042 case 8:
5043 if (((insn >> 4) & 3) == 3) {
5044 return 1;
5045 }
5046 break;
5047 default:
5048 break;
5049 }
9ee6e8bb
PB
5050 nregs = neon_ls_element_type[op].nregs;
5051 interleave = neon_ls_element_type[op].interleave;
84496233 5052 spacing = neon_ls_element_type[op].spacing;
ac55d007 5053 if (size == 3 && (interleave | spacing) != 1) {
84496233 5054 return 1;
ac55d007 5055 }
e23f12b3
RH
5056 /* For our purposes, bytes are always little-endian. */
5057 if (size == 0) {
5058 endian = MO_LE;
5059 }
5060 /* Consecutive little-endian elements from a single register
5061 * can be promoted to a larger little-endian operation.
5062 */
5063 if (interleave == 1 && endian == MO_LE) {
5064 size = 3;
5065 }
ac55d007 5066 tmp64 = tcg_temp_new_i64();
e318a60b 5067 addr = tcg_temp_new_i32();
ac55d007 5068 tmp2 = tcg_const_i32(1 << size);
dcc65026 5069 load_reg_var(s, addr, rn);
9ee6e8bb 5070 for (reg = 0; reg < nregs; reg++) {
ac55d007
RH
5071 for (n = 0; n < 8 >> size; n++) {
5072 int xs;
5073 for (xs = 0; xs < interleave; xs++) {
5074 int tt = rd + reg + spacing * xs;
5075
5076 if (load) {
5077 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
5078 neon_store_element64(tt, n, size, tmp64);
5079 } else {
5080 neon_load_element64(tmp64, tt, n, size);
5081 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
9ee6e8bb 5082 }
ac55d007 5083 tcg_gen_add_i32(addr, addr, tmp2);
9ee6e8bb
PB
5084 }
5085 }
9ee6e8bb 5086 }
e318a60b 5087 tcg_temp_free_i32(addr);
ac55d007
RH
5088 tcg_temp_free_i32(tmp2);
5089 tcg_temp_free_i64(tmp64);
5090 stride = nregs * interleave * 8;
9ee6e8bb
PB
5091 } else {
5092 size = (insn >> 10) & 3;
5093 if (size == 3) {
5094 /* Load single element to all lanes. */
8e18cde3
PM
5095 int a = (insn >> 4) & 1;
5096 if (!load) {
9ee6e8bb 5097 return 1;
8e18cde3 5098 }
9ee6e8bb
PB
5099 size = (insn >> 6) & 3;
5100 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5101
5102 if (size == 3) {
5103 if (nregs != 4 || a == 0) {
9ee6e8bb 5104 return 1;
99c475ab 5105 }
8e18cde3
PM
5106 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5107 size = 2;
5108 }
5109 if (nregs == 1 && a == 1 && size == 0) {
5110 return 1;
5111 }
5112 if (nregs == 3 && a == 1) {
5113 return 1;
5114 }
e318a60b 5115 addr = tcg_temp_new_i32();
8e18cde3 5116 load_reg_var(s, addr, rn);
7377c2c9
RH
5117
5118 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
5119 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
5120 */
5121 stride = (insn & (1 << 5)) ? 2 : 1;
5122 vec_size = nregs == 1 ? stride * 8 : 8;
5123
5124 tmp = tcg_temp_new_i32();
5125 for (reg = 0; reg < nregs; reg++) {
5126 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5127 s->be_data | size);
5128 if ((rd & 1) && vec_size == 16) {
5129 /* We cannot write 16 bytes at once because the
5130 * destination is unaligned.
5131 */
5132 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5133 8, 8, tmp);
5134 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
5135 neon_reg_offset(rd, 0), 8, 8);
5136 } else {
5137 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5138 vec_size, vec_size, tmp);
8e18cde3 5139 }
7377c2c9
RH
5140 tcg_gen_addi_i32(addr, addr, 1 << size);
5141 rd += stride;
9ee6e8bb 5142 }
7377c2c9 5143 tcg_temp_free_i32(tmp);
e318a60b 5144 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5145 stride = (1 << size) * nregs;
5146 } else {
5147 /* Single element. */
93262b16 5148 int idx = (insn >> 4) & 0xf;
2d6ac920 5149 int reg_idx;
9ee6e8bb
PB
5150 switch (size) {
5151 case 0:
2d6ac920 5152 reg_idx = (insn >> 5) & 7;
9ee6e8bb
PB
5153 stride = 1;
5154 break;
5155 case 1:
2d6ac920 5156 reg_idx = (insn >> 6) & 3;
9ee6e8bb
PB
5157 stride = (insn & (1 << 5)) ? 2 : 1;
5158 break;
5159 case 2:
2d6ac920 5160 reg_idx = (insn >> 7) & 1;
9ee6e8bb
PB
5161 stride = (insn & (1 << 6)) ? 2 : 1;
5162 break;
5163 default:
5164 abort();
5165 }
5166 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5167 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5168 switch (nregs) {
5169 case 1:
5170 if (((idx & (1 << size)) != 0) ||
5171 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5172 return 1;
5173 }
5174 break;
5175 case 3:
5176 if ((idx & 1) != 0) {
5177 return 1;
5178 }
5179 /* fall through */
5180 case 2:
5181 if (size == 2 && (idx & 2) != 0) {
5182 return 1;
5183 }
5184 break;
5185 case 4:
5186 if ((size == 2) && ((idx & 3) == 3)) {
5187 return 1;
5188 }
5189 break;
5190 default:
5191 abort();
5192 }
5193 if ((rd + stride * (nregs - 1)) > 31) {
5194 /* Attempts to write off the end of the register file
5195 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5196 * the neon_load_reg() would write off the end of the array.
5197 */
5198 return 1;
5199 }
2d6ac920 5200 tmp = tcg_temp_new_i32();
e318a60b 5201 addr = tcg_temp_new_i32();
dcc65026 5202 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5203 for (reg = 0; reg < nregs; reg++) {
5204 if (load) {
2d6ac920
RH
5205 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5206 s->be_data | size);
5207 neon_store_element(rd, reg_idx, size, tmp);
9ee6e8bb 5208 } else { /* Store */
2d6ac920
RH
5209 neon_load_element(tmp, rd, reg_idx, size);
5210 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
5211 s->be_data | size);
99c475ab 5212 }
9ee6e8bb 5213 rd += stride;
1b2b1e54 5214 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5215 }
e318a60b 5216 tcg_temp_free_i32(addr);
2d6ac920 5217 tcg_temp_free_i32(tmp);
9ee6e8bb 5218 stride = nregs * (1 << size);
99c475ab 5219 }
9ee6e8bb
PB
5220 }
5221 if (rm != 15) {
39d5492a 5222 TCGv_i32 base;
b26eefb6
PB
5223
5224 base = load_reg(s, rn);
9ee6e8bb 5225 if (rm == 13) {
b26eefb6 5226 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5227 } else {
39d5492a 5228 TCGv_i32 index;
b26eefb6
PB
5229 index = load_reg(s, rm);
5230 tcg_gen_add_i32(base, base, index);
7d1b0095 5231 tcg_temp_free_i32(index);
9ee6e8bb 5232 }
b26eefb6 5233 store_reg(s, rn, base);
9ee6e8bb
PB
5234 }
5235 return 0;
5236}
3b46e624 5237
39d5492a 5238static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5239{
5240 switch (size) {
5241 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5242 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5243 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5244 default: abort();
5245 }
5246}
5247
39d5492a 5248static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5249{
5250 switch (size) {
02da0b2d
PM
5251 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5252 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5253 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5254 default: abort();
5255 }
5256}
5257
39d5492a 5258static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5259{
5260 switch (size) {
02da0b2d
PM
5261 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5262 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5263 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5264 default: abort();
5265 }
5266}
5267
39d5492a 5268static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5269{
5270 switch (size) {
02da0b2d
PM
5271 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5272 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5273 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5274 default: abort();
5275 }
5276}
5277
39d5492a 5278static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5279 int q, int u)
5280{
5281 if (q) {
5282 if (u) {
5283 switch (size) {
5284 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5285 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5286 default: abort();
5287 }
5288 } else {
5289 switch (size) {
5290 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5291 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5292 default: abort();
5293 }
5294 }
5295 } else {
5296 if (u) {
5297 switch (size) {
b408a9b0
CL
5298 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5299 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5300 default: abort();
5301 }
5302 } else {
5303 switch (size) {
5304 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5305 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5306 default: abort();
5307 }
5308 }
5309 }
5310}
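/*
 * Here q selects the rounding forms and u the unsigned ones, so
 * e.g. q = 0, u = 1, size = 2 emits gen_helper_neon_shl_u32 while
 * q = 1, u = 0, size = 1 emits gen_helper_neon_rshl_s16.
 */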
5311
39d5492a 5312static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5313{
5314 if (u) {
5315 switch (size) {
5316 case 0: gen_helper_neon_widen_u8(dest, src); break;
5317 case 1: gen_helper_neon_widen_u16(dest, src); break;
5318 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5319 default: abort();
5320 }
5321 } else {
5322 switch (size) {
5323 case 0: gen_helper_neon_widen_s8(dest, src); break;
5324 case 1: gen_helper_neon_widen_s16(dest, src); break;
5325 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5326 default: abort();
5327 }
5328 }
7d1b0095 5329 tcg_temp_free_i32(src);
ad69471c
PB
5330}
5331
5332static inline void gen_neon_addl(int size)
5333{
5334 switch (size) {
5335 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5336 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5337 case 2: tcg_gen_add_i64(CPU_V001); break;
5338 default: abort();
5339 }
5340}
5341
5342static inline void gen_neon_subl(int size)
5343{
5344 switch (size) {
5345 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5346 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5347 case 2: tcg_gen_sub_i64(CPU_V001); break;
5348 default: abort();
5349 }
5350}
5351
a7812ae4 5352static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5353{
5354 switch (size) {
5355 case 0: gen_helper_neon_negl_u16(var, var); break;
5356 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5357 case 2:
5358 tcg_gen_neg_i64(var, var);
5359 break;
ad69471c
PB
5360 default: abort();
5361 }
5362}
5363
a7812ae4 5364static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5365{
5366 switch (size) {
02da0b2d
PM
5367 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5368 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5369 default: abort();
5370 }
5371}
5372
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

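/*
 * For illustration (an editor's sketch, not part of the original file):
 * cases 4 and 5 above are the 32x32->64 multiplies; the only difference
 * is whether the operands are sign- or zero-extended before the widening
 * multiply.  Scalar equivalents, with hypothetical names:
 */
static inline int64_t example_mull_s32(int32_t a, int32_t b)
{
    return (int64_t)a * b;      /* sign-extend, then multiply */
}

static inline uint64_t example_mull_u32(uint32_t a, uint32_t b)
{
    return (uint64_t)a * b;     /* zero-extend, then multiply */
}
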
static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

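/*
 * For illustration (an editor's sketch, not part of the original file):
 * the op/u combination above selects between plain, signed-saturating,
 * unsigned-saturating, and signed-to-unsigned-saturating ("unarrow")
 * narrows.  The unarrow case behaves per lane like the model below;
 * the real helpers additionally set the sticky QC flag on saturation.
 */
static inline uint8_t example_unarrow_sat8(int16_t v)
{
    if (v < 0) {
        return 0;       /* negative input clamps to 0 */
    }
    if (v > 0xff) {
        return 0xff;    /* overlarge input clamps to the unsigned max */
    }
    return v;
}
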
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};

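/*
 * For illustration (an editor's sketch, not part of the original file):
 * each entry above is a bitmask over the 2-bit size field, so the
 * decoder can reject an op/size combination with a single test:
 */
static inline bool example_neon_3r_size_ok(int op, int size)
{
    return (neon_3r_sizes[op] & (1 << size)) != 0;
}
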
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};

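/*
 * For illustration (an editor's sketch, not part of the original file):
 * the index used with this table is assembled from insn bits [17:16]
 * and [10:7], per the table A7-13 layout described in the comment above:
 */
static inline int example_neon_2rm_op(uint32_t insn)
{
    return ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
}
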
/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

/*
 * Expanders for VBitOps_VBIF, VBIT, VBSL.
 */
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}

static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}

static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

const GVecGen3 bsl_op = {
    .fni8 = gen_bsl_i64,
    .fniv = gen_bsl_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bit_op = {
    .fni8 = gen_bit_i64,
    .fniv = gen_bit_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bif_op = {
    .fni8 = gen_bif_i64,
    .fniv = gen_bif_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

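/*
 * For illustration (an editor's sketch, not part of the original file):
 * all three expanders above compute the same bitwise select,
 *   result = (mask & n) | (~mask & m),
 * using two XORs and one AND; they differ only in which register holds
 * the mask and which operand gets overwritten.  For VBSL, where the
 * destination supplies the mask:
 */
static inline uint64_t example_bsl64(uint64_t d, uint64_t n, uint64_t m)
{
    return m ^ ((m ^ n) & d);   /* equals (d & n) | (~d & m) */
}
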
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_64 },
};

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_64, },
};

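/*
 * For illustration (an editor's sketch, not part of the original file):
 * SSRA/USRA are "shift right and accumulate"; per lane they compute
 * d += (a >> shift) with an arithmetic (signed) or logical (unsigned)
 * shift, which is exactly the two-op sequences above.  For one signed
 * 32-bit lane, assuming the usual arithmetic >> on signed int:
 */
static inline int32_t example_ssra32(int32_t d, int32_t a, int shift)
{
    return d + (a >> shift);    /* arithmetic shift for the signed form */
}
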
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_64 },
};

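/*
 * For illustration (an editor's sketch, not part of the original file):
 * dup_const() used above replicates a per-lane constant across a 64-bit
 * value, so the i64 variants can mask and insert all lanes with a single
 * AND/OR.  A plain-C equivalent for the MO_8 case:
 */
static inline uint64_t example_dup_const8(uint8_t x)
{
    return x * 0x0101010101010101ull;   /* copy the byte into all 8 lanes */
}
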
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_64 },
};

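/*
 * For illustration (an editor's sketch, not part of the original file):
 * SLI ("shift left and insert") keeps the low 'shift' bits of the
 * destination and replaces the rest with the shifted source, which is
 * what both the deposit and the mask-based forms above implement.
 * Per 32-bit lane, for 0 < shift < 32:
 */
static inline uint32_t example_sli32(uint32_t d, uint32_t a, int shift)
{
    uint32_t mask = (1u << shift) - 1;  /* destination bits that survive */
    return (d & mask) | (a << shift);
}
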
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */
const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .vece = MO_64 },
};

/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};

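/*
 * For illustration (an editor's sketch, not part of the original file):
 * the setcond+neg pair above converts the boolean "a and b share a set
 * bit" into the all-ones/all-zeros lane mask that NEON comparisons
 * produce:
 */
static inline uint32_t example_cmtst32(uint32_t a, uint32_t b)
{
    return (a & b) ? 0xffffffffu : 0;
}
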
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

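/*
 * For illustration (an editor's sketch, not part of the original file):
 * each expander above computes both the saturated and the wrapping
 * result and compares them; any lane where they differ ORs all-ones
 * into that lane of the sticky QC accumulator.  A scalar model of one
 * unsigned 8-bit add lane:
 */
static inline uint8_t example_uqadd8(uint8_t a, uint8_t b, bool *qc)
{
    uint8_t wrapped = (uint8_t)(a + b);
    uint8_t saturated = (wrapped < a) ? 0xff : wrapped;
    if (saturated != wrapped) {
        *qc = true;     /* sticky: set on saturation, never cleared here */
    }
    return saturated;
}
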
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        return 1;
    }
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2: /* VORR */
                tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                vec_size, vec_size);
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bsl_op);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bit_op);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bif_op);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VQADD:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqadd_op : sqadd_op) + size);
            return 0;

        case NEON_3R_VQSUB:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqsub_op : sqsub_op) + size);
            return 0;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below.  */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VMAX:
            if (u) {
                tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        case NEON_3R_VMIN:
            if (u) {
                tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        }

        if (size == 3) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
        {
            int rtmp;
            /* Shift instruction operands are reversed.  */
            rtmp = rn;
            rn = rm;
            rm = rtmp;
        }
            break;
        case NEON_3R_VPADD_VQRDMLAH:
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VFM_VQRDMLSH:
            if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VMUL:
                /* VMUL.P8; other cases already eliminated.  */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD_VQRDMLAH:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic.  */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands. */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0) {
                    size--;
                }
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4) {
                    shift = shift - (1 << (size + 3));
                }

                switch (op) {
                case 0:  /* VSHR */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid.  Unsigned results in all zeros; signed results
                     * in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
                                          MIN(shift, (8 << size) - 1),
                                          vec_size, vec_size);
                    } else if (shift >= 8 << size) {
                        tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                    } else {
                        tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
                                          vec_size, vec_size);
                    }
                    return 0;

                case 1:  /* VSRA */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid.  Unsigned results in all zeros; signed results
                     * in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        MIN(shift, (8 << size) - 1),
                                        &ssra_op[size]);
                    } else if (shift >= 8 << size) {
                        /* rd += 0 */
                    } else {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &usra_op[size]);
                    }
                    return 0;

                case 4: /* VSRI */
                    if (!u) {
                        return 1;
                    }
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shift out of range leaves destination unchanged.  */
                    if (shift < 8 << size) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &sri_op[size]);
                    }
                    return 0;

                case 5: /* VSHL, VSLI */
                    if (u) { /* VSLI */
                        /* Shift out of range leaves destination unchanged.  */
                        if (shift < 8 << size) {
                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
                                            vec_size, shift, &sli_op[size]);
                        }
                    } else { /* VSHL */
                        /* Shifts larger than the element size are
                         * architecturally valid and results in zero.
                         */
                        if (shift >= 8 << size) {
                            tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                        } else {
                            tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
                                              vec_size, vec_size);
                        }
                    }
                    return 0;
                }

                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }

                /* To avoid excessive duplication of ops we implement shift
                 * by immediate using the variable shift operations.
                 */
                imm = dup_const(size, shift);

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u) {
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            }
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        if (op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1) {
                        tmp = tmp2;
                    }

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u) {
                            gen_vfp_ulto(0, shift, 1);
                        } else {
                            gen_vfp_slto(0, shift, 1);
                        }
                    } else {
                        if (u) {
                            gen_vfp_toul(0, shift, 1);
                        } else {
                            gen_vfp_tosl(0, shift, 1);
                        }
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert, reg_ofs, vec_size;

            if (q && (rd & 1)) {
                return 1;
            }

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                if (invert) {
                    imm = ~imm;
                }
                break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert) {
                imm = ~imm;
            }

            reg_ofs = neon_reg_offset(rd, 0);
            vec_size = q ? 16 : 8;

            if (op & 1 && op < 12) {
                if (invert) {
                    /* The immediate value has already been inverted,
                     * so BIC becomes AND.
                     */
                    tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
                                      vec_size, vec_size);
                } else {
                    tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
                                     vec_size, vec_size);
                }
            } else {
                /* VMOV, VMVN.  */
                if (op == 14 && invert) {
                    TCGv_i64 t64 = tcg_temp_new_i64();

                    for (pass = 0; pass <= q; ++pass) {
                        uint64_t val = 0;
                        int n;

                        for (n = 0; n < 8; n++) {
                            if (imm & (1 << (n + pass * 8))) {
                                val |= 0xffull << (n * 8);
                            }
                        }
                        tcg_gen_movi_i64(t64, val);
                        neon_store_reg64(t64, rd + pass);
                    }
                    tcg_temp_free_i64(t64);
                } else {
                    tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
                }
            }
        }
e4b3861d 7294 } else { /* ((insn & 0x00800010) == 0x00800000) */
9ee6e8bb
PB
7295 if (size != 3) {
7296 op = (insn >> 8) & 0xf;
7297 if ((insn & (1 << 6)) == 0) {
7298 /* Three registers of different lengths. */
7299 int src1_wide;
7300 int src2_wide;
7301 int prewiden;
526d0096
PM
7302 /* undefreq: bit 0 : UNDEF if size == 0
7303 * bit 1 : UNDEF if size == 1
7304 * bit 2 : UNDEF if size == 2
7305 * bit 3 : UNDEF if U == 1
7306 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
7307 */
7308 int undefreq;
7309 /* prewiden, src1_wide, src2_wide, undefreq */
7310 static const int neon_3reg_wide[16][4] = {
7311 {1, 0, 0, 0}, /* VADDL */
7312 {1, 1, 0, 0}, /* VADDW */
7313 {1, 0, 0, 0}, /* VSUBL */
7314 {1, 1, 0, 0}, /* VSUBW */
7315 {0, 1, 1, 0}, /* VADDHN */
7316 {0, 0, 0, 0}, /* VABAL */
7317 {0, 1, 1, 0}, /* VSUBHN */
7318 {0, 0, 0, 0}, /* VABDL */
7319 {0, 0, 0, 0}, /* VMLAL */
526d0096 7320 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 7321 {0, 0, 0, 0}, /* VMLSL */
526d0096 7322 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 7323 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 7324 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 7325 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 7326 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
7327 };
7328
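    /* For example VADDW (op 1) has src1_wide set: its first source is
     * already the wide type, so only the narrow second source is widened
     * (prewiden) before the add. */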
7329 prewiden = neon_3reg_wide[op][0];
7330 src1_wide = neon_3reg_wide[op][1];
7331 src2_wide = neon_3reg_wide[op][2];
695272dc 7332 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 7333
526d0096
PM
7334 if ((undefreq & (1 << size)) ||
7335 ((undefreq & 8) && u)) {
695272dc
PM
7336 return 1;
7337 }
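    /* e.g. VQDMULL (undefreq == 1) UNDEFs only for size == 0, while
     * VQDMLAL/VQDMLSL (undefreq == 9) also UNDEF when U == 1. */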
7338 if ((src1_wide && (rn & 1)) ||
7339 (src2_wide && (rm & 1)) ||
7340 (!src2_wide && (rd & 1))) {
ad69471c 7341 return 1;
695272dc 7342 }
ad69471c 7343
4e624eda
PM
7344 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
7345 * outside the loop below as it only performs a single pass.
7346 */
7347 if (op == 14 && size == 2) {
7348 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
7349
962fcbf2 7350 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
7351 return 1;
7352 }
7353 tcg_rn = tcg_temp_new_i64();
7354 tcg_rm = tcg_temp_new_i64();
7355 tcg_rd = tcg_temp_new_i64();
7356 neon_load_reg64(tcg_rn, rn);
7357 neon_load_reg64(tcg_rm, rm);
7358 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
7359 neon_store_reg64(tcg_rd, rd);
7360 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
7361 neon_store_reg64(tcg_rd, rd + 1);
7362 tcg_temp_free_i64(tcg_rn);
7363 tcg_temp_free_i64(tcg_rm);
7364 tcg_temp_free_i64(tcg_rd);
7365 return 0;
7366 }
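    /* The pmull helpers perform a carry-less multiply over GF(2)[x]:
     * partial products are combined with XOR rather than ADD, so e.g.
     * 0b11 * 0b11 = 0b101 instead of 0b1001. */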
7367
9ee6e8bb
PB
7368 /* Avoid overlapping operands. Wide source operands are
7369 always aligned so will never overlap with wide
7370 destinations in problematic ways. */
8f8e3aa4 7371 if (rd == rm && !src2_wide) {
dd8fbd78
FN
7372 tmp = neon_load_reg(rm, 1);
7373 neon_store_scratch(2, tmp);
8f8e3aa4 7374 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
7375 tmp = neon_load_reg(rn, 1);
7376 neon_store_scratch(2, tmp);
9ee6e8bb 7377 }
f764718d 7378 tmp3 = NULL;
9ee6e8bb 7379 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7380 if (src1_wide) {
7381 neon_load_reg64(cpu_V0, rn + pass);
f764718d 7382 tmp = NULL;
9ee6e8bb 7383 } else {
ad69471c 7384 if (pass == 1 && rd == rn) {
dd8fbd78 7385 tmp = neon_load_scratch(2);
9ee6e8bb 7386 } else {
ad69471c
PB
7387 tmp = neon_load_reg(rn, pass);
7388 }
7389 if (prewiden) {
7390 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
7391 }
7392 }
ad69471c
PB
7393 if (src2_wide) {
7394 neon_load_reg64(cpu_V1, rm + pass);
f764718d 7395 tmp2 = NULL;
9ee6e8bb 7396 } else {
ad69471c 7397 if (pass == 1 && rd == rm) {
dd8fbd78 7398 tmp2 = neon_load_scratch(2);
9ee6e8bb 7399 } else {
ad69471c
PB
7400 tmp2 = neon_load_reg(rm, pass);
7401 }
7402 if (prewiden) {
7403 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 7404 }
9ee6e8bb
PB
7405 }
7406 switch (op) {
7407 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 7408 gen_neon_addl(size);
9ee6e8bb 7409 break;
79b0e534 7410 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 7411 gen_neon_subl(size);
9ee6e8bb
PB
7412 break;
7413 case 5: case 7: /* VABAL, VABDL */
7414 switch ((size << 1) | u) {
ad69471c
PB
7415 case 0:
7416 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
7417 break;
7418 case 1:
7419 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
7420 break;
7421 case 2:
7422 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
7423 break;
7424 case 3:
7425 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
7426 break;
7427 case 4:
7428 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
7429 break;
7430 case 5:
7431 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
7432 break;
9ee6e8bb
PB
7433 default: abort();
7434 }
7d1b0095
PM
7435 tcg_temp_free_i32(tmp2);
7436 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7437 break;
7438 case 8: case 9: case 10: case 11: case 12: case 13:
7439 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 7440 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
7441 break;
7442 case 14: /* Polynomial VMULL */
e5ca24cb 7443 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
7444 tcg_temp_free_i32(tmp2);
7445 tcg_temp_free_i32(tmp);
e5ca24cb 7446 break;
695272dc
PM
7447 default: /* 15 is RESERVED: caught earlier */
7448 abort();
9ee6e8bb 7449 }
ebcd88ce
PM
7450 if (op == 13) {
7451 /* VQDMULL */
7452 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7453 neon_store_reg64(cpu_V0, rd + pass);
7454 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 7455 /* Accumulate. */
ebcd88ce 7456 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7457 switch (op) {
4dc064e6
PM
7458 case 10: /* VMLSL */
7459 gen_neon_negl(cpu_V0, size);
7460 /* Fall through */
7461 case 5: case 8: /* VABAL, VMLAL */
ad69471c 7462 gen_neon_addl(size);
9ee6e8bb
PB
7463 break;
7464 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 7465 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7466 if (op == 11) {
7467 gen_neon_negl(cpu_V0, size);
7468 }
ad69471c
PB
7469 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7470 break;
9ee6e8bb
PB
7471 default:
7472 abort();
7473 }
ad69471c 7474 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7475 } else if (op == 4 || op == 6) {
7476 /* Narrowing operation. */
7d1b0095 7477 tmp = tcg_temp_new_i32();
79b0e534 7478 if (!u) {
9ee6e8bb 7479 switch (size) {
ad69471c
PB
7480 case 0:
7481 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
7482 break;
7483 case 1:
7484 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
7485 break;
7486 case 2:
7487 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7488 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7489 break;
9ee6e8bb
PB
7490 default: abort();
7491 }
7492 } else {
7493 switch (size) {
ad69471c
PB
7494 case 0:
7495 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
7496 break;
7497 case 1:
7498 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
7499 break;
7500 case 2:
7501 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
7502 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7503 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7504 break;
9ee6e8bb
PB
7505 default: abort();
7506 }
7507 }
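    /* The u (rounding) forms bias the value by half the discarded range
     * before taking the high half, as the addi of 1u << 31 above shows
     * for the size == 2 case. */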
ad69471c
PB
7508 if (pass == 0) {
7509 tmp3 = tmp;
7510 } else {
7511 neon_store_reg(rd, 0, tmp3);
7512 neon_store_reg(rd, 1, tmp);
7513 }
9ee6e8bb
PB
7514 } else {
7515 /* Write back the result. */
ad69471c 7516 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7517 }
7518 }
7519 } else {
3e3326df
PM
7520 /* Two registers and a scalar. NB that for ops of this form
7521 * the ARM ARM labels bit 24 as Q, but it is in our variable
7522 * 'u', not 'q'.
7523 */
7524 if (size == 0) {
7525 return 1;
7526 }
9ee6e8bb 7527 switch (op) {
9ee6e8bb 7528 case 1: /* Float VMLA scalar */
9ee6e8bb 7529 case 5: /* Float VMLS scalar */
9ee6e8bb 7530 case 9: /* Float VMUL scalar */
3e3326df
PM
7531 if (size == 1) {
7532 return 1;
7533 }
7534 /* fall through */
7535 case 0: /* Integer VMLA scalar */
7536 case 4: /* Integer VMLS scalar */
7537 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
7538 case 12: /* VQDMULH scalar */
7539 case 13: /* VQRDMULH scalar */
3e3326df
PM
7540 if (u && ((rd | rn) & 1)) {
7541 return 1;
7542 }
dd8fbd78
FN
7543 tmp = neon_get_scalar(size, rm);
7544 neon_store_scratch(0, tmp);
9ee6e8bb 7545 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
7546 tmp = neon_load_scratch(0);
7547 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
7548 if (op == 12) {
7549 if (size == 1) {
02da0b2d 7550 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7551 } else {
02da0b2d 7552 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7553 }
7554 } else if (op == 13) {
7555 if (size == 1) {
02da0b2d 7556 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7557 } else {
02da0b2d 7558 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7559 }
7560 } else if (op & 1) {
aa47cfdd
PM
7561 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7562 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
7563 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
7564 } else {
7565 switch (size) {
dd8fbd78
FN
7566 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
7567 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
7568 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 7569 default: abort();
9ee6e8bb
PB
7570 }
7571 }
7d1b0095 7572 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7573 if (op < 8) {
7574 /* Accumulate. */
dd8fbd78 7575 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
7576 switch (op) {
7577 case 0:
dd8fbd78 7578 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
7579 break;
7580 case 1:
aa47cfdd
PM
7581 {
7582 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7583 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
7584 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7585 break;
aa47cfdd 7586 }
9ee6e8bb 7587 case 4:
dd8fbd78 7588 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
7589 break;
7590 case 5:
aa47cfdd
PM
7591 {
7592 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7593 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7594 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7595 break;
aa47cfdd 7596 }
9ee6e8bb
PB
7597 default:
7598 abort();
7599 }
7d1b0095 7600 tcg_temp_free_i32(tmp2);
9ee6e8bb 7601 }
dd8fbd78 7602 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7603 }
7604 break;
9ee6e8bb 7605 case 3: /* VQDMLAL scalar */
9ee6e8bb 7606 case 7: /* VQDMLSL scalar */
9ee6e8bb 7607 case 11: /* VQDMULL scalar */
3e3326df 7608 if (u == 1) {
ad69471c 7609 return 1;
3e3326df
PM
7610 }
7611 /* fall through */
7612 case 2: /* VMLAL scalar */
7613 case 6: /* VMLSL scalar */
7614 case 10: /* VMULL scalar */
7615 if (rd & 1) {
7616 return 1;
7617 }
dd8fbd78 7618 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7619 /* We need a copy of tmp2 because gen_neon_mull
7620 * deletes it during pass 0. */
7d1b0095 7621 tmp4 = tcg_temp_new_i32();
c6067f04 7622 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7623 tmp3 = neon_load_reg(rn, 1);
ad69471c 7624
9ee6e8bb 7625 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7626 if (pass == 0) {
7627 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7628 } else {
dd8fbd78 7629 tmp = tmp3;
c6067f04 7630 tmp2 = tmp4;
9ee6e8bb 7631 }
ad69471c 7632 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7633 if (op != 11) {
7634 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7635 }
9ee6e8bb 7636 switch (op) {
4dc064e6
PM
7637 case 6:
7638 gen_neon_negl(cpu_V0, size);
7639 /* Fall through */
7640 case 2:
ad69471c 7641 gen_neon_addl(size);
9ee6e8bb
PB
7642 break;
7643 case 3: case 7:
ad69471c 7644 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7645 if (op == 7) {
7646 gen_neon_negl(cpu_V0, size);
7647 }
ad69471c 7648 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7649 break;
7650 case 10:
7651 /* no-op */
7652 break;
7653 case 11:
ad69471c 7654 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7655 break;
7656 default:
7657 abort();
7658 }
ad69471c 7659 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7660 }
61adacc8
RH
7661 break;
7662 case 14: /* VQRDMLAH scalar */
7663 case 15: /* VQRDMLSH scalar */
7664 {
7665 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7666
962fcbf2 7667 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
7668 return 1;
7669 }
7670 if (u && ((rd | rn) & 1)) {
7671 return 1;
7672 }
7673 if (op == 14) {
7674 if (size == 1) {
7675 fn = gen_helper_neon_qrdmlah_s16;
7676 } else {
7677 fn = gen_helper_neon_qrdmlah_s32;
7678 }
7679 } else {
7680 if (size == 1) {
7681 fn = gen_helper_neon_qrdmlsh_s16;
7682 } else {
7683 fn = gen_helper_neon_qrdmlsh_s32;
7684 }
7685 }
dd8fbd78 7686
61adacc8
RH
7687 tmp2 = neon_get_scalar(size, rm);
7688 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7689 tmp = neon_load_reg(rn, pass);
7690 tmp3 = neon_load_reg(rd, pass);
7691 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7692 tcg_temp_free_i32(tmp3);
7693 neon_store_reg(rd, pass, tmp);
7694 }
7695 tcg_temp_free_i32(tmp2);
7696 }
9ee6e8bb 7697 break;
61adacc8
RH
7698 default:
7699 g_assert_not_reached();
9ee6e8bb
PB
7700 }
7701 }
7702 } else { /* size == 3 */
7703 if (!u) {
7704 /* Extract. */
9ee6e8bb 7705 imm = (insn >> 8) & 0xf;
ad69471c
PB
7706
7707 if (imm > 7 && !q)
7708 return 1;
7709
52579ea1
PM
7710 if (q && ((rd | rn | rm) & 1)) {
7711 return 1;
7712 }
7713
ad69471c
PB
7714 if (imm == 0) {
7715 neon_load_reg64(cpu_V0, rn);
7716 if (q) {
7717 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7718 }
ad69471c
PB
7719 } else if (imm == 8) {
7720 neon_load_reg64(cpu_V0, rn + 1);
7721 if (q) {
7722 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7723 }
ad69471c 7724 } else if (q) {
a7812ae4 7725 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7726 if (imm < 8) {
7727 neon_load_reg64(cpu_V0, rn);
a7812ae4 7728 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7729 } else {
7730 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7731 neon_load_reg64(tmp64, rm);
ad69471c
PB
7732 }
7733 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7734 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7735 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7736 if (imm < 8) {
7737 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7738 } else {
ad69471c
PB
7739 neon_load_reg64(cpu_V1, rm + 1);
7740 imm -= 8;
9ee6e8bb 7741 }
ad69471c 7742 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7743 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7744 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7745 tcg_temp_free_i64(tmp64);
ad69471c 7746 } else {
a7812ae4 7747 /* Non-quad VEXT: combine the two D registers directly. */
ad69471c 7748 neon_load_reg64(cpu_V0, rn);
a7812ae4 7749 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7750 neon_load_reg64(cpu_V1, rm);
a7812ae4 7751 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7752 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7753 }
7754 neon_store_reg64(cpu_V0, rd);
7755 if (q) {
7756 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7757 }
7758 } else if ((insn & (1 << 11)) == 0) {
7759 /* Two register misc. */
7760 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7761 size = (insn >> 18) & 3;
600b828c
PM
7762 /* UNDEF for unknown op values and bad op-size combinations */
7763 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7764 return 1;
7765 }
fe8fcf3d
PM
7766 if (neon_2rm_is_v8_op(op) &&
7767 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7768 return 1;
7769 }
fc2a9b37
PM
7770 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7771 q && ((rm | rd) & 1)) {
7772 return 1;
7773 }
9ee6e8bb 7774 switch (op) {
600b828c 7775 case NEON_2RM_VREV64:
9ee6e8bb 7776 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7777 tmp = neon_load_reg(rm, pass * 2);
7778 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7779 switch (size) {
dd8fbd78
FN
7780 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7781 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7782 case 2: /* no-op */ break;
7783 default: abort();
7784 }
dd8fbd78 7785 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7786 if (size == 2) {
dd8fbd78 7787 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7788 } else {
9ee6e8bb 7789 switch (size) {
dd8fbd78
FN
7790 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7791 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7792 default: abort();
7793 }
dd8fbd78 7794 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7795 }
7796 }
7797 break;
600b828c
PM
7798 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7799 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7800 for (pass = 0; pass < q + 1; pass++) {
7801 tmp = neon_load_reg(rm, pass * 2);
7802 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7803 tmp = neon_load_reg(rm, pass * 2 + 1);
7804 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7805 switch (size) {
7806 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7807 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7808 case 2: tcg_gen_add_i64(CPU_V001); break;
7809 default: abort();
7810 }
600b828c 7811 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7812 /* Accumulate. */
ad69471c
PB
7813 neon_load_reg64(cpu_V1, rd + pass);
7814 gen_neon_addl(size);
9ee6e8bb 7815 }
ad69471c 7816 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7817 }
7818 break;
600b828c 7819 case NEON_2RM_VTRN:
9ee6e8bb 7820 if (size == 2) {
a5a14945 7821 int n;
9ee6e8bb 7822 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7823 tmp = neon_load_reg(rm, n);
7824 tmp2 = neon_load_reg(rd, n + 1);
7825 neon_store_reg(rm, n, tmp2);
7826 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7827 }
7828 } else {
7829 goto elementwise;
7830 }
7831 break;
600b828c 7832 case NEON_2RM_VUZP:
02acedf9 7833 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7834 return 1;
9ee6e8bb
PB
7835 }
7836 break;
600b828c 7837 case NEON_2RM_VZIP:
d68a6f3a 7838 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7839 return 1;
9ee6e8bb
PB
7840 }
7841 break;
600b828c
PM
7842 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7843 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7844 if (rm & 1) {
7845 return 1;
7846 }
f764718d 7847 tmp2 = NULL;
9ee6e8bb 7848 for (pass = 0; pass < 2; pass++) {
ad69471c 7849 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7850 tmp = tcg_temp_new_i32();
600b828c
PM
7851 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7852 tmp, cpu_V0);
ad69471c
PB
7853 if (pass == 0) {
7854 tmp2 = tmp;
7855 } else {
7856 neon_store_reg(rd, 0, tmp2);
7857 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7858 }
9ee6e8bb
PB
7859 }
7860 break;
600b828c 7861 case NEON_2RM_VSHLL:
fc2a9b37 7862 if (q || (rd & 1)) {
9ee6e8bb 7863 return 1;
600b828c 7864 }
ad69471c
PB
7865 tmp = neon_load_reg(rm, 0);
7866 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7867 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7868 if (pass == 1)
7869 tmp = tmp2;
7870 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7871 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7872 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7873 }
7874 break;
600b828c 7875 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7876 {
7877 TCGv_ptr fpst;
7878 TCGv_i32 ahp;
7879
602f6e42 7880 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
7881 q || (rm & 1)) {
7882 return 1;
7883 }
7d1b0095
PM
7884 tmp = tcg_temp_new_i32();
7885 tmp2 = tcg_temp_new_i32();
486624fc
AB
7886 fpst = get_fpstatus_ptr(true);
7887 ahp = get_ahp_flag();
60011498 7888 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7889 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7890 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7891 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7892 tcg_gen_shli_i32(tmp2, tmp2, 16);
7893 tcg_gen_or_i32(tmp2, tmp2, tmp);
7894 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7895 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7896 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7897 neon_store_reg(rd, 0, tmp2);
7d1b0095 7898 tmp2 = tcg_temp_new_i32();
486624fc 7899 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7900 tcg_gen_shli_i32(tmp2, tmp2, 16);
7901 tcg_gen_or_i32(tmp2, tmp2, tmp);
7902 neon_store_reg(rd, 1, tmp2);
7d1b0095 7903 tcg_temp_free_i32(tmp);
486624fc
AB
7904 tcg_temp_free_i32(ahp);
7905 tcg_temp_free_ptr(fpst);
60011498 7906 break;
486624fc 7907 }
600b828c 7908 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7909 {
7910 TCGv_ptr fpst;
7911 TCGv_i32 ahp;
602f6e42 7912 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
fc2a9b37
PM
7913 q || (rd & 1)) {
7914 return 1;
7915 }
486624fc
AB
7916 fpst = get_fpstatus_ptr(true);
7917 ahp = get_ahp_flag();
7d1b0095 7918 tmp3 = tcg_temp_new_i32();
60011498
PB
7919 tmp = neon_load_reg(rm, 0);
7920 tmp2 = neon_load_reg(rm, 1);
7921 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7922 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7923 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7924 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7925 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7926 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7927 tcg_temp_free_i32(tmp);
60011498 7928 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7929 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7930 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7931 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7932 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7933 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7934 tcg_temp_free_i32(tmp2);
7935 tcg_temp_free_i32(tmp3);
486624fc
AB
7936 tcg_temp_free_i32(ahp);
7937 tcg_temp_free_ptr(fpst);
60011498 7938 break;
486624fc 7939 }
9d935509 7940 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 7941 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
7942 return 1;
7943 }
1a66ac61
RH
7944 ptr1 = vfp_reg_ptr(true, rd);
7945 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7946
7947 /* Bit 6 is the lowest opcode bit; it distinguishes between
7948 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7949 */
7950 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7951
7952 if (op == NEON_2RM_AESE) {
1a66ac61 7953 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7954 } else {
1a66ac61 7955 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7956 }
1a66ac61
RH
7957 tcg_temp_free_ptr(ptr1);
7958 tcg_temp_free_ptr(ptr2);
9d935509
AB
7959 tcg_temp_free_i32(tmp3);
7960 break;
f1ecb913 7961 case NEON_2RM_SHA1H:
962fcbf2 7962 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
7963 return 1;
7964 }
1a66ac61
RH
7965 ptr1 = vfp_reg_ptr(true, rd);
7966 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7967
1a66ac61 7968 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7969
1a66ac61
RH
7970 tcg_temp_free_ptr(ptr1);
7971 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7972 break;
7973 case NEON_2RM_SHA1SU1:
7974 if ((rm | rd) & 1) {
7975 return 1;
7976 }
7977 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7978 if (q) {
962fcbf2 7979 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
7980 return 1;
7981 }
962fcbf2 7982 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
7983 return 1;
7984 }
1a66ac61
RH
7985 ptr1 = vfp_reg_ptr(true, rd);
7986 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7987 if (q) {
1a66ac61 7988 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7989 } else {
1a66ac61 7990 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7991 }
1a66ac61
RH
7992 tcg_temp_free_ptr(ptr1);
7993 tcg_temp_free_ptr(ptr2);
f1ecb913 7994 break;
4bf940be
RH
7995
7996 case NEON_2RM_VMVN:
7997 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
7998 break;
7999 case NEON_2RM_VNEG:
8000 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
8001 break;
8002
9ee6e8bb
PB
8003 default:
8004 elementwise:
8005 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 8006 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8007 tcg_gen_ld_f32(cpu_F0s, cpu_env,
8008 neon_reg_offset(rm, pass));
f764718d 8009 tmp = NULL;
9ee6e8bb 8010 } else {
dd8fbd78 8011 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
8012 }
8013 switch (op) {
600b828c 8014 case NEON_2RM_VREV32:
9ee6e8bb 8015 switch (size) {
dd8fbd78
FN
8016 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8017 case 1: gen_swap_half(tmp); break;
600b828c 8018 default: abort();
9ee6e8bb
PB
8019 }
8020 break;
600b828c 8021 case NEON_2RM_VREV16:
dd8fbd78 8022 gen_rev16(tmp);
9ee6e8bb 8023 break;
600b828c 8024 case NEON_2RM_VCLS:
9ee6e8bb 8025 switch (size) {
dd8fbd78
FN
8026 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
8027 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
8028 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 8029 default: abort();
9ee6e8bb
PB
8030 }
8031 break;
600b828c 8032 case NEON_2RM_VCLZ:
9ee6e8bb 8033 switch (size) {
dd8fbd78
FN
8034 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
8035 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 8036 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 8037 default: abort();
9ee6e8bb
PB
8038 }
8039 break;
600b828c 8040 case NEON_2RM_VCNT:
dd8fbd78 8041 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 8042 break;
600b828c 8043 case NEON_2RM_VQABS:
9ee6e8bb 8044 switch (size) {
02da0b2d
PM
8045 case 0:
8046 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
8047 break;
8048 case 1:
8049 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
8050 break;
8051 case 2:
8052 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
8053 break;
600b828c 8054 default: abort();
9ee6e8bb
PB
8055 }
8056 break;
600b828c 8057 case NEON_2RM_VQNEG:
9ee6e8bb 8058 switch (size) {
02da0b2d
PM
8059 case 0:
8060 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
8061 break;
8062 case 1:
8063 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
8064 break;
8065 case 2:
8066 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
8067 break;
600b828c 8068 default: abort();
9ee6e8bb
PB
8069 }
8070 break;
600b828c 8071 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 8072 tmp2 = tcg_const_i32(0);
9ee6e8bb 8073 switch(size) {
dd8fbd78
FN
8074 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
8075 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
8076 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 8077 default: abort();
9ee6e8bb 8078 }
39d5492a 8079 tcg_temp_free_i32(tmp2);
600b828c 8080 if (op == NEON_2RM_VCLE0) {
dd8fbd78 8081 tcg_gen_not_i32(tmp, tmp);
600b828c 8082 }
9ee6e8bb 8083 break;
600b828c 8084 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 8085 tmp2 = tcg_const_i32(0);
9ee6e8bb 8086 switch(size) {
dd8fbd78
FN
8087 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
8088 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
8089 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 8090 default: abort();
9ee6e8bb 8091 }
39d5492a 8092 tcg_temp_free_i32(tmp2);
600b828c 8093 if (op == NEON_2RM_VCLT0) {
dd8fbd78 8094 tcg_gen_not_i32(tmp, tmp);
600b828c 8095 }
9ee6e8bb 8096 break;
600b828c 8097 case NEON_2RM_VCEQ0:
dd8fbd78 8098 tmp2 = tcg_const_i32(0);
9ee6e8bb 8099 switch(size) {
dd8fbd78
FN
8100 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
8101 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
8102 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 8103 default: abort();
9ee6e8bb 8104 }
39d5492a 8105 tcg_temp_free_i32(tmp2);
9ee6e8bb 8106 break;
600b828c 8107 case NEON_2RM_VABS:
9ee6e8bb 8108 switch(size) {
dd8fbd78
FN
8109 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
8110 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
8111 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 8112 default: abort();
9ee6e8bb
PB
8113 }
8114 break;
600b828c 8115 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
8116 {
8117 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8118 tmp2 = tcg_const_i32(0);
aa47cfdd 8119 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8120 tcg_temp_free_i32(tmp2);
aa47cfdd 8121 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8122 break;
aa47cfdd 8123 }
600b828c 8124 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
8125 {
8126 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8127 tmp2 = tcg_const_i32(0);
aa47cfdd 8128 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8129 tcg_temp_free_i32(tmp2);
aa47cfdd 8130 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8131 break;
aa47cfdd 8132 }
600b828c 8133 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
8134 {
8135 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8136 tmp2 = tcg_const_i32(0);
aa47cfdd 8137 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8138 tcg_temp_free_i32(tmp2);
aa47cfdd 8139 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8140 break;
aa47cfdd 8141 }
600b828c 8142 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
8143 {
8144 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8145 tmp2 = tcg_const_i32(0);
aa47cfdd 8146 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8147 tcg_temp_free_i32(tmp2);
aa47cfdd 8148 tcg_temp_free_ptr(fpstatus);
0e326109 8149 break;
aa47cfdd 8150 }
600b828c 8151 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
8152 {
8153 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8154 tmp2 = tcg_const_i32(0);
aa47cfdd 8155 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8156 tcg_temp_free_i32(tmp2);
aa47cfdd 8157 tcg_temp_free_ptr(fpstatus);
0e326109 8158 break;
aa47cfdd 8159 }
600b828c 8160 case NEON_2RM_VABS_F:
4373f3ce 8161 gen_vfp_abs(0);
9ee6e8bb 8162 break;
600b828c 8163 case NEON_2RM_VNEG_F:
4373f3ce 8164 gen_vfp_neg(0);
9ee6e8bb 8165 break;
600b828c 8166 case NEON_2RM_VSWP:
dd8fbd78
FN
8167 tmp2 = neon_load_reg(rd, pass);
8168 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8169 break;
600b828c 8170 case NEON_2RM_VTRN:
dd8fbd78 8171 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 8172 switch (size) {
dd8fbd78
FN
8173 case 0: gen_neon_trn_u8(tmp, tmp2); break;
8174 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 8175 default: abort();
9ee6e8bb 8176 }
dd8fbd78 8177 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8178 break;
34f7b0a2
WN
8179 case NEON_2RM_VRINTN:
8180 case NEON_2RM_VRINTA:
8181 case NEON_2RM_VRINTM:
8182 case NEON_2RM_VRINTP:
8183 case NEON_2RM_VRINTZ:
8184 {
8185 TCGv_i32 tcg_rmode;
8186 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8187 int rmode;
8188
8189 if (op == NEON_2RM_VRINTZ) {
8190 rmode = FPROUNDING_ZERO;
8191 } else {
8192 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
8193 }
8194
8195 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8196 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8197 cpu_env);
8198 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
8199 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8200 cpu_env);
8201 tcg_temp_free_ptr(fpstatus);
8202 tcg_temp_free_i32(tcg_rmode);
8203 break;
8204 }
2ce70625
WN
8205 case NEON_2RM_VRINTX:
8206 {
8207 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8208 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
8209 tcg_temp_free_ptr(fpstatus);
8210 break;
8211 }
901ad525
WN
8212 case NEON_2RM_VCVTAU:
8213 case NEON_2RM_VCVTAS:
8214 case NEON_2RM_VCVTNU:
8215 case NEON_2RM_VCVTNS:
8216 case NEON_2RM_VCVTPU:
8217 case NEON_2RM_VCVTPS:
8218 case NEON_2RM_VCVTMU:
8219 case NEON_2RM_VCVTMS:
8220 {
8221 bool is_signed = !extract32(insn, 7, 1);
8222 TCGv_ptr fpst = get_fpstatus_ptr(1);
8223 TCGv_i32 tcg_rmode, tcg_shift;
8224 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
8225
8226 tcg_shift = tcg_const_i32(0);
8227 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8228 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8229 cpu_env);
8230
8231 if (is_signed) {
8232 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
8233 tcg_shift, fpst);
8234 } else {
8235 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
8236 tcg_shift, fpst);
8237 }
8238
8239 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8240 cpu_env);
8241 tcg_temp_free_i32(tcg_rmode);
8242 tcg_temp_free_i32(tcg_shift);
8243 tcg_temp_free_ptr(fpst);
8244 break;
8245 }
600b828c 8246 case NEON_2RM_VRECPE:
b6d4443a
AB
8247 {
8248 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8249 gen_helper_recpe_u32(tmp, tmp, fpstatus);
8250 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8251 break;
b6d4443a 8252 }
600b828c 8253 case NEON_2RM_VRSQRTE:
c2fb418e
AB
8254 {
8255 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8256 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
8257 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8258 break;
c2fb418e 8259 }
600b828c 8260 case NEON_2RM_VRECPE_F:
b6d4443a
AB
8261 {
8262 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8263 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
8264 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8265 break;
b6d4443a 8266 }
600b828c 8267 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
8268 {
8269 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8270 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
8271 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8272 break;
c2fb418e 8273 }
600b828c 8274 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 8275 gen_vfp_sito(0, 1);
9ee6e8bb 8276 break;
600b828c 8277 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 8278 gen_vfp_uito(0, 1);
9ee6e8bb 8279 break;
600b828c 8280 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 8281 gen_vfp_tosiz(0, 1);
9ee6e8bb 8282 break;
600b828c 8283 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 8284 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
8285 break;
8286 default:
600b828c
PM
8287 /* Reserved op values were caught by the
8288 * neon_2rm_sizes[] check earlier.
8289 */
8290 abort();
9ee6e8bb 8291 }
600b828c 8292 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8293 tcg_gen_st_f32(cpu_F0s, cpu_env,
8294 neon_reg_offset(rd, pass));
9ee6e8bb 8295 } else {
dd8fbd78 8296 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
8297 }
8298 }
8299 break;
8300 }
8301 } else if ((insn & (1 << 10)) == 0) {
8302 /* VTBL, VTBX. */
56907d77
PM
8303 int n = ((insn >> 8) & 3) + 1;
8304 if ((rn + n) > 32) {
8305 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
8306 * helper function running off the end of the register file.
8307 */
8308 return 1;
8309 }
8310 n <<= 3; /* n is now the table length in bytes (8, 16, 24 or 32) */
9ee6e8bb 8311 if (insn & (1 << 6)) {
8f8e3aa4 8312 tmp = neon_load_reg(rd, 0);
9ee6e8bb 8313 } else {
7d1b0095 8314 tmp = tcg_temp_new_i32();
8f8e3aa4 8315 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8316 }
8f8e3aa4 8317 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 8318 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 8319 tmp5 = tcg_const_i32(n);
e7c06c4e 8320 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 8321 tcg_temp_free_i32(tmp);
9ee6e8bb 8322 if (insn & (1 << 6)) {
8f8e3aa4 8323 tmp = neon_load_reg(rd, 1);
9ee6e8bb 8324 } else {
7d1b0095 8325 tmp = tcg_temp_new_i32();
8f8e3aa4 8326 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8327 }
8f8e3aa4 8328 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 8329 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 8330 tcg_temp_free_i32(tmp5);
e7c06c4e 8331 tcg_temp_free_ptr(ptr1);
8f8e3aa4 8332 neon_store_reg(rd, 0, tmp2);
3018f259 8333 neon_store_reg(rd, 1, tmp3);
7d1b0095 8334 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8335 } else if ((insn & 0x380) == 0) {
8336 /* VDUP */
32f91fb7
RH
8337 int element;
8338 TCGMemOp size;
8339
133da6aa
JR
8340 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
8341 return 1;
8342 }
9ee6e8bb 8343 if (insn & (1 << 16)) {
32f91fb7
RH
8344 size = MO_8;
8345 element = (insn >> 17) & 7;
9ee6e8bb 8346 } else if (insn & (1 << 17)) {
32f91fb7
RH
8347 size = MO_16;
8348 element = (insn >> 18) & 3;
8349 } else {
8350 size = MO_32;
8351 element = (insn >> 19) & 1;
9ee6e8bb 8352 }
32f91fb7
RH
8353 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
8354 neon_element_offset(rm, element, size),
8355 q ? 16 : 8, q ? 16 : 8);
9ee6e8bb
PB
8356 } else {
8357 return 1;
8358 }
8359 }
8360 }
8361 return 0;
8362}
8363
8b7209fa
RH
8364/* Advanced SIMD three registers of the same length extension.
8365 * 31 25 23 22 20 16 12 11 10 9 8 3 0
8366 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8367 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8368 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8369 */
8370static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
8371{
26c470a7
RH
8372 gen_helper_gvec_3 *fn_gvec = NULL;
8373 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8374 int rd, rn, rm, opr_sz;
8375 int data = 0;
87732318
RH
8376 int off_rn, off_rm;
8377 bool is_long = false, q = extract32(insn, 6, 1);
8378 bool ptr_is_env = false;
8b7209fa
RH
8379
8380 if ((insn & 0xfe200f10) == 0xfc200800) {
8381 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
8382 int size = extract32(insn, 20, 1);
8383 data = extract32(insn, 23, 2); /* rot */
962fcbf2 8384 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8385 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8386 return 1;
8387 }
8388 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
8389 } else if ((insn & 0xfea00f10) == 0xfc800800) {
8390 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
8391 int size = extract32(insn, 20, 1);
8392 data = extract32(insn, 24, 1); /* rot */
962fcbf2 8393 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8394 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8395 return 1;
8396 }
8397 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
8398 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
8399 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
8400 bool u = extract32(insn, 4, 1);
962fcbf2 8401 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8402 return 1;
8403 }
8404 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
87732318
RH
8405 } else if ((insn & 0xff300f10) == 0xfc200810) {
8406 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
8407 int is_s = extract32(insn, 23, 1);
8408 if (!dc_isar_feature(aa32_fhm, s)) {
8409 return 1;
8410 }
8411 is_long = true;
8412 data = is_s; /* is_2 == 0 */
8413 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
8414 ptr_is_env = true;
8b7209fa
RH
8415 } else {
8416 return 1;
8417 }
8418
87732318
RH
8419 VFP_DREG_D(rd, insn);
8420 if (rd & q) {
8421 return 1;
8422 }
8423 if (q || !is_long) {
8424 VFP_DREG_N(rn, insn);
8425 VFP_DREG_M(rm, insn);
8426 if ((rn | rm) & q & !is_long) {
8427 return 1;
8428 }
8429 off_rn = vfp_reg_offset(1, rn);
8430 off_rm = vfp_reg_offset(1, rm);
8431 } else {
8432 rn = VFP_SREG_N(insn);
8433 rm = VFP_SREG_M(insn);
8434 off_rn = vfp_reg_offset(0, rn);
8435 off_rm = vfp_reg_offset(0, rm);
8436 }
8437
8b7209fa
RH
8438 if (s->fp_excp_el) {
8439 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8440 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
8441 return 0;
8442 }
8443 if (!s->vfp_enabled) {
8444 return 1;
8445 }
8446
8447 opr_sz = (1 + q) * 8; /* 8 bytes for a D reg, 16 for a Q reg */
26c470a7 8448 if (fn_gvec_ptr) {
87732318
RH
8449 TCGv_ptr ptr;
8450 if (ptr_is_env) {
8451 ptr = cpu_env;
8452 } else {
8453 ptr = get_fpstatus_ptr(1);
8454 }
8455 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 8456 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
8457 if (!ptr_is_env) {
8458 tcg_temp_free_ptr(ptr);
8459 }
26c470a7 8460 } else {
87732318 8461 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
8462 opr_sz, opr_sz, data, fn_gvec);
8463 }
8b7209fa
RH
8464 return 0;
8465}
8466
638808ff
RH
8467/* Advanced SIMD two registers and a scalar extension.
8468 * 31 24 23 22 20 16 12 11 10 9 8 3 0
8469 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8470 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8471 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8472 *
8473 */
8474
8475static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
8476{
26c470a7
RH
8477 gen_helper_gvec_3 *fn_gvec = NULL;
8478 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 8479 int rd, rn, rm, opr_sz, data;
87732318
RH
8480 int off_rn, off_rm;
8481 bool is_long = false, q = extract32(insn, 6, 1);
8482 bool ptr_is_env = false;
638808ff
RH
8483
8484 if ((insn & 0xff000f10) == 0xfe000800) {
8485 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
8486 int rot = extract32(insn, 20, 2);
8487 int size = extract32(insn, 23, 1);
8488 int index;
8489
962fcbf2 8490 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
8491 return 1;
8492 }
2cc99919 8493 if (size == 0) {
5763190f 8494 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
8495 return 1;
8496 }
8497 /* For fp16, rm is just Vm, and index is M. */
8498 rm = extract32(insn, 0, 4);
8499 index = extract32(insn, 5, 1);
8500 } else {
8501 /* For fp32, rm is the usual M:Vm, and index is 0. */
8502 VFP_DREG_M(rm, insn);
8503 index = 0;
8504 }
8505 data = (index << 2) | rot;
8506 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
8507 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
8508 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
8509 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
8510 int u = extract32(insn, 4, 1);
87732318 8511
962fcbf2 8512 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8513 return 1;
8514 }
8515 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
8516 /* rm is just Vm, and index is M. */
8517 data = extract32(insn, 5, 1); /* index */
8518 rm = extract32(insn, 0, 4);
87732318
RH
8519 } else if ((insn & 0xffa00f10) == 0xfe000810) {
8520 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
8521 int is_s = extract32(insn, 20, 1);
8522 int vm20 = extract32(insn, 0, 3);
8523 int vm3 = extract32(insn, 3, 1);
8524 int m = extract32(insn, 5, 1);
8525 int index;
8526
8527 if (!dc_isar_feature(aa32_fhm, s)) {
8528 return 1;
8529 }
8530 if (q) {
8531 rm = vm20;
8532 index = m * 2 + vm3;
8533 } else {
8534 rm = vm20 * 2 + m;
8535 index = vm3;
8536 }
8537 is_long = true;
8538 data = (index << 2) | is_s; /* is_2 == 0 */
8539 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
8540 ptr_is_env = true;
638808ff
RH
8541 } else {
8542 return 1;
8543 }
8544
87732318
RH
8545 VFP_DREG_D(rd, insn);
8546 if (rd & q) {
8547 return 1;
8548 }
8549 if (q || !is_long) {
8550 VFP_DREG_N(rn, insn);
8551 if (rn & q & !is_long) {
8552 return 1;
8553 }
8554 off_rn = vfp_reg_offset(1, rn);
8555 off_rm = vfp_reg_offset(1, rm);
8556 } else {
8557 rn = VFP_SREG_N(insn);
8558 off_rn = vfp_reg_offset(0, rn);
8559 off_rm = vfp_reg_offset(0, rm);
8560 }
638808ff
RH
8561 if (s->fp_excp_el) {
8562 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8563 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
8564 return 0;
8565 }
8566 if (!s->vfp_enabled) {
8567 return 1;
8568 }
8569
8570 opr_sz = (1 + q) * 8; /* 8 bytes for a D reg, 16 for a Q reg */
26c470a7 8571 if (fn_gvec_ptr) {
87732318
RH
8572 TCGv_ptr ptr;
8573 if (ptr_is_env) {
8574 ptr = cpu_env;
8575 } else {
8576 ptr = get_fpstatus_ptr(1);
8577 }
8578 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
26c470a7 8579 opr_sz, opr_sz, data, fn_gvec_ptr);
87732318
RH
8580 if (!ptr_is_env) {
8581 tcg_temp_free_ptr(ptr);
8582 }
26c470a7 8583 } else {
87732318 8584 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
26c470a7
RH
8585 opr_sz, opr_sz, data, fn_gvec);
8586 }
638808ff
RH
8587 return 0;
8588}
8589
7dcc1f89 8590static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 8591{
4b6a83fb
PM
8592 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
8593 const ARMCPRegInfo *ri;
9ee6e8bb
PB
8594
8595 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
8596
8597 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 8598 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
8599 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
8600 return 1;
8601 }
d614a513 8602 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 8603 return disas_iwmmxt_insn(s, insn);
d614a513 8604 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 8605 return disas_dsp_insn(s, insn);
c0f4af17
PM
8606 }
8607 return 1;
4b6a83fb
PM
8608 }
8609
8610 /* Otherwise treat as a generic register access */
8611 is64 = (insn & (1 << 25)) == 0;
8612 if (!is64 && ((insn & (1 << 4)) == 0)) {
8613 /* cdp */
8614 return 1;
8615 }
8616
8617 crm = insn & 0xf;
8618 if (is64) {
8619 crn = 0;
8620 opc1 = (insn >> 4) & 0xf;
8621 opc2 = 0;
8622 rt2 = (insn >> 16) & 0xf;
8623 } else {
8624 crn = (insn >> 16) & 0xf;
8625 opc1 = (insn >> 21) & 7;
8626 opc2 = (insn >> 5) & 7;
8627 rt2 = 0;
8628 }
8629 isread = (insn >> 20) & 1;
8630 rt = (insn >> 12) & 0xf;
8631
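    /* Field layout as decoded above (MRC/MCR, cond field omitted):
     *   cpnum = insn[11:8], opc1 = insn[23:21], CRn = insn[19:16],
     *   CRm = insn[3:0], opc2 = insn[7:5], Rt = insn[15:12];
     * for the 64-bit MRRC/MCRR forms, opc1 = insn[7:4] and Rt2 = insn[19:16].
     */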
60322b39 8632 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 8633 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
8634 if (ri) {
8635 /* Check access permissions */
dcbff19b 8636 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
8637 return 1;
8638 }
8639
c0f4af17 8640 if (ri->accessfn ||
d614a513 8641 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
8642 /* Emit code to perform further access permissions checks at
8643 * runtime; this may result in an exception.
c0f4af17
PM
8644 * Note that on XScale all cp0..c13 registers do an access check
8645 * call in order to handle c15_cpar.
f59df3f2
PM
8646 */
8647 TCGv_ptr tmpptr;
3f208fd7 8648 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
8649 uint32_t syndrome;
8650
8651 /* Note that since we are an implementation which takes an
8652 * exception on a trapped conditional instruction only if the
8653 * instruction passes its condition code check, we can take
8654 * advantage of the clause in the ARM ARM that allows us to set
8655 * the COND field in the instruction to 0xE in all cases.
8656 * We could fish the actual condition out of the insn (ARM)
8657 * or the condexec bits (Thumb) but it isn't necessary.
8658 */
8659 switch (cpnum) {
8660 case 14:
8661 if (is64) {
8662 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8663 isread, false);
8bcbf37c
PM
8664 } else {
8665 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8666 rt, isread, false);
8bcbf37c
PM
8667 }
8668 break;
8669 case 15:
8670 if (is64) {
8671 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8672 isread, false);
8bcbf37c
PM
8673 } else {
8674 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8675 rt, isread, false);
8bcbf37c
PM
8676 }
8677 break;
8678 default:
8679 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8680 * so this can only happen if this is an ARMv7 or earlier CPU,
8681 * in which case the syndrome information won't actually be
8682 * guest visible.
8683 */
d614a513 8684 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8685 syndrome = syn_uncategorized();
8686 break;
8687 }
8688
43bfa4a1 8689 gen_set_condexec(s);
3977ee5d 8690 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8691 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8692 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8693 tcg_isread = tcg_const_i32(isread);
8694 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8695 tcg_isread);
f59df3f2 8696 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8697 tcg_temp_free_i32(tcg_syn);
3f208fd7 8698 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8699 }
8700
4b6a83fb
PM
8701 /* Handle special cases first */
8702 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8703 case ARM_CP_NOP:
8704 return 0;
8705 case ARM_CP_WFI:
8706 if (isread) {
8707 return 1;
8708 }
eaed129d 8709 gen_set_pc_im(s, s->pc);
dcba3a8d 8710 s->base.is_jmp = DISAS_WFI;
2bee5105 8711 return 0;
4b6a83fb
PM
8712 default:
8713 break;
8714 }
8715
c5a49c63 8716 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8717 gen_io_start();
8718 }
8719
4b6a83fb
PM
8720 if (isread) {
8721 /* Read */
8722 if (is64) {
8723 TCGv_i64 tmp64;
8724 TCGv_i32 tmp;
8725 if (ri->type & ARM_CP_CONST) {
8726 tmp64 = tcg_const_i64(ri->resetvalue);
8727 } else if (ri->readfn) {
8728 TCGv_ptr tmpptr;
4b6a83fb
PM
8729 tmp64 = tcg_temp_new_i64();
8730 tmpptr = tcg_const_ptr(ri);
8731 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8732 tcg_temp_free_ptr(tmpptr);
8733 } else {
8734 tmp64 = tcg_temp_new_i64();
8735 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8736 }
8737 tmp = tcg_temp_new_i32();
ecc7b3aa 8738 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8739 store_reg(s, rt, tmp);
8740 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8741 tmp = tcg_temp_new_i32();
ecc7b3aa 8742 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8743 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8744 store_reg(s, rt2, tmp);
8745 } else {
39d5492a 8746 TCGv_i32 tmp;
4b6a83fb
PM
8747 if (ri->type & ARM_CP_CONST) {
8748 tmp = tcg_const_i32(ri->resetvalue);
8749 } else if (ri->readfn) {
8750 TCGv_ptr tmpptr;
4b6a83fb
PM
8751 tmp = tcg_temp_new_i32();
8752 tmpptr = tcg_const_ptr(ri);
8753 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8754 tcg_temp_free_ptr(tmpptr);
8755 } else {
8756 tmp = load_cpu_offset(ri->fieldoffset);
8757 }
8758 if (rt == 15) {
8759 /* Destination register of r15 for 32 bit loads sets
8760 * the condition codes from the high 4 bits of the value
8761 */
8762 gen_set_nzcv(tmp);
8763 tcg_temp_free_i32(tmp);
8764 } else {
8765 store_reg(s, rt, tmp);
8766 }
8767 }
8768 } else {
8769 /* Write */
8770 if (ri->type & ARM_CP_CONST) {
8771 /* If not forbidden by access permissions, treat as WI */
8772 return 0;
8773 }
8774
8775 if (is64) {
39d5492a 8776 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8777 TCGv_i64 tmp64 = tcg_temp_new_i64();
8778 tmplo = load_reg(s, rt);
8779 tmphi = load_reg(s, rt2);
8780 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8781 tcg_temp_free_i32(tmplo);
8782 tcg_temp_free_i32(tmphi);
8783 if (ri->writefn) {
8784 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8785 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8786 tcg_temp_free_ptr(tmpptr);
8787 } else {
8788 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8789 }
8790 tcg_temp_free_i64(tmp64);
8791 } else {
8792 if (ri->writefn) {
39d5492a 8793 TCGv_i32 tmp;
4b6a83fb 8794 TCGv_ptr tmpptr;
4b6a83fb
PM
8795 tmp = load_reg(s, rt);
8796 tmpptr = tcg_const_ptr(ri);
8797 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8798 tcg_temp_free_ptr(tmpptr);
8799 tcg_temp_free_i32(tmp);
8800 } else {
39d5492a 8801 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8802 store_cpu_offset(tmp, ri->fieldoffset);
8803 }
8804 }
2452731c
PM
8805 }
8806
c5a49c63 8807 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8808 /* I/O operations must end the TB here (whether read or write) */
8809 gen_io_end();
8810 gen_lookup_tb(s);
8811 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8812 /* We default to ending the TB on a coprocessor register write,
8813 * but allow this to be suppressed by the register definition
8814 * (usually only necessary to work around guest bugs).
8815 */
2452731c 8816 gen_lookup_tb(s);
4b6a83fb 8817 }
2452731c 8818
4b6a83fb
PM
8819 return 0;
8820 }
8821
626187d8
PM
8822 /* Unknown register; this might be a guest error or a QEMU
8823 * unimplemented feature.
8824 */
8825 if (is64) {
8826 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8827 "64 bit system register cp:%d opc1: %d crm:%d "
8828 "(%s)\n",
8829 isread ? "read" : "write", cpnum, opc1, crm,
8830 s->ns ? "non-secure" : "secure");
626187d8
PM
8831 } else {
8832 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8833 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8834 "(%s)\n",
8835 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8836 s->ns ? "non-secure" : "secure");
626187d8
PM
8837 }
8838
4a9a539f 8839 return 1;
9ee6e8bb
PB
8840}
8841
5e3f878a
PB
8842
8843/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8844static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8845{
39d5492a 8846 TCGv_i32 tmp;
7d1b0095 8847 tmp = tcg_temp_new_i32();
ecc7b3aa 8848 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8849 store_reg(s, rlow, tmp);
7d1b0095 8850 tmp = tcg_temp_new_i32();
5e3f878a 8851 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8852 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8853 store_reg(s, rhigh, tmp);
8854}
8855
8856/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8857static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8858{
a7812ae4 8859 TCGv_i64 tmp;
39d5492a 8860 TCGv_i32 tmp2;
5e3f878a 8861
36aa55dc 8862 /* Load value and extend to 64 bits. */
a7812ae4 8863 tmp = tcg_temp_new_i64();
5e3f878a
PB
8864 tmp2 = load_reg(s, rlow);
8865 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8866 tcg_temp_free_i32(tmp2);
5e3f878a 8867 tcg_gen_add_i64(val, val, tmp);
b75263d6 8868 tcg_temp_free_i64(tmp);
5e3f878a
PB
8869}
8870
8871/* load and add a 64-bit value from a register pair. */
a7812ae4 8872static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8873{
a7812ae4 8874 TCGv_i64 tmp;
39d5492a
PM
8875 TCGv_i32 tmpl;
8876 TCGv_i32 tmph;
5e3f878a
PB
8877
8878 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8879 tmpl = load_reg(s, rlow);
8880 tmph = load_reg(s, rhigh);
a7812ae4 8881 tmp = tcg_temp_new_i64();
36aa55dc 8882 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8883 tcg_temp_free_i32(tmpl);
8884 tcg_temp_free_i32(tmph);
5e3f878a 8885 tcg_gen_add_i64(val, val, tmp);
b75263d6 8886 tcg_temp_free_i64(tmp);
5e3f878a
PB
8887}
8888
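/* Taken together these helpers provide the 64-bit accumulate step of
 * UMLAL/SMLAL-style insns: gen_addq(s, val, rlow, rhigh) followed by
 * gen_storeq_reg(s, rlow, rhigh, val). (Usage sketch, not from the source.)
 */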
c9f10124 8889/* Set N and Z flags from hi|lo: NF takes the sign bit of the high word, and ZF is zero iff the whole 64-bit value is zero. */
39d5492a 8890static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8891{
c9f10124
RH
8892 tcg_gen_mov_i32(cpu_NF, hi);
8893 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8894}
8895
426f5abc
PB
8896/* Load/Store exclusive instructions are implemented by remembering
8897 the value/address loaded, and seeing if these are the same
354161b3 8898 when the store is performed. This should be sufficient to implement
426f5abc 8899 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8900 regular stores. The compare vs the remembered value is done during
8901 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 8902static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8903 TCGv_i32 addr, int size)
426f5abc 8904{
94ee24e7 8905 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8906 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8907
50225ad0
PM
8908 s->is_ldex = true;
8909
426f5abc 8910 if (size == 3) {
39d5492a 8911 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8912 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8913
3448d47b
PM
8914 /* For AArch32, architecturally the 32-bit word at the lowest
8915 * address is always Rt and the one at addr+4 is Rt2, even if
8916 * the CPU is big-endian. That means we don't want to do a
8917 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8918 * for an architecturally 64-bit access, but instead do a
8919 * 64-bit access using MO_BE if appropriate and then split
8920 * the two halves.
8921 * This only makes a difference for BE32 user-mode, where
8922 * frob64() must not flip the two halves of the 64-bit data
8923 * but this code must treat BE32 user-mode like BE32 system.
8924 */
8925 TCGv taddr = gen_aa32_addr(s, addr, opc);
8926
8927 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8928 tcg_temp_free(taddr);
354161b3 8929 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8930 if (s->be_data == MO_BE) {
8931 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8932 } else {
8933 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8934 }
354161b3
EC
8935 tcg_temp_free_i64(t64);
8936
8937 store_reg(s, rt2, tmp2);
03d05e2d 8938 } else {
354161b3 8939 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8940 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8941 }
03d05e2d
PM
8942
8943 store_reg(s, rt, tmp);
8944 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8945}
8946
8947static void gen_clrex(DisasContext *s)
8948{
03d05e2d 8949 tcg_gen_movi_i64(cpu_exclusive_addr, -1); /* -1 never matches a valid address, so the monitor is cleared */
426f5abc
PB
8950}
8951
426f5abc 8952static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8953 TCGv_i32 addr, int size)
426f5abc 8954{
354161b3
EC
8955 TCGv_i32 t0, t1, t2;
8956 TCGv_i64 extaddr;
8957 TCGv taddr;
42a268c2
RH
8958 TCGLabel *done_label;
8959 TCGLabel *fail_label;
354161b3 8960 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
8961
8962 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8963 [addr] = {Rt};
8964 {Rd} = 0;
8965 } else {
8966 {Rd} = 1;
8967 } */
8968 fail_label = gen_new_label();
8969 done_label = gen_new_label();
03d05e2d
PM
8970 extaddr = tcg_temp_new_i64();
8971 tcg_gen_extu_i32_i64(extaddr, addr);
8972 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8973 tcg_temp_free_i64(extaddr);
8974
354161b3
EC
8975 taddr = gen_aa32_addr(s, addr, opc);
8976 t0 = tcg_temp_new_i32();
8977 t1 = load_reg(s, rt);
426f5abc 8978 if (size == 3) {
354161b3
EC
8979 TCGv_i64 o64 = tcg_temp_new_i64();
8980 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 8981
354161b3 8982 t2 = load_reg(s, rt2);
3448d47b
PM
8983 /* For AArch32, architecturally the 32-bit word at the lowest
8984 * address is always Rt and the one at addr+4 is Rt2, even if
8985 * the CPU is big-endian. Since we're going to treat this as a
8986 * single 64-bit BE store, we need to put the two halves in the
8987 * opposite order for BE to LE, so that they end up in the right
8988 * places.
8989 * We don't want gen_aa32_frob64() because that does the wrong
8990 * thing for BE32 usermode.
8991 */
8992 if (s->be_data == MO_BE) {
8993 tcg_gen_concat_i32_i64(n64, t2, t1);
8994 } else {
8995 tcg_gen_concat_i32_i64(n64, t1, t2);
8996 }
354161b3 8997 tcg_temp_free_i32(t2);
03d05e2d 8998
354161b3
EC
8999 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
9000 get_mem_index(s), opc);
9001 tcg_temp_free_i64(n64);
9002
354161b3
EC
9003 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
9004 tcg_gen_extrl_i64_i32(t0, o64);
9005
9006 tcg_temp_free_i64(o64);
9007 } else {
9008 t2 = tcg_temp_new_i32();
9009 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
9010 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
9011 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
9012 tcg_temp_free_i32(t2);
426f5abc 9013 }
354161b3
EC
9014 tcg_temp_free_i32(t1);
9015 tcg_temp_free(taddr);
9016 tcg_gen_mov_i32(cpu_R[rd], t0);
9017 tcg_temp_free_i32(t0);
426f5abc 9018 tcg_gen_br(done_label);
354161b3 9019
426f5abc
PB
9020 gen_set_label(fail_label);
9021 tcg_gen_movi_i32(cpu_R[rd], 1);
9022 gen_set_label(done_label);
03d05e2d 9023 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 9024}
426f5abc 9025
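/* A hedged sketch of how gen_store_exclusive() above maps to TCG:
 * the address check is a plain brcond against cpu_exclusive_addr,
 * while the data check rides on the atomic cmpxchg, which yields
 * the value that was actually in memory. Comparing that against the
 * remembered cpu_exclusive_val with setcond produces 0 on success
 * and 1 on failure, which is exactly the Rd result STREX must
 * return:
 *
 *   old = atomic_cmpxchg(mem, expected = exclusive_val, new = Rt);
 *   Rd  = (old != exclusive_val);  // 0: store done, 1: store failed
 */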
81465888
PM
9026/* gen_srs:
9027 * @env: CPUARMState
9028 * @s: DisasContext
9029 * @mode: mode field from insn (which stack to store to)
9030 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
9031 * @writeback: true if writeback bit set
9032 *
9033 * Generate code for the SRS (Store Return State) insn.
9034 */
9035static void gen_srs(DisasContext *s,
9036 uint32_t mode, uint32_t amode, bool writeback)
9037{
9038 int32_t offset;
cbc0326b
PM
9039 TCGv_i32 addr, tmp;
9040 bool undef = false;
9041
9042 /* SRS is:
9043 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 9044 * and the specified mode is Monitor mode
cbc0326b
PM
9045 * - UNDEFINED in Hyp mode
9046 * - UNPREDICTABLE in User or System mode
9047 * - UNPREDICTABLE if the specified mode is:
9048 * -- not implemented
9049 * -- not a valid mode number
9050 * -- a mode that's at a higher exception level
9051 * -- Monitor, if we are Non-secure
f01377f5 9052 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 9053 */
ba63cf47 9054 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
9055 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
9056 return;
9057 }
9058
9059 if (s->current_el == 0 || s->current_el == 2) {
9060 undef = true;
9061 }
9062
9063 switch (mode) {
9064 case ARM_CPU_MODE_USR:
9065 case ARM_CPU_MODE_FIQ:
9066 case ARM_CPU_MODE_IRQ:
9067 case ARM_CPU_MODE_SVC:
9068 case ARM_CPU_MODE_ABT:
9069 case ARM_CPU_MODE_UND:
9070 case ARM_CPU_MODE_SYS:
9071 break;
9072 case ARM_CPU_MODE_HYP:
9073 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
9074 undef = true;
9075 }
9076 break;
9077 case ARM_CPU_MODE_MON:
9078 /* No need to check specifically for "are we non-secure" because
9079 * we've already made EL0 UNDEF and handled the trap for S-EL1;
9080 * so if this isn't EL3 then we must be non-secure.
9081 */
9082 if (s->current_el != 3) {
9083 undef = true;
9084 }
9085 break;
9086 default:
9087 undef = true;
9088 }
9089
9090 if (undef) {
9091 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9092 default_exception_el(s));
9093 return;
9094 }
9095
9096 addr = tcg_temp_new_i32();
9097 tmp = tcg_const_i32(mode);
f01377f5
PM
9098 /* get_r13_banked() will raise an exception if called from System mode */
9099 gen_set_condexec(s);
9100 gen_set_pc_im(s, s->pc - 4);
81465888
PM
9101 gen_helper_get_r13_banked(addr, cpu_env, tmp);
9102 tcg_temp_free_i32(tmp);
9103 switch (amode) {
9104 case 0: /* DA */
9105 offset = -4;
9106 break;
9107 case 1: /* IA */
9108 offset = 0;
9109 break;
9110 case 2: /* DB */
9111 offset = -8;
9112 break;
9113 case 3: /* IB */
9114 offset = 4;
9115 break;
9116 default:
9117 abort();
9118 }
9119 tcg_gen_addi_i32(addr, addr, offset);
9120 tmp = load_reg(s, 14);
12dcc321 9121 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9122 tcg_temp_free_i32(tmp);
81465888
PM
9123 tmp = load_cpu_field(spsr);
9124 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 9125 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9126 tcg_temp_free_i32(tmp);
81465888
PM
9127 if (writeback) {
9128 switch (amode) {
9129 case 0:
9130 offset = -8;
9131 break;
9132 case 1:
9133 offset = 4;
9134 break;
9135 case 2:
9136 offset = -4;
9137 break;
9138 case 3:
9139 offset = 0;
9140 break;
9141 default:
9142 abort();
9143 }
9144 tcg_gen_addi_i32(addr, addr, offset);
9145 tmp = tcg_const_i32(mode);
9146 gen_helper_set_r13_banked(cpu_env, tmp, addr);
9147 tcg_temp_free_i32(tmp);
9148 }
9149 tcg_temp_free_i32(addr);
dcba3a8d 9150 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
9151}
9152
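/* Worked example for gen_srs() above (illustrative only):
 * SRSDB sp!, #19 has amode == 2, so with base = R13_svc the first
 * store (LR) goes to base - 8 and the second (SPSR) to base - 4;
 * the writeback pass then adds the DB writeback offset of -4 to
 * addr (at base - 4 by then), leaving R13_svc = base - 8, i.e. a
 * full-descending push of two words.
 */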
c2d9644e
RK
9153/* Generate a label used for skipping this instruction */
9154static void arm_gen_condlabel(DisasContext *s)
9155{
9156 if (!s->condjmp) {
9157 s->condlabel = gen_new_label();
9158 s->condjmp = 1;
9159 }
9160}
9161
9162/* Skip this instruction if the ARM condition is false */
9163static void arm_skip_unless(DisasContext *s, uint32_t cond)
9164{
9165 arm_gen_condlabel(s);
9166 arm_gen_test_cc(cond ^ 1, s->condlabel);
9167}
9168
f4df2210 9169static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 9170{
f4df2210 9171 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
9172 TCGv_i32 tmp;
9173 TCGv_i32 tmp2;
9174 TCGv_i32 tmp3;
9175 TCGv_i32 addr;
a7812ae4 9176 TCGv_i64 tmp64;
9ee6e8bb 9177
e13886e3
PM
9178 /* M variants do not implement ARM mode; this must raise the INVSTATE
9179 * UsageFault exception.
9180 */
b53d8923 9181 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
9182 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
9183 default_exception_el(s));
9184 return;
b53d8923 9185 }
9ee6e8bb
PB
9186 cond = insn >> 28;
9187 if (cond == 0xf){
be5e7a76
DES
9188 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
9189 * choose to UNDEF. In ARMv5 and above the space is used
9190 * for miscellaneous unconditional instructions.
9191 */
9192 ARCH(5);
9193
9ee6e8bb
PB
9194 /* Unconditional instructions. */
9195 if (((insn >> 25) & 7) == 1) {
9196 /* NEON Data processing. */
d614a513 9197 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9198 goto illegal_op;
d614a513 9199 }
9ee6e8bb 9200
7dcc1f89 9201 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9202 goto illegal_op;
7dcc1f89 9203 }
9ee6e8bb
PB
9204 return;
9205 }
9206 if ((insn & 0x0f100000) == 0x04000000) {
9207 /* NEON load/store. */
d614a513 9208 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9209 goto illegal_op;
d614a513 9210 }
9ee6e8bb 9211
7dcc1f89 9212 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 9213 goto illegal_op;
7dcc1f89 9214 }
9ee6e8bb
PB
9215 return;
9216 }
6a57f3eb
WN
9217 if ((insn & 0x0f000e10) == 0x0e000a00) {
9218 /* VFP. */
7dcc1f89 9219 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9220 goto illegal_op;
9221 }
9222 return;
9223 }
3d185e5d
PM
9224 if (((insn & 0x0f30f000) == 0x0510f000) ||
9225 ((insn & 0x0f30f010) == 0x0710f000)) {
9226 if ((insn & (1 << 22)) == 0) {
9227 /* PLDW; v7MP */
d614a513 9228 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9229 goto illegal_op;
9230 }
9231 }
9232 /* Otherwise PLD; v5TE+ */
be5e7a76 9233 ARCH(5TE);
3d185e5d
PM
9234 return;
9235 }
9236 if (((insn & 0x0f70f000) == 0x0450f000) ||
9237 ((insn & 0x0f70f010) == 0x0650f000)) {
9238 ARCH(7);
9239 return; /* PLI; V7 */
9240 }
9241 if (((insn & 0x0f700000) == 0x04100000) ||
9242 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 9243 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9244 goto illegal_op;
9245 }
9246 return; /* v7MP: Unallocated memory hint: must NOP */
9247 }
9248
9249 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
9250 ARCH(6);
9251 /* setend */
9886ecdf
PB
9252 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
9253 gen_helper_setend(cpu_env);
dcba3a8d 9254 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
9255 }
9256 return;
9257 } else if ((insn & 0x0fffff00) == 0x057ff000) {
9258 switch ((insn >> 4) & 0xf) {
9259 case 1: /* clrex */
9260 ARCH(6K);
426f5abc 9261 gen_clrex(s);
9ee6e8bb
PB
9262 return;
9263 case 4: /* dsb */
9264 case 5: /* dmb */
9ee6e8bb 9265 ARCH(7);
61e4c432 9266 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 9267 return;
6df99dec
SS
9268 case 6: /* isb */
9269 /* We need to break the TB after this insn to execute
9270 * self-modifying code correctly and also to take
9271 * any pending interrupts immediately.
9272 */
0b609cc1 9273 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 9274 return;
9888bd1e
RH
9275 case 7: /* sb */
9276 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
9277 goto illegal_op;
9278 }
9279 /*
9280 * TODO: There is no speculation barrier opcode
9281 * for TCG; MB and end the TB instead.
9282 */
9283 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9284 gen_goto_tb(s, 0, s->pc & ~1);
9285 return;
9ee6e8bb
PB
9286 default:
9287 goto illegal_op;
9288 }
9289 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
9290 /* srs */
81465888
PM
9291 ARCH(6);
9292 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 9293 return;
ea825eee 9294 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 9295 /* rfe */
c67b6b71 9296 int32_t offset;
9ee6e8bb
PB
9297 if (IS_USER(s))
9298 goto illegal_op;
9299 ARCH(6);
9300 rn = (insn >> 16) & 0xf;
b0109805 9301 addr = load_reg(s, rn);
9ee6e8bb
PB
9302 i = (insn >> 23) & 3;
9303 switch (i) {
b0109805 9304 case 0: offset = -4; break; /* DA */
c67b6b71
FN
9305 case 1: offset = 0; break; /* IA */
9306 case 2: offset = -8; break; /* DB */
b0109805 9307 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
9308 default: abort();
9309 }
9310 if (offset)
b0109805
PB
9311 tcg_gen_addi_i32(addr, addr, offset);
9312 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 9313 tmp = tcg_temp_new_i32();
12dcc321 9314 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9315 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9316 tmp2 = tcg_temp_new_i32();
12dcc321 9317 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9318 if (insn & (1 << 21)) {
9319 /* Base writeback. */
9320 switch (i) {
b0109805 9321 case 0: offset = -8; break;
c67b6b71
FN
9322 case 1: offset = 4; break;
9323 case 2: offset = -4; break;
b0109805 9324 case 3: offset = 0; break;
9ee6e8bb
PB
9325 default: abort();
9326 }
9327 if (offset)
b0109805
PB
9328 tcg_gen_addi_i32(addr, addr, offset);
9329 store_reg(s, rn, addr);
9330 } else {
7d1b0095 9331 tcg_temp_free_i32(addr);
9ee6e8bb 9332 }
b0109805 9333 gen_rfe(s, tmp, tmp2);
c67b6b71 9334 return;
9ee6e8bb
PB
9335 } else if ((insn & 0x0e000000) == 0x0a000000) {
9336 /* branch link and change to thumb (blx <offset>) */
9337 int32_t offset;
9338
9339 val = (uint32_t)s->pc;
7d1b0095 9340 tmp = tcg_temp_new_i32();
d9ba4830
PB
9341 tcg_gen_movi_i32(tmp, val);
9342 store_reg(s, 14, tmp);
9ee6e8bb
PB
9343 /* Sign-extend the 24-bit offset */
9344 offset = (((int32_t)insn) << 8) >> 8;
9345 /* offset * 4 + bit24 * 2 + (thumb bit) */
9346 val += (offset << 2) | ((insn >> 23) & 2) | 1;
9347 /* pipeline offset */
9348 val += 4;
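            /* Worked example (illustrative only): insn = 0xfb000001
             * encodes BLX with imm24 = 1 and H (bit 24) = 1.
             * Starting from s->pc = insn_addr + 4, val becomes
             * insn_addr + 4 + (1 << 2) + 2 + 1 + 4, i.e. the
             * architectural target insn_addr + 8 + 4*imm24 + 2*H,
             * with bit 0 set to request Thumb state in gen_bx_im().
             */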
be5e7a76 9349 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 9350 gen_bx_im(s, val);
9ee6e8bb
PB
9351 return;
9352 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 9353 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 9354 /* iWMMXt register transfer. */
c0f4af17 9355 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 9356 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 9357 return;
c0f4af17
PM
9358 }
9359 }
9ee6e8bb 9360 }
8b7209fa
RH
9361 } else if ((insn & 0x0e000a00) == 0x0c000800
9362 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9363 if (disas_neon_insn_3same_ext(s, insn)) {
9364 goto illegal_op;
9365 }
9366 return;
638808ff
RH
9367 } else if ((insn & 0x0f000a00) == 0x0e000800
9368 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9369 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
9370 goto illegal_op;
9371 }
9372 return;
9ee6e8bb
PB
9373 } else if ((insn & 0x0fe00000) == 0x0c400000) {
9374 /* Coprocessor double register transfer. */
be5e7a76 9375 ARCH(5TE);
9ee6e8bb
PB
9376 } else if ((insn & 0x0f000010) == 0x0e000010) {
9377 /* Additional coprocessor register transfer. */
7997d92f 9378 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
9379 uint32_t mask;
9380 uint32_t val;
9381 /* cps (privileged) */
9382 if (IS_USER(s))
9383 return;
9384 mask = val = 0;
9385 if (insn & (1 << 19)) {
9386 if (insn & (1 << 8))
9387 mask |= CPSR_A;
9388 if (insn & (1 << 7))
9389 mask |= CPSR_I;
9390 if (insn & (1 << 6))
9391 mask |= CPSR_F;
9392 if (insn & (1 << 18))
9393 val |= mask;
9394 }
7997d92f 9395 if (insn & (1 << 17)) {
9ee6e8bb
PB
9396 mask |= CPSR_M;
9397 val |= (insn & 0x1f);
9398 }
9399 if (mask) {
2fbac54b 9400 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
9401 }
9402 return;
9403 }
9404 goto illegal_op;
9405 }
9406 if (cond != 0xe) {
9407 /* If the condition is not "always", generate a conditional
9408 jump to the next instruction */
c2d9644e 9409 arm_skip_unless(s, cond);
9ee6e8bb
PB
9410 }
9411 if ((insn & 0x0f900000) == 0x03000000) {
9412 if ((insn & (1 << 21)) == 0) {
9413 ARCH(6T2);
9414 rd = (insn >> 12) & 0xf;
9415 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9416 if ((insn & (1 << 22)) == 0) {
9417 /* MOVW */
7d1b0095 9418 tmp = tcg_temp_new_i32();
5e3f878a 9419 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
9420 } else {
9421 /* MOVT */
5e3f878a 9422 tmp = load_reg(s, rd);
86831435 9423 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9424 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 9425 }
5e3f878a 9426 store_reg(s, rd, tmp);
9ee6e8bb
PB
9427 } else {
9428 if (((insn >> 12) & 0xf) != 0xf)
9429 goto illegal_op;
9430 if (((insn >> 16) & 0xf) == 0) {
9431 gen_nop_hint(s, insn & 0xff);
9432 } else {
9433 /* CPSR = immediate */
9434 val = insn & 0xff;
9435 shift = ((insn >> 8) & 0xf) * 2;
9436 if (shift)
9437 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 9438 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
9439 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
9440 i, val)) {
9ee6e8bb 9441 goto illegal_op;
7dcc1f89 9442 }
9ee6e8bb
PB
9443 }
9444 }
9445 } else if ((insn & 0x0f900000) == 0x01000000
9446 && (insn & 0x00000090) != 0x00000090) {
9447 /* miscellaneous instructions */
9448 op1 = (insn >> 21) & 3;
9449 sh = (insn >> 4) & 0xf;
9450 rm = insn & 0xf;
9451 switch (sh) {
8bfd0550
PM
9452 case 0x0: /* MSR, MRS */
9453 if (insn & (1 << 9)) {
9454 /* MSR (banked) and MRS (banked) */
9455 int sysm = extract32(insn, 16, 4) |
9456 (extract32(insn, 8, 1) << 4);
9457 int r = extract32(insn, 22, 1);
9458
9459 if (op1 & 1) {
9460 /* MSR (banked) */
9461 gen_msr_banked(s, r, sysm, rm);
9462 } else {
9463 /* MRS (banked) */
9464 int rd = extract32(insn, 12, 4);
9465
9466 gen_mrs_banked(s, r, sysm, rd);
9467 }
9468 break;
9469 }
9470
9471 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
9472 if (op1 & 1) {
9473 /* PSR = reg */
2fbac54b 9474 tmp = load_reg(s, rm);
9ee6e8bb 9475 i = ((op1 & 2) != 0);
7dcc1f89 9476 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
9477 goto illegal_op;
9478 } else {
9479 /* reg = PSR */
9480 rd = (insn >> 12) & 0xf;
9481 if (op1 & 2) {
9482 if (IS_USER(s))
9483 goto illegal_op;
d9ba4830 9484 tmp = load_cpu_field(spsr);
9ee6e8bb 9485 } else {
7d1b0095 9486 tmp = tcg_temp_new_i32();
9ef39277 9487 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9488 }
d9ba4830 9489 store_reg(s, rd, tmp);
9ee6e8bb
PB
9490 }
9491 break;
9492 case 0x1:
9493 if (op1 == 1) {
9494 /* branch/exchange thumb (bx). */
be5e7a76 9495 ARCH(4T);
d9ba4830
PB
9496 tmp = load_reg(s, rm);
9497 gen_bx(s, tmp);
9ee6e8bb
PB
9498 } else if (op1 == 3) {
9499 /* clz */
be5e7a76 9500 ARCH(5);
9ee6e8bb 9501 rd = (insn >> 12) & 0xf;
1497c961 9502 tmp = load_reg(s, rm);
7539a012 9503 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 9504 store_reg(s, rd, tmp);
9ee6e8bb
PB
9505 } else {
9506 goto illegal_op;
9507 }
9508 break;
9509 case 0x2:
9510 if (op1 == 1) {
9511 ARCH(5J); /* bxj */
9512 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9513 tmp = load_reg(s, rm);
9514 gen_bx(s, tmp);
9ee6e8bb
PB
9515 } else {
9516 goto illegal_op;
9517 }
9518 break;
9519 case 0x3:
9520 if (op1 != 1)
9521 goto illegal_op;
9522
be5e7a76 9523 ARCH(5);
9ee6e8bb 9524 /* branch link/exchange thumb (blx) */
d9ba4830 9525 tmp = load_reg(s, rm);
7d1b0095 9526 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
9527 tcg_gen_movi_i32(tmp2, s->pc);
9528 store_reg(s, 14, tmp2);
9529 gen_bx(s, tmp);
9ee6e8bb 9530 break;
eb0ecd5a
WN
9531 case 0x4:
9532 {
9533 /* crc32/crc32c */
9534 uint32_t c = extract32(insn, 8, 4);
9535
9536 /* Check this CPU supports ARMv8 CRC instructions.
9537 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
9538 * Bits 8, 10 and 11 should be zero.
9539 */
962fcbf2 9540 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
9541 goto illegal_op;
9542 }
9543
9544 rn = extract32(insn, 16, 4);
9545 rd = extract32(insn, 12, 4);
9546
9547 tmp = load_reg(s, rn);
9548 tmp2 = load_reg(s, rm);
aa633469
PM
9549 if (op1 == 0) {
9550 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9551 } else if (op1 == 1) {
9552 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9553 }
eb0ecd5a
WN
9554 tmp3 = tcg_const_i32(1 << op1);
9555 if (c & 0x2) {
9556 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9557 } else {
9558 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9559 }
9560 tcg_temp_free_i32(tmp2);
9561 tcg_temp_free_i32(tmp3);
9562 store_reg(s, rd, tmp);
9563 break;
9564 }
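        /* Worked example for the CRC block above (illustrative only):
         * CRC32CW rd, rn, rm encodes op1 == 2 with bit 9 set, so
         * c & 0x2 selects the crc32c helper, no masking of rm is
         * needed, and tmp3 = 1 << 2 = 4 tells the helper to process
         * all four bytes of the value.
         */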
9ee6e8bb 9565 case 0x5: /* saturating add/subtract */
be5e7a76 9566 ARCH(5TE);
9ee6e8bb
PB
9567 rd = (insn >> 12) & 0xf;
9568 rn = (insn >> 16) & 0xf;
b40d0353 9569 tmp = load_reg(s, rm);
5e3f878a 9570 tmp2 = load_reg(s, rn);
9ee6e8bb 9571 if (op1 & 2)
9ef39277 9572 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 9573 if (op1 & 1)
9ef39277 9574 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9575 else
9ef39277 9576 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9577 tcg_temp_free_i32(tmp2);
5e3f878a 9578 store_reg(s, rd, tmp);
9ee6e8bb 9579 break;
55c544ed
PM
9580 case 0x6: /* ERET */
9581 if (op1 != 3) {
9582 goto illegal_op;
9583 }
9584 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
9585 goto illegal_op;
9586 }
9587 if ((insn & 0x000fff0f) != 0x0000000e) {
9588 /* UNPREDICTABLE; we choose to UNDEF */
9589 goto illegal_op;
9590 }
9591
9592 if (s->current_el == 2) {
9593 tmp = load_cpu_field(elr_el[2]);
9594 } else {
9595 tmp = load_reg(s, 14);
9596 }
9597 gen_exception_return(s, tmp);
9598 break;
49e14940 9599 case 7:
d4a2dc67
PM
9600 {
9601 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 9602 switch (op1) {
19a6e31c
PM
9603 case 0:
9604 /* HLT */
9605 gen_hlt(s, imm16);
9606 break;
37e6456e
PM
9607 case 1:
9608 /* bkpt */
9609 ARCH(5);
c900a2e6 9610 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
9611 break;
9612 case 2:
9613 /* Hypervisor call (v7) */
9614 ARCH(7);
9615 if (IS_USER(s)) {
9616 goto illegal_op;
9617 }
9618 gen_hvc(s, imm16);
9619 break;
9620 case 3:
9621 /* Secure monitor call (v6+) */
9622 ARCH(6K);
9623 if (IS_USER(s)) {
9624 goto illegal_op;
9625 }
9626 gen_smc(s);
9627 break;
9628 default:
19a6e31c 9629 g_assert_not_reached();
49e14940 9630 }
9ee6e8bb 9631 break;
d4a2dc67 9632 }
9ee6e8bb
PB
9633 case 0x8: /* signed multiply */
9634 case 0xa:
9635 case 0xc:
9636 case 0xe:
be5e7a76 9637 ARCH(5TE);
9ee6e8bb
PB
9638 rs = (insn >> 8) & 0xf;
9639 rn = (insn >> 12) & 0xf;
9640 rd = (insn >> 16) & 0xf;
9641 if (op1 == 1) {
9642 /* (32 * 16) >> 16 */
5e3f878a
PB
9643 tmp = load_reg(s, rm);
9644 tmp2 = load_reg(s, rs);
9ee6e8bb 9645 if (sh & 4)
5e3f878a 9646 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9647 else
5e3f878a 9648 gen_sxth(tmp2);
a7812ae4
PB
9649 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9650 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9651 tmp = tcg_temp_new_i32();
ecc7b3aa 9652 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 9653 tcg_temp_free_i64(tmp64);
9ee6e8bb 9654 if ((sh & 2) == 0) {
5e3f878a 9655 tmp2 = load_reg(s, rn);
9ef39277 9656 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9657 tcg_temp_free_i32(tmp2);
9ee6e8bb 9658 }
5e3f878a 9659 store_reg(s, rd, tmp);
9ee6e8bb
PB
9660 } else {
9661 /* 16 * 16 */
5e3f878a
PB
9662 tmp = load_reg(s, rm);
9663 tmp2 = load_reg(s, rs);
9664 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 9665 tcg_temp_free_i32(tmp2);
9ee6e8bb 9666 if (op1 == 2) {
a7812ae4
PB
9667 tmp64 = tcg_temp_new_i64();
9668 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9669 tcg_temp_free_i32(tmp);
a7812ae4
PB
9670 gen_addq(s, tmp64, rn, rd);
9671 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 9672 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9673 } else {
9674 if (op1 == 0) {
5e3f878a 9675 tmp2 = load_reg(s, rn);
9ef39277 9676 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9677 tcg_temp_free_i32(tmp2);
9ee6e8bb 9678 }
5e3f878a 9679 store_reg(s, rd, tmp);
9ee6e8bb
PB
9680 }
9681 }
9682 break;
9683 default:
9684 goto illegal_op;
9685 }
9686 } else if (((insn & 0x0e000000) == 0 &&
9687 (insn & 0x00000090) != 0x90) ||
9688 ((insn & 0x0e000000) == (1 << 25))) {
9689 int set_cc, logic_cc, shiftop;
9690
9691 op1 = (insn >> 21) & 0xf;
9692 set_cc = (insn >> 20) & 1;
9693 logic_cc = table_logic_cc[op1] & set_cc;
9694
9695 /* data processing instruction */
9696 if (insn & (1 << 25)) {
9697 /* immediate operand */
9698 val = insn & 0xff;
9699 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 9700 if (shift) {
9ee6e8bb 9701 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 9702 }
7d1b0095 9703 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
9704 tcg_gen_movi_i32(tmp2, val);
9705 if (logic_cc && shift) {
9706 gen_set_CF_bit31(tmp2);
9707 }
9ee6e8bb
PB
9708 } else {
9709 /* register */
9710 rm = (insn) & 0xf;
e9bb4aa9 9711 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9712 shiftop = (insn >> 5) & 3;
9713 if (!(insn & (1 << 4))) {
9714 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9715 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9716 } else {
9717 rs = (insn >> 8) & 0xf;
8984bd2e 9718 tmp = load_reg(s, rs);
e9bb4aa9 9719 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9720 }
9721 }
9722 if (op1 != 0x0f && op1 != 0x0d) {
9723 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9724 tmp = load_reg(s, rn);
9725 } else {
f764718d 9726 tmp = NULL;
9ee6e8bb
PB
9727 }
9728 rd = (insn >> 12) & 0xf;
9729 switch(op1) {
9730 case 0x00:
e9bb4aa9
JR
9731 tcg_gen_and_i32(tmp, tmp, tmp2);
9732 if (logic_cc) {
9733 gen_logic_CC(tmp);
9734 }
7dcc1f89 9735 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9736 break;
9737 case 0x01:
e9bb4aa9
JR
9738 tcg_gen_xor_i32(tmp, tmp, tmp2);
9739 if (logic_cc) {
9740 gen_logic_CC(tmp);
9741 }
7dcc1f89 9742 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9743 break;
9744 case 0x02:
9745 if (set_cc && rd == 15) {
9746 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9747 if (IS_USER(s)) {
9ee6e8bb 9748 goto illegal_op;
e9bb4aa9 9749 }
72485ec4 9750 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9751 gen_exception_return(s, tmp);
9ee6e8bb 9752 } else {
e9bb4aa9 9753 if (set_cc) {
72485ec4 9754 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9755 } else {
9756 tcg_gen_sub_i32(tmp, tmp, tmp2);
9757 }
7dcc1f89 9758 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9759 }
9760 break;
9761 case 0x03:
e9bb4aa9 9762 if (set_cc) {
72485ec4 9763 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9764 } else {
9765 tcg_gen_sub_i32(tmp, tmp2, tmp);
9766 }
7dcc1f89 9767 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9768 break;
9769 case 0x04:
e9bb4aa9 9770 if (set_cc) {
72485ec4 9771 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9772 } else {
9773 tcg_gen_add_i32(tmp, tmp, tmp2);
9774 }
7dcc1f89 9775 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9776 break;
9777 case 0x05:
e9bb4aa9 9778 if (set_cc) {
49b4c31e 9779 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9780 } else {
9781 gen_add_carry(tmp, tmp, tmp2);
9782 }
7dcc1f89 9783 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9784 break;
9785 case 0x06:
e9bb4aa9 9786 if (set_cc) {
2de68a49 9787 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9788 } else {
9789 gen_sub_carry(tmp, tmp, tmp2);
9790 }
7dcc1f89 9791 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9792 break;
9793 case 0x07:
e9bb4aa9 9794 if (set_cc) {
2de68a49 9795 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9796 } else {
9797 gen_sub_carry(tmp, tmp2, tmp);
9798 }
7dcc1f89 9799 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9800 break;
9801 case 0x08:
9802 if (set_cc) {
e9bb4aa9
JR
9803 tcg_gen_and_i32(tmp, tmp, tmp2);
9804 gen_logic_CC(tmp);
9ee6e8bb 9805 }
7d1b0095 9806 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9807 break;
9808 case 0x09:
9809 if (set_cc) {
e9bb4aa9
JR
9810 tcg_gen_xor_i32(tmp, tmp, tmp2);
9811 gen_logic_CC(tmp);
9ee6e8bb 9812 }
7d1b0095 9813 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9814 break;
9815 case 0x0a:
9816 if (set_cc) {
72485ec4 9817 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9818 }
7d1b0095 9819 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9820 break;
9821 case 0x0b:
9822 if (set_cc) {
72485ec4 9823 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9824 }
7d1b0095 9825 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9826 break;
9827 case 0x0c:
e9bb4aa9
JR
9828 tcg_gen_or_i32(tmp, tmp, tmp2);
9829 if (logic_cc) {
9830 gen_logic_CC(tmp);
9831 }
7dcc1f89 9832 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9833 break;
9834 case 0x0d:
9835 if (logic_cc && rd == 15) {
9836 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9837 if (IS_USER(s)) {
9ee6e8bb 9838 goto illegal_op;
e9bb4aa9
JR
9839 }
9840 gen_exception_return(s, tmp2);
9ee6e8bb 9841 } else {
e9bb4aa9
JR
9842 if (logic_cc) {
9843 gen_logic_CC(tmp2);
9844 }
7dcc1f89 9845 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9846 }
9847 break;
9848 case 0x0e:
f669df27 9849 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9850 if (logic_cc) {
9851 gen_logic_CC(tmp);
9852 }
7dcc1f89 9853 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9854 break;
9855 default:
9856 case 0x0f:
e9bb4aa9
JR
9857 tcg_gen_not_i32(tmp2, tmp2);
9858 if (logic_cc) {
9859 gen_logic_CC(tmp2);
9860 }
7dcc1f89 9861 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9862 break;
9863 }
e9bb4aa9 9864 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9865 tcg_temp_free_i32(tmp2);
e9bb4aa9 9866 }
9ee6e8bb
PB
9867 } else {
9868 /* other instructions */
9869 op1 = (insn >> 24) & 0xf;
9870 switch(op1) {
9871 case 0x0:
9872 case 0x1:
9873 /* multiplies, extra load/stores */
9874 sh = (insn >> 5) & 3;
9875 if (sh == 0) {
9876 if (op1 == 0x0) {
9877 rd = (insn >> 16) & 0xf;
9878 rn = (insn >> 12) & 0xf;
9879 rs = (insn >> 8) & 0xf;
9880 rm = (insn) & 0xf;
9881 op1 = (insn >> 20) & 0xf;
9882 switch (op1) {
9883 case 0: case 1: case 2: case 3: case 6:
9884 /* 32 bit mul */
5e3f878a
PB
9885 tmp = load_reg(s, rs);
9886 tmp2 = load_reg(s, rm);
9887 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9888 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9889 if (insn & (1 << 22)) {
9890 /* Subtract (mls) */
9891 ARCH(6T2);
5e3f878a
PB
9892 tmp2 = load_reg(s, rn);
9893 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9894 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9895 } else if (insn & (1 << 21)) {
9896 /* Add */
5e3f878a
PB
9897 tmp2 = load_reg(s, rn);
9898 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9899 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9900 }
9901 if (insn & (1 << 20))
5e3f878a
PB
9902 gen_logic_CC(tmp);
9903 store_reg(s, rd, tmp);
9ee6e8bb 9904 break;
8aac08b1
AJ
9905 case 4:
9906 /* 64 bit mul double accumulate (UMAAL) */
9907 ARCH(6);
9908 tmp = load_reg(s, rs);
9909 tmp2 = load_reg(s, rm);
9910 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9911 gen_addq_lo(s, tmp64, rn);
9912 gen_addq_lo(s, tmp64, rd);
9913 gen_storeq_reg(s, rn, rd, tmp64);
9914 tcg_temp_free_i64(tmp64);
9915 break;
9916 case 8: case 9: case 10: case 11:
9917 case 12: case 13: case 14: case 15:
9918 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9919 tmp = load_reg(s, rs);
9920 tmp2 = load_reg(s, rm);
8aac08b1 9921 if (insn & (1 << 22)) {
c9f10124 9922 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9923 } else {
c9f10124 9924 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9925 }
9926 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9927 TCGv_i32 al = load_reg(s, rn);
9928 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9929 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9930 tcg_temp_free_i32(al);
9931 tcg_temp_free_i32(ah);
9ee6e8bb 9932 }
8aac08b1 9933 if (insn & (1 << 20)) {
c9f10124 9934 gen_logicq_cc(tmp, tmp2);
8aac08b1 9935 }
c9f10124
RH
9936 store_reg(s, rn, tmp);
9937 store_reg(s, rd, tmp2);
9ee6e8bb 9938 break;
8aac08b1
AJ
9939 default:
9940 goto illegal_op;
9ee6e8bb
PB
9941 }
9942 } else {
9943 rn = (insn >> 16) & 0xf;
9944 rd = (insn >> 12) & 0xf;
9945 if (insn & (1 << 23)) {
9946 /* load/store exclusive */
96c55295
PM
9947 bool is_ld = extract32(insn, 20, 1);
9948 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 9949 int op2 = (insn >> 8) & 3;
86753403 9950 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9951
9952 switch (op2) {
9953 case 0: /* lda/stl */
9954 if (op1 == 1) {
9955 goto illegal_op;
9956 }
9957 ARCH(8);
9958 break;
9959 case 1: /* reserved */
9960 goto illegal_op;
9961 case 2: /* ldaex/stlex */
9962 ARCH(8);
9963 break;
9964 case 3: /* ldrex/strex */
9965 if (op1) {
9966 ARCH(6K);
9967 } else {
9968 ARCH(6);
9969 }
9970 break;
9971 }
9972
3174f8e9 9973 addr = tcg_temp_local_new_i32();
98a46317 9974 load_reg_var(s, addr, rn);
2359bf80 9975
96c55295
PM
9976 if (is_lasr && !is_ld) {
9977 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9978 }
9979
2359bf80 9980 if (op2 == 0) {
96c55295 9981 if (is_ld) {
2359bf80
MR
9982 tmp = tcg_temp_new_i32();
9983 switch (op1) {
9984 case 0: /* lda */
9bb6558a
PM
9985 gen_aa32_ld32u_iss(s, tmp, addr,
9986 get_mem_index(s),
9987 rd | ISSIsAcqRel);
2359bf80
MR
9988 break;
9989 case 2: /* ldab */
9bb6558a
PM
9990 gen_aa32_ld8u_iss(s, tmp, addr,
9991 get_mem_index(s),
9992 rd | ISSIsAcqRel);
2359bf80
MR
9993 break;
9994 case 3: /* ldah */
9bb6558a
PM
9995 gen_aa32_ld16u_iss(s, tmp, addr,
9996 get_mem_index(s),
9997 rd | ISSIsAcqRel);
2359bf80
MR
9998 break;
9999 default:
10000 abort();
10001 }
10002 store_reg(s, rd, tmp);
10003 } else {
10004 rm = insn & 0xf;
10005 tmp = load_reg(s, rm);
10006 switch (op1) {
10007 case 0: /* stl */
9bb6558a
PM
10008 gen_aa32_st32_iss(s, tmp, addr,
10009 get_mem_index(s),
10010 rm | ISSIsAcqRel);
2359bf80
MR
10011 break;
10012 case 2: /* stlb */
9bb6558a
PM
10013 gen_aa32_st8_iss(s, tmp, addr,
10014 get_mem_index(s),
10015 rm | ISSIsAcqRel);
2359bf80
MR
10016 break;
10017 case 3: /* stlh */
9bb6558a
PM
10018 gen_aa32_st16_iss(s, tmp, addr,
10019 get_mem_index(s),
10020 rm | ISSIsAcqRel);
2359bf80
MR
10021 break;
10022 default:
10023 abort();
10024 }
10025 tcg_temp_free_i32(tmp);
10026 }
96c55295 10027 } else if (is_ld) {
86753403
PB
10028 switch (op1) {
10029 case 0: /* ldrex */
426f5abc 10030 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
10031 break;
10032 case 1: /* ldrexd */
426f5abc 10033 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
10034 break;
10035 case 2: /* ldrexb */
426f5abc 10036 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
10037 break;
10038 case 3: /* ldrexh */
426f5abc 10039 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
10040 break;
10041 default:
10042 abort();
10043 }
9ee6e8bb
PB
10044 } else {
10045 rm = insn & 0xf;
86753403
PB
10046 switch (op1) {
10047 case 0: /* strex */
426f5abc 10048 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
10049 break;
10050 case 1: /* strexd */
502e64fe 10051 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
10052 break;
10053 case 2: /* strexb */
426f5abc 10054 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
10055 break;
10056 case 3: /* strexh */
426f5abc 10057 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
10058 break;
10059 default:
10060 abort();
10061 }
9ee6e8bb 10062 }
39d5492a 10063 tcg_temp_free_i32(addr);
96c55295
PM
10064
10065 if (is_lasr && is_ld) {
10066 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
10067 }
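                    /* Hedged note on the barrier placement above:
                     * releases (STL*, STLEX*) get a TCG_BAR_STRL
                     * barrier before the store, and acquires (LDA*,
                     * LDAEX*) a TCG_BAR_LDAQ barrier after the load,
                     * which is the usual way to realise
                     * load-acquire/store-release on top of a weaker
                     * memory model.
                     */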
c4869ca6
OS
10068 } else if ((insn & 0x00300f00) == 0) {
10069 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
10070 * - SWP, SWPB
10071 */
10072
cf12bce0
EC
10073 TCGv taddr;
10074 TCGMemOp opc = s->be_data;
10075
9ee6e8bb
PB
10076 rm = (insn) & 0xf;
10077
9ee6e8bb 10078 if (insn & (1 << 22)) {
cf12bce0 10079 opc |= MO_UB;
9ee6e8bb 10080 } else {
cf12bce0 10081 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 10082 }
cf12bce0
EC
10083
10084 addr = load_reg(s, rn);
10085 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 10086 tcg_temp_free_i32(addr);
cf12bce0
EC
10087
10088 tmp = load_reg(s, rm);
10089 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
10090 get_mem_index(s), opc);
10091 tcg_temp_free(taddr);
10092 store_reg(s, rd, tmp);
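                    /* Hedged note: modelling SWP{B} as a single
                     * tcg_gen_atomic_xchg_i32 gives the load and
                     * store the required single-copy atomicity,
                     * i.e. roughly
                     *   old = atomic_xchg(mem, Rm); Rd = old;
                     * rather than a separate load and store pair.
                     */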
c4869ca6
OS
10093 } else {
10094 goto illegal_op;
9ee6e8bb
PB
10095 }
10096 }
10097 } else {
10098 int address_offset;
3960c336 10099 bool load = insn & (1 << 20);
63f26fcf
PM
10100 bool wbit = insn & (1 << 21);
10101 bool pbit = insn & (1 << 24);
3960c336 10102 bool doubleword = false;
9bb6558a
PM
10103 ISSInfo issinfo;
10104
9ee6e8bb
PB
10105 /* Misc load/store */
10106 rn = (insn >> 16) & 0xf;
10107 rd = (insn >> 12) & 0xf;
3960c336 10108
9bb6558a
PM
10109 /* ISS not valid if writeback */
10110 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
10111
3960c336
PM
10112 if (!load && (sh & 2)) {
10113 /* doubleword */
10114 ARCH(5TE);
10115 if (rd & 1) {
10116 /* UNPREDICTABLE; we choose to UNDEF */
10117 goto illegal_op;
10118 }
10119 load = (sh & 1) == 0;
10120 doubleword = true;
10121 }
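                /* Worked example (illustrative only): with L (bit 20)
                 * clear, sh == 2 reaches the block above and flips
                 * load back to true, giving LDRD, while sh == 3 keeps
                 * load false, giving STRD; an odd rd was rejected
                 * just above.
                 */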
10122
b0109805 10123 addr = load_reg(s, rn);
63f26fcf 10124 if (pbit) {
b0109805 10125 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 10126 }
9ee6e8bb 10127 address_offset = 0;
3960c336
PM
10128
10129 if (doubleword) {
10130 if (!load) {
9ee6e8bb 10131 /* store */
b0109805 10132 tmp = load_reg(s, rd);
12dcc321 10133 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10134 tcg_temp_free_i32(tmp);
b0109805
PB
10135 tcg_gen_addi_i32(addr, addr, 4);
10136 tmp = load_reg(s, rd + 1);
12dcc321 10137 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10138 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10139 } else {
10140 /* load */
5a839c0d 10141 tmp = tcg_temp_new_i32();
12dcc321 10142 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10143 store_reg(s, rd, tmp);
10144 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 10145 tmp = tcg_temp_new_i32();
12dcc321 10146 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10147 rd++;
9ee6e8bb
PB
10148 }
10149 address_offset = -4;
3960c336
PM
10150 } else if (load) {
10151 /* load */
10152 tmp = tcg_temp_new_i32();
10153 switch (sh) {
10154 case 1:
9bb6558a
PM
10155 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10156 issinfo);
3960c336
PM
10157 break;
10158 case 2:
9bb6558a
PM
10159 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
10160 issinfo);
3960c336
PM
10161 break;
10162 default:
10163 case 3:
9bb6558a
PM
10164 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
10165 issinfo);
3960c336
PM
10166 break;
10167 }
9ee6e8bb
PB
10168 } else {
10169 /* store */
b0109805 10170 tmp = load_reg(s, rd);
9bb6558a 10171 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 10172 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10173 }
10174 /* Perform base writeback before storing the loaded value, to
10175 ensure correct behavior with overlapping index registers.
b6af0975 10176 ldrd with base writeback is undefined if the
9ee6e8bb 10177 destination and index registers overlap. */
63f26fcf 10178 if (!pbit) {
b0109805
PB
10179 gen_add_datah_offset(s, insn, address_offset, addr);
10180 store_reg(s, rn, addr);
63f26fcf 10181 } else if (wbit) {
9ee6e8bb 10182 if (address_offset)
b0109805
PB
10183 tcg_gen_addi_i32(addr, addr, address_offset);
10184 store_reg(s, rn, addr);
10185 } else {
7d1b0095 10186 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10187 }
10188 if (load) {
10189 /* Complete the load. */
b0109805 10190 store_reg(s, rd, tmp);
9ee6e8bb
PB
10191 }
10192 }
10193 break;
10194 case 0x4:
10195 case 0x5:
10196 goto do_ldst;
10197 case 0x6:
10198 case 0x7:
10199 if (insn & (1 << 4)) {
10200 ARCH(6);
10201 /* Armv6 Media instructions. */
10202 rm = insn & 0xf;
10203 rn = (insn >> 16) & 0xf;
2c0262af 10204 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
10205 rs = (insn >> 8) & 0xf;
10206 switch ((insn >> 23) & 3) {
10207 case 0: /* Parallel add/subtract. */
10208 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
10209 tmp = load_reg(s, rn);
10210 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10211 sh = (insn >> 5) & 7;
10212 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
10213 goto illegal_op;
6ddbc6e4 10214 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 10215 tcg_temp_free_i32(tmp2);
6ddbc6e4 10216 store_reg(s, rd, tmp);
9ee6e8bb
PB
10217 break;
10218 case 1:
10219 if ((insn & 0x00700020) == 0) {
6c95676b 10220 /* Halfword pack. */
3670669c
PB
10221 tmp = load_reg(s, rn);
10222 tmp2 = load_reg(s, rm);
9ee6e8bb 10223 shift = (insn >> 7) & 0x1f;
3670669c
PB
10224 if (insn & (1 << 6)) {
10225 /* pkhtb */
22478e79
AZ
10226 if (shift == 0)
10227 shift = 31;
10228 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 10229 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 10230 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
10231 } else {
10232 /* pkhbt */
22478e79
AZ
10233 if (shift)
10234 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 10235 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
10236 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10237 }
10238 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10239 tcg_temp_free_i32(tmp2);
3670669c 10240 store_reg(s, rd, tmp);
9ee6e8bb
PB
10241 } else if ((insn & 0x00200020) == 0x00200000) {
10242 /* [us]sat */
6ddbc6e4 10243 tmp = load_reg(s, rm);
9ee6e8bb
PB
10244 shift = (insn >> 7) & 0x1f;
10245 if (insn & (1 << 6)) {
10246 if (shift == 0)
10247 shift = 31;
6ddbc6e4 10248 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10249 } else {
6ddbc6e4 10250 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
10251 }
10252 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10253 tmp2 = tcg_const_i32(sh);
10254 if (insn & (1 << 22))
9ef39277 10255 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 10256 else
9ef39277 10257 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 10258 tcg_temp_free_i32(tmp2);
6ddbc6e4 10259 store_reg(s, rd, tmp);
9ee6e8bb
PB
10260 } else if ((insn & 0x00300fe0) == 0x00200f20) {
10261 /* [us]sat16 */
6ddbc6e4 10262 tmp = load_reg(s, rm);
9ee6e8bb 10263 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10264 tmp2 = tcg_const_i32(sh);
10265 if (insn & (1 << 22))
9ef39277 10266 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10267 else
9ef39277 10268 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10269 tcg_temp_free_i32(tmp2);
6ddbc6e4 10270 store_reg(s, rd, tmp);
9ee6e8bb
PB
10271 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
10272 /* Select bytes. */
6ddbc6e4
PB
10273 tmp = load_reg(s, rn);
10274 tmp2 = load_reg(s, rm);
7d1b0095 10275 tmp3 = tcg_temp_new_i32();
0ecb72a5 10276 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 10277 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10278 tcg_temp_free_i32(tmp3);
10279 tcg_temp_free_i32(tmp2);
6ddbc6e4 10280 store_reg(s, rd, tmp);
9ee6e8bb 10281 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 10282 tmp = load_reg(s, rm);
9ee6e8bb 10283 shift = (insn >> 10) & 3;
1301f322 10284 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10285 rotate; a shift is sufficient. */
10286 if (shift != 0)
f669df27 10287 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10288 op1 = (insn >> 20) & 7;
10289 switch (op1) {
5e3f878a
PB
10290 case 0: gen_sxtb16(tmp); break;
10291 case 2: gen_sxtb(tmp); break;
10292 case 3: gen_sxth(tmp); break;
10293 case 4: gen_uxtb16(tmp); break;
10294 case 6: gen_uxtb(tmp); break;
10295 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
10296 default: goto illegal_op;
10297 }
10298 if (rn != 15) {
5e3f878a 10299 tmp2 = load_reg(s, rn);
9ee6e8bb 10300 if ((op1 & 3) == 0) {
5e3f878a 10301 gen_add16(tmp, tmp2);
9ee6e8bb 10302 } else {
5e3f878a 10303 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10304 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10305 }
10306 }
6c95676b 10307 store_reg(s, rd, tmp);
9ee6e8bb
PB
10308 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
10309 /* rev */
b0109805 10310 tmp = load_reg(s, rm);
9ee6e8bb
PB
10311 if (insn & (1 << 22)) {
10312 if (insn & (1 << 7)) {
b0109805 10313 gen_revsh(tmp);
9ee6e8bb
PB
10314 } else {
10315 ARCH(6T2);
b0109805 10316 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10317 }
10318 } else {
10319 if (insn & (1 << 7))
b0109805 10320 gen_rev16(tmp);
9ee6e8bb 10321 else
66896cb8 10322 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 10323 }
b0109805 10324 store_reg(s, rd, tmp);
9ee6e8bb
PB
10325 } else {
10326 goto illegal_op;
10327 }
10328 break;
10329 case 2: /* Multiplies (Type 3). */
41e9564d
PM
10330 switch ((insn >> 20) & 0x7) {
10331 case 5:
10332 if (((insn >> 6) ^ (insn >> 7)) & 1) {
10333 /* op2 not 00x or 11x : UNDEF */
10334 goto illegal_op;
10335 }
838fa72d
AJ
10336 /* Signed multiply most significant [accumulate].
10337 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
10338 tmp = load_reg(s, rm);
10339 tmp2 = load_reg(s, rs);
a7812ae4 10340 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 10341
955a7dd5 10342 if (rd != 15) {
838fa72d 10343 tmp = load_reg(s, rd);
9ee6e8bb 10344 if (insn & (1 << 6)) {
838fa72d 10345 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 10346 } else {
838fa72d 10347 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
10348 }
10349 }
838fa72d
AJ
10350 if (insn & (1 << 5)) {
10351 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10352 }
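                    /* Worked example (illustrative only): bit 5 set
                     * selects the rounding forms (SMMULR etc.);
                     * adding 0x80000000 before the 32-bit shift
                     * rounds to nearest, e.g. a 64-bit value
                     * 0x1_8000_0000 becomes 0x2_0000_0000 and yields
                     * 2 rather than the truncated 1.
                     */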
10353 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10354 tmp = tcg_temp_new_i32();
ecc7b3aa 10355 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10356 tcg_temp_free_i64(tmp64);
955a7dd5 10357 store_reg(s, rn, tmp);
41e9564d
PM
10358 break;
10359 case 0:
10360 case 4:
10361 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
10362 if (insn & (1 << 7)) {
10363 goto illegal_op;
10364 }
10365 tmp = load_reg(s, rm);
10366 tmp2 = load_reg(s, rs);
9ee6e8bb 10367 if (insn & (1 << 5))
5e3f878a
PB
10368 gen_swap_half(tmp2);
10369 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10370 if (insn & (1 << 22)) {
5e3f878a 10371 /* smlald, smlsld */
33bbd75a
PC
10372 TCGv_i64 tmp64_2;
10373
a7812ae4 10374 tmp64 = tcg_temp_new_i64();
33bbd75a 10375 tmp64_2 = tcg_temp_new_i64();
a7812ae4 10376 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 10377 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 10378 tcg_temp_free_i32(tmp);
33bbd75a
PC
10379 tcg_temp_free_i32(tmp2);
10380 if (insn & (1 << 6)) {
10381 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
10382 } else {
10383 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
10384 }
10385 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
10386 gen_addq(s, tmp64, rd, rn);
10387 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 10388 tcg_temp_free_i64(tmp64);
9ee6e8bb 10389 } else {
5e3f878a 10390 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
10391 if (insn & (1 << 6)) {
10392 /* This subtraction cannot overflow. */
10393 tcg_gen_sub_i32(tmp, tmp, tmp2);
10394 } else {
10395 /* This addition cannot overflow 32 bits;
10396 * however it may overflow when considered as a
10397 * signed operation, in which case we must set
10398 * the Q flag.
10399 */
10400 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10401 }
10402 tcg_temp_free_i32(tmp2);
22478e79 10403 if (rd != 15)
9ee6e8bb 10404 {
22478e79 10405 tmp2 = load_reg(s, rd);
9ef39277 10406 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10407 tcg_temp_free_i32(tmp2);
9ee6e8bb 10408 }
22478e79 10409 store_reg(s, rn, tmp);
9ee6e8bb 10410 }
41e9564d 10411 break;
b8b8ea05
PM
10412 case 1:
10413 case 3:
10414 /* SDIV, UDIV */
7e0cf8b4 10415 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
10416 goto illegal_op;
10417 }
10418 if (((insn >> 5) & 7) || (rd != 15)) {
10419 goto illegal_op;
10420 }
10421 tmp = load_reg(s, rm);
10422 tmp2 = load_reg(s, rs);
10423 if (insn & (1 << 21)) {
10424 gen_helper_udiv(tmp, tmp, tmp2);
10425 } else {
10426 gen_helper_sdiv(tmp, tmp, tmp2);
10427 }
10428 tcg_temp_free_i32(tmp2);
10429 store_reg(s, rn, tmp);
10430 break;
41e9564d
PM
10431 default:
10432 goto illegal_op;
9ee6e8bb
PB
10433 }
10434 break;
10435 case 3:
10436 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
10437 switch (op1) {
10438 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
10439 ARCH(6);
10440 tmp = load_reg(s, rm);
10441 tmp2 = load_reg(s, rs);
10442 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10443 tcg_temp_free_i32(tmp2);
ded9d295
AZ
10444 if (rd != 15) {
10445 tmp2 = load_reg(s, rd);
6ddbc6e4 10446 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10447 tcg_temp_free_i32(tmp2);
9ee6e8bb 10448 }
ded9d295 10449 store_reg(s, rn, tmp);
9ee6e8bb
PB
10450 break;
10451 case 0x20: case 0x24: case 0x28: case 0x2c:
10452 /* Bitfield insert/clear. */
10453 ARCH(6T2);
10454 shift = (insn >> 7) & 0x1f;
10455 i = (insn >> 16) & 0x1f;
45140a57
KB
10456 if (i < shift) {
10457 /* UNPREDICTABLE; we choose to UNDEF */
10458 goto illegal_op;
10459 }
9ee6e8bb
PB
10460 i = i + 1 - shift;
10461 if (rm == 15) {
7d1b0095 10462 tmp = tcg_temp_new_i32();
5e3f878a 10463 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 10464 } else {
5e3f878a 10465 tmp = load_reg(s, rm);
9ee6e8bb
PB
10466 }
10467 if (i != 32) {
5e3f878a 10468 tmp2 = load_reg(s, rd);
d593c48e 10469 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 10470 tcg_temp_free_i32(tmp2);
9ee6e8bb 10471 }
5e3f878a 10472 store_reg(s, rd, tmp);
9ee6e8bb
PB
10473 break;
10474 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
10475 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 10476 ARCH(6T2);
5e3f878a 10477 tmp = load_reg(s, rm);
9ee6e8bb
PB
10478 shift = (insn >> 7) & 0x1f;
10479 i = ((insn >> 16) & 0x1f) + 1;
10480 if (shift + i > 32)
10481 goto illegal_op;
10482 if (i < 32) {
10483 if (op1 & 0x20) {
59a71b4c 10484 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 10485 } else {
59a71b4c 10486 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
10487 }
10488 }
5e3f878a 10489 store_reg(s, rd, tmp);
9ee6e8bb
PB
10490 break;
10491 default:
10492 goto illegal_op;
10493 }
10494 break;
10495 }
10496 break;
10497 }
10498 do_ldst:
10499 /* Check for undefined extension instructions
10500 * per the ARM Bible IE:
10501 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
10502 */
10503 sh = (0xf << 20) | (0xf << 4);
10504 if (op1 == 0x7 && ((insn & sh) == sh))
10505 {
10506 goto illegal_op;
10507 }
10508 /* load/store byte/word */
10509 rn = (insn >> 16) & 0xf;
10510 rd = (insn >> 12) & 0xf;
b0109805 10511 tmp2 = load_reg(s, rn);
a99caa48
PM
10512 if ((insn & 0x01200000) == 0x00200000) {
10513 /* ldrt/strt */
579d21cc 10514 i = get_a32_user_mem_index(s);
a99caa48
PM
10515 } else {
10516 i = get_mem_index(s);
10517 }
9ee6e8bb 10518 if (insn & (1 << 24))
b0109805 10519 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
10520 if (insn & (1 << 20)) {
10521 /* load */
5a839c0d 10522 tmp = tcg_temp_new_i32();
9ee6e8bb 10523 if (insn & (1 << 22)) {
9bb6558a 10524 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10525 } else {
9bb6558a 10526 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10527 }
9ee6e8bb
PB
10528 } else {
10529 /* store */
b0109805 10530 tmp = load_reg(s, rd);
5a839c0d 10531 if (insn & (1 << 22)) {
9bb6558a 10532 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 10533 } else {
9bb6558a 10534 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
10535 }
10536 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10537 }
10538 if (!(insn & (1 << 24))) {
b0109805
PB
10539 gen_add_data_offset(s, insn, tmp2);
10540 store_reg(s, rn, tmp2);
10541 } else if (insn & (1 << 21)) {
10542 store_reg(s, rn, tmp2);
10543 } else {
7d1b0095 10544 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10545 }
10546 if (insn & (1 << 20)) {
10547 /* Complete the load. */
7dcc1f89 10548 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
10549 }
10550 break;
10551 case 0x08:
10552 case 0x09:
10553 {
da3e53dd
PM
10554 int j, n, loaded_base;
10555 bool exc_return = false;
10556 bool is_load = extract32(insn, 20, 1);
10557 bool user = false;
39d5492a 10558 TCGv_i32 loaded_var;
9ee6e8bb
PB
10559 /* load/store multiple words */
10560 /* XXX: store correct base if write back */
9ee6e8bb 10561 if (insn & (1 << 22)) {
da3e53dd 10562 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
10563 if (IS_USER(s))
10564 goto illegal_op; /* only usable in supervisor mode */
10565
da3e53dd
PM
10566 if (is_load && extract32(insn, 15, 1)) {
10567 exc_return = true;
10568 } else {
10569 user = true;
10570 }
9ee6e8bb
PB
10571 }
10572 rn = (insn >> 16) & 0xf;
b0109805 10573 addr = load_reg(s, rn);
9ee6e8bb
PB
10574
10575 /* compute total size */
10576 loaded_base = 0;
f764718d 10577 loaded_var = NULL;
9ee6e8bb
PB
10578 n = 0;
10579 for(i=0;i<16;i++) {
10580 if (insn & (1 << i))
10581 n++;
10582 }
10583 /* XXX: test invalid n == 0 case ? */
10584 if (insn & (1 << 23)) {
10585 if (insn & (1 << 24)) {
10586 /* pre increment */
b0109805 10587 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10588 } else {
10589 /* post increment */
10590 }
10591 } else {
10592 if (insn & (1 << 24)) {
10593 /* pre decrement */
b0109805 10594 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10595 } else {
10596 /* post decrement */
10597 if (n != 1)
b0109805 10598 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10599 }
10600 }
10601 j = 0;
10602 for(i=0;i<16;i++) {
10603 if (insn & (1 << i)) {
da3e53dd 10604 if (is_load) {
9ee6e8bb 10605 /* load */
5a839c0d 10606 tmp = tcg_temp_new_i32();
12dcc321 10607 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 10608 if (user) {
b75263d6 10609 tmp2 = tcg_const_i32(i);
1ce94f81 10610 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 10611 tcg_temp_free_i32(tmp2);
7d1b0095 10612 tcg_temp_free_i32(tmp);
9ee6e8bb 10613 } else if (i == rn) {
b0109805 10614 loaded_var = tmp;
9ee6e8bb 10615 loaded_base = 1;
9d090d17 10616 } else if (i == 15 && exc_return) {
fb0e8e79 10617 store_pc_exc_ret(s, tmp);
9ee6e8bb 10618 } else {
7dcc1f89 10619 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
10620 }
10621 } else {
10622 /* store */
10623 if (i == 15) {
10624 /* special case: r15 = PC + 8 */
10625 val = (long)s->pc + 4;
7d1b0095 10626 tmp = tcg_temp_new_i32();
b0109805 10627 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 10628 } else if (user) {
7d1b0095 10629 tmp = tcg_temp_new_i32();
b75263d6 10630 tmp2 = tcg_const_i32(i);
9ef39277 10631 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 10632 tcg_temp_free_i32(tmp2);
9ee6e8bb 10633 } else {
b0109805 10634 tmp = load_reg(s, i);
9ee6e8bb 10635 }
12dcc321 10636 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10637 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10638 }
10639 j++;
10640 /* no need to add after the last transfer */
10641 if (j != n)
b0109805 10642 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10643 }
10644 }
10645 if (insn & (1 << 21)) {
10646 /* write back */
10647 if (insn & (1 << 23)) {
10648 if (insn & (1 << 24)) {
10649 /* pre increment */
10650 } else {
10651 /* post increment */
b0109805 10652 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10653 }
10654 } else {
10655 if (insn & (1 << 24)) {
10656 /* pre decrement */
10657 if (n != 1)
b0109805 10658 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10659 } else {
10660 /* post decrement */
b0109805 10661 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10662 }
10663 }
b0109805
PB
10664 store_reg(s, rn, addr);
10665 } else {
7d1b0095 10666 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10667 }
10668 if (loaded_base) {
b0109805 10669 store_reg(s, rn, loaded_var);
9ee6e8bb 10670 }
da3e53dd 10671 if (exc_return) {
9ee6e8bb 10672 /* Restore CPSR from SPSR. */
d9ba4830 10673 tmp = load_cpu_field(spsr);
e69ad9df
AL
10674 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10675 gen_io_start();
10676 }
235ea1f5 10677 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
10678 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10679 gen_io_end();
10680 }
7d1b0095 10681 tcg_temp_free_i32(tmp);
b29fd33d 10682 /* Must exit loop to check un-masked IRQs */
dcba3a8d 10683 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
10684 }
10685 }
10686 break;
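    /* Worked example for the block above (illustrative only):
     * STMDB sp!, {r0, r1, lr} has n == 3, so addr starts at sp - 12
     * and r0/r1/lr are stored at sp - 12 / sp - 8 / sp - 4; the
     * writeback pass then rewinds addr by (n - 1) * 4 and stores
     * sp - 12 back to the base register.
     */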
10687 case 0xa:
10688 case 0xb:
10689 {
10690 int32_t offset;
10691
10692 /* branch (and link) */
10693 val = (int32_t)s->pc;
10694 if (insn & (1 << 24)) {
7d1b0095 10695 tmp = tcg_temp_new_i32();
5e3f878a
PB
10696 tcg_gen_movi_i32(tmp, val);
10697 store_reg(s, 14, tmp);
9ee6e8bb 10698 }
534df156
PM
10699 offset = sextract32(insn << 2, 0, 26);
10700 val += offset + 4;
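            /* Worked example (illustrative only): for B with
             * imm24 = 0xfffffe, insn << 2 places imm24*4 in bits
             * 25..2 and sextract32(.., 0, 26) sign-extends it to -8,
             * so val = (insn_addr + 4) - 8 + 4 = insn_addr, i.e. a
             * branch to self, as the architecture requires
             * (target = Align(PC, 4) + imm32 with PC reading as
             * insn_addr + 8).
             */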
9ee6e8bb
PB
10701 gen_jmp(s, val);
10702 }
10703 break;
10704 case 0xc:
10705 case 0xd:
10706 case 0xe:
6a57f3eb
WN
10707 if (((insn >> 8) & 0xe) == 10) {
10708 /* VFP. */
7dcc1f89 10709 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10710 goto illegal_op;
10711 }
7dcc1f89 10712 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10713 /* Coprocessor. */
9ee6e8bb 10714 goto illegal_op;
6a57f3eb 10715 }
9ee6e8bb
PB
10716 break;
10717 case 0xf:
10718 /* swi */
eaed129d 10719 gen_set_pc_im(s, s->pc);
d4a2dc67 10720 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10721 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10722 break;
10723 default:
10724 illegal_op:
73710361
GB
10725 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10726 default_exception_el(s));
9ee6e8bb
PB
10727 break;
10728 }
10729 }
10730}
10731
296e5a0a
PM
10732static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10733{
10734 /* Return true if this is a 16-bit instruction. We must be precise
10735 * about this (matching the decode). We assume that s->pc still
10736 * points to the first 16 bits of the insn.
10737 */
10738 if ((insn >> 11) < 0x1d) {
10739 /* Definitely a 16-bit instruction */
10740 return true;
10741 }
10742
10743 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10744 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10745 * end up actually treating this as two 16-bit insns, though,
10746 * if it's half of a bl/blx pair that might span a page boundary.
10747 */
14120108
JS
10748 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10749 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10750 /* Thumb2 cores (including all M profile ones) always treat
10751 * 32-bit insns as 32-bit.
10752 */
10753 return false;
10754 }
10755
bfe7ad5b 10756 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10757 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10758 * is not on the next page; we merge this into a 32-bit
10759 * insn.
10760 */
10761 return false;
10762 }
10763 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10764 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10765 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10766 * -- handle as single 16 bit insn
10767 */
10768 return true;
10769}
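/* Worked example for the function above (illustrative only):
 * 0xe800 >> 11 == 0x1d (0b11101, the first half of a 32-bit insn),
 * so it is not classified as 16-bit by the first test, while
 * 0xd000 >> 11 == 0x1a (a conditional branch) is below the 0x1d
 * threshold and returns true immediately.
 */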
10770
9ee6e8bb
PB
10771/* Return true if this is a Thumb-2 logical op. */
10772static int
10773thumb2_logic_op(int op)
10774{
10775 return (op < 8);
10776}
10777
10778/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10779 then set condition code flags based on the result of the operation.
10780 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10781 to the high bit of T1.
10782 Returns zero if the opcode is valid. */
10783
10784static int
39d5492a
PM
10785gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10786 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
10787{
10788 int logic_cc;
10789
10790 logic_cc = 0;
10791 switch (op) {
10792 case 0: /* and */
396e467c 10793 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
10794 logic_cc = conds;
10795 break;
10796 case 1: /* bic */
f669df27 10797 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
10798 logic_cc = conds;
10799 break;
10800 case 2: /* orr */
396e467c 10801 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
10802 logic_cc = conds;
10803 break;
10804 case 3: /* orn */
29501f1b 10805 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
10806 logic_cc = conds;
10807 break;
10808 case 4: /* eor */
396e467c 10809 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
10810 logic_cc = conds;
10811 break;
10812 case 8: /* add */
10813 if (conds)
72485ec4 10814 gen_add_CC(t0, t0, t1);
9ee6e8bb 10815 else
396e467c 10816 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
10817 break;
10818 case 10: /* adc */
10819 if (conds)
49b4c31e 10820 gen_adc_CC(t0, t0, t1);
9ee6e8bb 10821 else
396e467c 10822 gen_adc(t0, t1);
9ee6e8bb
PB
10823 break;
10824 case 11: /* sbc */
2de68a49
RH
10825 if (conds) {
10826 gen_sbc_CC(t0, t0, t1);
10827 } else {
396e467c 10828 gen_sub_carry(t0, t0, t1);
2de68a49 10829 }
9ee6e8bb
PB
10830 break;
10831 case 13: /* sub */
10832 if (conds)
72485ec4 10833 gen_sub_CC(t0, t0, t1);
9ee6e8bb 10834 else
396e467c 10835 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
10836 break;
10837 case 14: /* rsb */
10838 if (conds)
72485ec4 10839 gen_sub_CC(t0, t1, t0);
9ee6e8bb 10840 else
396e467c 10841 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
10842 break;
10843 default: /* 5, 6, 7, 9, 12, 15. */
10844 return 1;
10845 }
10846 if (logic_cc) {
396e467c 10847 gen_logic_CC(t0);
9ee6e8bb 10848 if (shifter_out)
396e467c 10849 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10850 }
10851 return 0;
10852}
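/* Usage sketch for the helper above (illustrative only): the Thumb-2
 * ANDS encoding reaches it as roughly
 *   gen_thumb2_data_op(s, 0, 1, shifter_out, t0, t1);
 * op == 0 ANDs t0 with t1, conds == 1 makes gen_logic_CC() set N/Z,
 * and a nonzero shifter_out copies bit 31 of t1 into the carry flag.
 */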
10853
2eea841c
PM
10854/* Translate a 32-bit thumb instruction. */
10855static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10856{
296e5a0a 10857 uint32_t imm, shift, offset;
9ee6e8bb 10858 uint32_t rd, rn, rm, rs;
39d5492a
PM
10859 TCGv_i32 tmp;
10860 TCGv_i32 tmp2;
10861 TCGv_i32 tmp3;
10862 TCGv_i32 addr;
a7812ae4 10863 TCGv_i64 tmp64;
9ee6e8bb
PB
10864 int op;
10865 int shiftop;
10866 int conds;
10867 int logic_cc;
10868
14120108
JS
10869 /*
10870 * ARMv6-M supports a limited subset of Thumb2 instructions.
10871 * Other Thumb1 architectures allow only 32-bit
10872 * combined BL/BLX prefix and suffix.
296e5a0a 10873 */
14120108
JS
10874 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10875 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10876 int i;
10877 bool found = false;
8297cb13
JS
10878 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10879 0xf3b08040 /* dsb */,
10880 0xf3b08050 /* dmb */,
10881 0xf3b08060 /* isb */,
10882 0xf3e08000 /* mrs */,
10883 0xf000d000 /* bl */};
10884 static const uint32_t armv6m_mask[] = {0xffe0d000,
10885 0xfff0d0f0,
10886 0xfff0d0f0,
10887 0xfff0d0f0,
10888 0xffe0d000,
10889 0xf800d000};
14120108
JS
10890
10891 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10892 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10893 found = true;
10894 break;
10895 }
10896 }
10897 if (!found) {
10898 goto illegal_op;
10899 }
10900 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10901 ARCH(6T2);
10902 }
10903
10904 rn = (insn >> 16) & 0xf;
10905 rs = (insn >> 12) & 0xf;
10906 rd = (insn >> 8) & 0xf;
10907 rm = insn & 0xf;
10908 switch ((insn >> 25) & 0xf) {
10909 case 0: case 1: case 2: case 3:
10910 /* 16-bit instructions. Should never happen. */
10911 abort();
10912 case 4:
10913 if (insn & (1 << 22)) {
ebfe27c5
PM
10914 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10915 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10916 * table branch, TT.
ebfe27c5 10917 */
76eff04d
PM
10918 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10919 arm_dc_feature(s, ARM_FEATURE_V8)) {
10920 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10921 * - SG (v8M only)
10922 * The bulk of the behaviour for this instruction is implemented
10923 * in v7m_handle_execute_nsc(), which deals with the insn when
10924 * it is executed by a CPU in non-secure state from memory
10925 * which is Secure & NonSecure-Callable.
10926 * Here we only need to handle the remaining cases:
10927 * * in NS memory (including the "security extension not
10928 * implemented" case) : NOP
10929 * * in S memory but CPU already secure (clear IT bits)
10930 * We know that the attribute for the memory this insn is
10931 * in must match the current CPU state, because otherwise
10932 * get_phys_addr_pmsav8 would have generated an exception.
10933 */
10934 if (s->v8m_secure) {
10935 /* Like the IT insn, we don't need to generate any code */
10936 s->condexec_cond = 0;
10937 s->condexec_mask = 0;
10938 }
10939 } else if (insn & 0x01200000) {
ebfe27c5
PM
10940 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10941 * - load/store dual (post-indexed)
10942 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10943 * - load/store dual (literal and immediate)
10944 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10945 * - load/store dual (pre-indexed)
10946 */
910d7692
PM
10947 bool wback = extract32(insn, 21, 1);
10948
9ee6e8bb 10949 if (rn == 15) {
ebfe27c5
PM
10950 if (insn & (1 << 21)) {
10951 /* UNPREDICTABLE */
10952 goto illegal_op;
10953 }
7d1b0095 10954 addr = tcg_temp_new_i32();
b0109805 10955 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10956 } else {
b0109805 10957 addr = load_reg(s, rn);
9ee6e8bb
PB
10958 }
10959 offset = (insn & 0xff) * 4;
910d7692 10960 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 10961 offset = -offset;
910d7692
PM
10962 }
10963
10964 if (s->v8m_stackcheck && rn == 13 && wback) {
10965 /*
10966 * Here 'addr' is the current SP; if offset is +ve we're
10967 * moving SP up, else down. It is UNKNOWN whether the limit
10968 * check triggers when SP starts below the limit and ends
10969 * up above it; check whichever of the current and final
10970 * SP is lower, so QEMU will trigger in that situation.
10971 */
10972 if ((int32_t)offset < 0) {
10973 TCGv_i32 newsp = tcg_temp_new_i32();
10974
10975 tcg_gen_addi_i32(newsp, addr, offset);
10976 gen_helper_v8m_stackcheck(cpu_env, newsp);
10977 tcg_temp_free_i32(newsp);
10978 } else {
10979 gen_helper_v8m_stackcheck(cpu_env, addr);
10980 }
10981 }
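                /* Worked example (illustrative, not from the source): with
                 * SP = 0x20000400, offset = -8 and a stack limit of
                 * 0x200003fc, the final SP (0x200003f8) is the lower value,
                 * so the helper is called on addr + offset and faults;
                 * with a positive offset the current SP is already the
                 * lower value and is checked directly.
                 */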
10982
9ee6e8bb 10983 if (insn & (1 << 24)) {
b0109805 10984 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
10985 offset = 0;
10986 }
10987 if (insn & (1 << 20)) {
10988 /* ldrd */
e2592fad 10989 tmp = tcg_temp_new_i32();
12dcc321 10990 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10991 store_reg(s, rs, tmp);
10992 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10993 tmp = tcg_temp_new_i32();
12dcc321 10994 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10995 store_reg(s, rd, tmp);
9ee6e8bb
PB
10996 } else {
10997 /* strd */
b0109805 10998 tmp = load_reg(s, rs);
12dcc321 10999 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11000 tcg_temp_free_i32(tmp);
b0109805
PB
11001 tcg_gen_addi_i32(addr, addr, 4);
11002 tmp = load_reg(s, rd);
12dcc321 11003 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11004 tcg_temp_free_i32(tmp);
9ee6e8bb 11005 }
910d7692 11006 if (wback) {
9ee6e8bb 11007 /* Base writeback. */
b0109805
PB
11008 tcg_gen_addi_i32(addr, addr, offset - 4);
11009 store_reg(s, rn, addr);
11010 } else {
7d1b0095 11011 tcg_temp_free_i32(addr);
9ee6e8bb
PB
11012 }
11013 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
11014 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
11015 * - load/store exclusive word
5158de24 11016 * - TT (v8M only)
ebfe27c5
PM
11017 */
11018 if (rs == 15) {
5158de24
PM
11019 if (!(insn & (1 << 20)) &&
11020 arm_dc_feature(s, ARM_FEATURE_M) &&
11021 arm_dc_feature(s, ARM_FEATURE_V8)) {
11022 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
11023 * - TT (v8M only)
11024 */
11025 bool alt = insn & (1 << 7);
11026 TCGv_i32 addr, op, ttresp;
11027
11028 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
11029 /* we UNDEF for these UNPREDICTABLE cases */
11030 goto illegal_op;
11031 }
11032
11033 if (alt && !s->v8m_secure) {
11034 goto illegal_op;
11035 }
11036
11037 addr = load_reg(s, rn);
11038 op = tcg_const_i32(extract32(insn, 6, 2));
11039 ttresp = tcg_temp_new_i32();
11040 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
11041 tcg_temp_free_i32(addr);
11042 tcg_temp_free_i32(op);
11043 store_reg(s, rd, ttresp);
384c6c03 11044 break;
5158de24 11045 }
ebfe27c5
PM
11046 goto illegal_op;
11047 }
39d5492a 11048 addr = tcg_temp_local_new_i32();
98a46317 11049 load_reg_var(s, addr, rn);
426f5abc 11050 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 11051 if (insn & (1 << 20)) {
426f5abc 11052 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 11053 } else {
426f5abc 11054 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 11055 }
39d5492a 11056 tcg_temp_free_i32(addr);
2359bf80 11057 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
11058 /* Table Branch. */
11059 if (rn == 15) {
7d1b0095 11060 addr = tcg_temp_new_i32();
b0109805 11061 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 11062 } else {
b0109805 11063 addr = load_reg(s, rn);
9ee6e8bb 11064 }
b26eefb6 11065 tmp = load_reg(s, rm);
b0109805 11066 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
11067 if (insn & (1 << 4)) {
11068 /* tbh */
b0109805 11069 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11070 tcg_temp_free_i32(tmp);
e2592fad 11071 tmp = tcg_temp_new_i32();
12dcc321 11072 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11073 } else { /* tbb */
7d1b0095 11074 tcg_temp_free_i32(tmp);
e2592fad 11075 tmp = tcg_temp_new_i32();
12dcc321 11076 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11077 }
7d1b0095 11078 tcg_temp_free_i32(addr);
b0109805
PB
11079 tcg_gen_shli_i32(tmp, tmp, 1);
11080 tcg_gen_addi_i32(tmp, tmp, s->pc);
11081 store_reg(s, 15, tmp);
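            /* Illustrative semantics of the table branch implemented above
             * (a sketch, not authoritative):
             *   TBB: PC = PC + 2 * *(uint8_t *)(Rn + Rm)
             *   TBH: PC = PC + 2 * *(uint16_t *)(Rn + 2 * Rm)
             * with Rn == 15 meaning the already-advanced s->pc.
             */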
9ee6e8bb 11082 } else {
96c55295
PM
11083 bool is_lasr = false;
11084 bool is_ld = extract32(insn, 20, 1);
2359bf80 11085 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 11086 op = (insn >> 4) & 0x3;
2359bf80
MR
11087 switch (op2) {
11088 case 0:
426f5abc 11089 goto illegal_op;
2359bf80
MR
11090 case 1:
11091 /* Load/store exclusive byte/halfword/doubleword */
11092 if (op == 2) {
11093 goto illegal_op;
11094 }
11095 ARCH(7);
11096 break;
11097 case 2:
11098 /* Load-acquire/store-release */
11099 if (op == 3) {
11100 goto illegal_op;
11101 }
11102 /* Fall through */
11103 case 3:
11104 /* Load-acquire/store-release exclusive */
11105 ARCH(8);
96c55295 11106 is_lasr = true;
2359bf80 11107 break;
426f5abc 11108 }
96c55295
PM
11109
11110 if (is_lasr && !is_ld) {
11111 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
11112 }
11113
39d5492a 11114 addr = tcg_temp_local_new_i32();
98a46317 11115 load_reg_var(s, addr, rn);
2359bf80 11116 if (!(op2 & 1)) {
96c55295 11117 if (is_ld) {
2359bf80
MR
11118 tmp = tcg_temp_new_i32();
11119 switch (op) {
11120 case 0: /* ldab */
9bb6558a
PM
11121 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
11122 rs | ISSIsAcqRel);
2359bf80
MR
11123 break;
11124 case 1: /* ldah */
9bb6558a
PM
11125 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
11126 rs | ISSIsAcqRel);
2359bf80
MR
11127 break;
11128 case 2: /* lda */
9bb6558a
PM
11129 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11130 rs | ISSIsAcqRel);
2359bf80
MR
11131 break;
11132 default:
11133 abort();
11134 }
11135 store_reg(s, rs, tmp);
11136 } else {
11137 tmp = load_reg(s, rs);
11138 switch (op) {
11139 case 0: /* stlb */
9bb6558a
PM
11140 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
11141 rs | ISSIsAcqRel);
2359bf80
MR
11142 break;
11143 case 1: /* stlh */
9bb6558a
PM
11144 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
11145 rs | ISSIsAcqRel);
2359bf80
MR
11146 break;
11147 case 2: /* stl */
9bb6558a
PM
11148 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
11149 rs | ISSIsAcqRel);
2359bf80
MR
11150 break;
11151 default:
11152 abort();
11153 }
11154 tcg_temp_free_i32(tmp);
11155 }
96c55295 11156 } else if (is_ld) {
426f5abc 11157 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 11158 } else {
426f5abc 11159 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 11160 }
39d5492a 11161 tcg_temp_free_i32(addr);
96c55295
PM
11162
11163 if (is_lasr && is_ld) {
11164 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
11165 }
9ee6e8bb
PB
11166 }
11167 } else {
11168 /* Load/store multiple, RFE, SRS. */
11169 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 11170 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 11171 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11172 goto illegal_op;
00115976 11173 }
9ee6e8bb
PB
11174 if (insn & (1 << 20)) {
11175 /* rfe */
b0109805
PB
11176 addr = load_reg(s, rn);
11177 if ((insn & (1 << 24)) == 0)
11178 tcg_gen_addi_i32(addr, addr, -8);
11179 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 11180 tmp = tcg_temp_new_i32();
12dcc321 11181 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11182 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 11183 tmp2 = tcg_temp_new_i32();
12dcc321 11184 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
11185 if (insn & (1 << 21)) {
11186 /* Base writeback. */
b0109805
PB
11187 if (insn & (1 << 24)) {
11188 tcg_gen_addi_i32(addr, addr, 4);
11189 } else {
11190 tcg_gen_addi_i32(addr, addr, -4);
11191 }
11192 store_reg(s, rn, addr);
11193 } else {
7d1b0095 11194 tcg_temp_free_i32(addr);
9ee6e8bb 11195 }
b0109805 11196 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
11197 } else {
11198 /* srs */
81465888
PM
11199 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
11200 insn & (1 << 21));
9ee6e8bb
PB
11201 }
11202 } else {
5856d44e 11203 int i, loaded_base = 0;
39d5492a 11204 TCGv_i32 loaded_var;
7c0ed88e 11205 bool wback = extract32(insn, 21, 1);
9ee6e8bb 11206 /* Load/store multiple. */
b0109805 11207 addr = load_reg(s, rn);
9ee6e8bb
PB
11208 offset = 0;
11209 for (i = 0; i < 16; i++) {
11210 if (insn & (1 << i))
11211 offset += 4;
11212 }
7c0ed88e 11213
9ee6e8bb 11214 if (insn & (1 << 24)) {
b0109805 11215 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
11216 }
11217
7c0ed88e
PM
11218 if (s->v8m_stackcheck && rn == 13 && wback) {
11219 /*
11220 * If the writeback is incrementing SP rather than
11221 * decrementing it, and the initial SP is below the
11222 * stack limit but the final written-back SP would
11223 * be above, then we must not perform any memory
11224 * accesses, but it is IMPDEF whether we generate
11225 * an exception. We choose to do so in this case.
11226 * At this point 'addr' is the lowest address, so
11227 * either the original SP (if incrementing) or our
11228 * final SP (if decrementing), so that's what we check.
11229 */
11230 gen_helper_v8m_stackcheck(cpu_env, addr);
11231 }
11232
f764718d 11233 loaded_var = NULL;
9ee6e8bb
PB
11234 for (i = 0; i < 16; i++) {
11235 if ((insn & (1 << i)) == 0)
11236 continue;
11237 if (insn & (1 << 20)) {
11238 /* Load. */
e2592fad 11239 tmp = tcg_temp_new_i32();
12dcc321 11240 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11241 if (i == 15) {
3bb8a96f 11242 gen_bx_excret(s, tmp);
5856d44e
YO
11243 } else if (i == rn) {
11244 loaded_var = tmp;
11245 loaded_base = 1;
9ee6e8bb 11246 } else {
b0109805 11247 store_reg(s, i, tmp);
9ee6e8bb
PB
11248 }
11249 } else {
11250 /* Store. */
b0109805 11251 tmp = load_reg(s, i);
12dcc321 11252 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11253 tcg_temp_free_i32(tmp);
9ee6e8bb 11254 }
b0109805 11255 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 11256 }
5856d44e
YO
11257 if (loaded_base) {
11258 store_reg(s, rn, loaded_var);
11259 }
7c0ed88e 11260 if (wback) {
9ee6e8bb
PB
11261 /* Base register writeback. */
11262 if (insn & (1 << 24)) {
b0109805 11263 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
11264 }
11265 /* Fault if writeback register is in register list. */
11266 if (insn & (1 << rn))
11267 goto illegal_op;
b0109805
PB
11268 store_reg(s, rn, addr);
11269 } else {
7d1b0095 11270 tcg_temp_free_i32(addr);
9ee6e8bb
PB
11271 }
11272 }
11273 }
11274 break;
2af9ab77
JB
11275 case 5:
11276
9ee6e8bb 11277 op = (insn >> 21) & 0xf;
2af9ab77 11278 if (op == 6) {
62b44f05
AR
11279 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11280 goto illegal_op;
11281 }
2af9ab77
JB
11282 /* Halfword pack. */
11283 tmp = load_reg(s, rn);
11284 tmp2 = load_reg(s, rm);
11285 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
11286 if (insn & (1 << 5)) {
11287 /* pkhtb */
11288 if (shift == 0)
11289 shift = 31;
11290 tcg_gen_sari_i32(tmp2, tmp2, shift);
11291 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
11292 tcg_gen_ext16u_i32(tmp2, tmp2);
11293 } else {
11294 /* pkhbt */
11295 if (shift)
11296 tcg_gen_shli_i32(tmp2, tmp2, shift);
11297 tcg_gen_ext16u_i32(tmp, tmp);
11298 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
11299 }
11300 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 11301 tcg_temp_free_i32(tmp2);
3174f8e9
FN
11302 store_reg(s, rd, tmp);
11303 } else {
2af9ab77
JB
11304 /* Data processing register constant shift. */
11305 if (rn == 15) {
7d1b0095 11306 tmp = tcg_temp_new_i32();
2af9ab77
JB
11307 tcg_gen_movi_i32(tmp, 0);
11308 } else {
11309 tmp = load_reg(s, rn);
11310 }
11311 tmp2 = load_reg(s, rm);
11312
11313 shiftop = (insn >> 4) & 3;
11314 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11315 conds = (insn & (1 << 20)) != 0;
11316 logic_cc = (conds && thumb2_logic_op(op));
11317 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
11318 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
11319 goto illegal_op;
7d1b0095 11320 tcg_temp_free_i32(tmp2);
55203189
PM
11321 if (rd == 13 &&
11322 ((op == 2 && rn == 15) ||
11323 (op == 8 && rn == 13) ||
11324 (op == 13 && rn == 13))) {
11325 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
11326 store_sp_checked(s, tmp);
11327 } else if (rd != 15) {
2af9ab77
JB
11328 store_reg(s, rd, tmp);
11329 } else {
7d1b0095 11330 tcg_temp_free_i32(tmp);
2af9ab77 11331 }
3174f8e9 11332 }
9ee6e8bb
PB
11333 break;
11334 case 13: /* Misc data processing. */
11335 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
11336 if (op < 4 && (insn & 0xf000) != 0xf000)
11337 goto illegal_op;
11338 switch (op) {
11339 case 0: /* Register controlled shift. */
8984bd2e
PB
11340 tmp = load_reg(s, rn);
11341 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11342 if ((insn & 0x70) != 0)
11343 goto illegal_op;
a2d12f0f
PM
11344 /*
11345 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
11346 * - MOV, MOVS (register-shifted register), flagsetting
11347 */
9ee6e8bb 11348 op = (insn >> 21) & 3;
8984bd2e
PB
11349 logic_cc = (insn & (1 << 20)) != 0;
11350 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
11351 if (logic_cc)
11352 gen_logic_CC(tmp);
bedb8a6b 11353 store_reg(s, rd, tmp);
9ee6e8bb
PB
11354 break;
11355 case 1: /* Sign/zero extend. */
62b44f05
AR
11356 op = (insn >> 20) & 7;
11357 switch (op) {
11358 case 0: /* SXTAH, SXTH */
11359 case 1: /* UXTAH, UXTH */
11360 case 4: /* SXTAB, SXTB */
11361 case 5: /* UXTAB, UXTB */
11362 break;
11363 case 2: /* SXTAB16, SXTB16 */
11364 case 3: /* UXTAB16, UXTB16 */
11365 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11366 goto illegal_op;
11367 }
11368 break;
11369 default:
11370 goto illegal_op;
11371 }
11372 if (rn != 15) {
11373 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11374 goto illegal_op;
11375 }
11376 }
5e3f878a 11377 tmp = load_reg(s, rm);
9ee6e8bb 11378 shift = (insn >> 4) & 3;
1301f322 11379 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
11380 rotate, a shift is sufficient. */
11381 if (shift != 0)
f669df27 11382 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
11383 op = (insn >> 20) & 7;
11384 switch (op) {
5e3f878a
PB
11385 case 0: gen_sxth(tmp); break;
11386 case 1: gen_uxth(tmp); break;
11387 case 2: gen_sxtb16(tmp); break;
11388 case 3: gen_uxtb16(tmp); break;
11389 case 4: gen_sxtb(tmp); break;
11390 case 5: gen_uxtb(tmp); break;
62b44f05
AR
11391 default:
11392 g_assert_not_reached();
9ee6e8bb
PB
11393 }
11394 if (rn != 15) {
5e3f878a 11395 tmp2 = load_reg(s, rn);
9ee6e8bb 11396 if ((op >> 1) == 1) {
5e3f878a 11397 gen_add16(tmp, tmp2);
9ee6e8bb 11398 } else {
5e3f878a 11399 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11400 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11401 }
11402 }
5e3f878a 11403 store_reg(s, rd, tmp);
9ee6e8bb
PB
11404 break;
11405 case 2: /* SIMD add/subtract. */
62b44f05
AR
11406 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11407 goto illegal_op;
11408 }
9ee6e8bb
PB
11409 op = (insn >> 20) & 7;
11410 shift = (insn >> 4) & 7;
11411 if ((op & 3) == 3 || (shift & 3) == 3)
11412 goto illegal_op;
6ddbc6e4
PB
11413 tmp = load_reg(s, rn);
11414 tmp2 = load_reg(s, rm);
11415 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 11416 tcg_temp_free_i32(tmp2);
6ddbc6e4 11417 store_reg(s, rd, tmp);
9ee6e8bb
PB
11418 break;
11419 case 3: /* Other data processing. */
11420 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
11421 if (op < 4) {
11422 /* Saturating add/subtract. */
62b44f05
AR
11423 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11424 goto illegal_op;
11425 }
d9ba4830
PB
11426 tmp = load_reg(s, rn);
11427 tmp2 = load_reg(s, rm);
9ee6e8bb 11428 if (op & 1)
9ef39277 11429 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 11430 if (op & 2)
9ef39277 11431 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 11432 else
9ef39277 11433 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 11434 tcg_temp_free_i32(tmp2);
9ee6e8bb 11435 } else {
62b44f05
AR
11436 switch (op) {
11437 case 0x0a: /* rbit */
11438 case 0x08: /* rev */
11439 case 0x09: /* rev16 */
11440 case 0x0b: /* revsh */
11441 case 0x18: /* clz */
11442 break;
11443 case 0x10: /* sel */
11444 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11445 goto illegal_op;
11446 }
11447 break;
11448 case 0x20: /* crc32/crc32c */
11449 case 0x21:
11450 case 0x22:
11451 case 0x28:
11452 case 0x29:
11453 case 0x2a:
962fcbf2 11454 if (!dc_isar_feature(aa32_crc32, s)) {
62b44f05
AR
11455 goto illegal_op;
11456 }
11457 break;
11458 default:
11459 goto illegal_op;
11460 }
d9ba4830 11461 tmp = load_reg(s, rn);
9ee6e8bb
PB
11462 switch (op) {
11463 case 0x0a: /* rbit */
d9ba4830 11464 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
11465 break;
11466 case 0x08: /* rev */
66896cb8 11467 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
11468 break;
11469 case 0x09: /* rev16 */
d9ba4830 11470 gen_rev16(tmp);
9ee6e8bb
PB
11471 break;
11472 case 0x0b: /* revsh */
d9ba4830 11473 gen_revsh(tmp);
9ee6e8bb
PB
11474 break;
11475 case 0x10: /* sel */
d9ba4830 11476 tmp2 = load_reg(s, rm);
7d1b0095 11477 tmp3 = tcg_temp_new_i32();
0ecb72a5 11478 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 11479 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
11480 tcg_temp_free_i32(tmp3);
11481 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11482 break;
11483 case 0x18: /* clz */
7539a012 11484 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 11485 break;
eb0ecd5a
WN
11486 case 0x20:
11487 case 0x21:
11488 case 0x22:
11489 case 0x28:
11490 case 0x29:
11491 case 0x2a:
11492 {
11493 /* crc32/crc32c */
11494 uint32_t sz = op & 0x3;
11495 uint32_t c = op & 0x8;
11496
eb0ecd5a 11497 tmp2 = load_reg(s, rm);
aa633469
PM
11498 if (sz == 0) {
11499 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
11500 } else if (sz == 1) {
11501 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
11502 }
eb0ecd5a
WN
11503 tmp3 = tcg_const_i32(1 << sz);
11504 if (c) {
11505 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
11506 } else {
11507 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
11508 }
11509 tcg_temp_free_i32(tmp2);
11510 tcg_temp_free_i32(tmp3);
11511 break;
11512 }
9ee6e8bb 11513 default:
62b44f05 11514 g_assert_not_reached();
9ee6e8bb
PB
11515 }
11516 }
d9ba4830 11517 store_reg(s, rd, tmp);
9ee6e8bb
PB
11518 break;
11519 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
11520 switch ((insn >> 20) & 7) {
11521 case 0: /* 32 x 32 -> 32 */
11522 case 7: /* Unsigned sum of absolute differences. */
11523 break;
11524 case 1: /* 16 x 16 -> 32 */
11525 case 2: /* Dual multiply add. */
11526 case 3: /* 32 * 16 -> 32msb */
11527 case 4: /* Dual multiply subtract. */
11528 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11529 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11530 goto illegal_op;
11531 }
11532 break;
11533 }
9ee6e8bb 11534 op = (insn >> 4) & 0xf;
d9ba4830
PB
11535 tmp = load_reg(s, rn);
11536 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11537 switch ((insn >> 20) & 7) {
11538 case 0: /* 32 x 32 -> 32 */
d9ba4830 11539 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 11540 tcg_temp_free_i32(tmp2);
9ee6e8bb 11541 if (rs != 15) {
d9ba4830 11542 tmp2 = load_reg(s, rs);
9ee6e8bb 11543 if (op)
d9ba4830 11544 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 11545 else
d9ba4830 11546 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11547 tcg_temp_free_i32(tmp2);
9ee6e8bb 11548 }
9ee6e8bb
PB
11549 break;
11550 case 1: /* 16 x 16 -> 32 */
d9ba4830 11551 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11552 tcg_temp_free_i32(tmp2);
9ee6e8bb 11553 if (rs != 15) {
d9ba4830 11554 tmp2 = load_reg(s, rs);
9ef39277 11555 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11556 tcg_temp_free_i32(tmp2);
9ee6e8bb 11557 }
9ee6e8bb
PB
11558 break;
11559 case 2: /* Dual multiply add. */
11560 case 4: /* Dual multiply subtract. */
11561 if (op)
d9ba4830
PB
11562 gen_swap_half(tmp2);
11563 gen_smul_dual(tmp, tmp2);
9ee6e8bb 11564 if (insn & (1 << 22)) {
e1d177b9 11565 /* This subtraction cannot overflow. */
d9ba4830 11566 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11567 } else {
e1d177b9
PM
11568 /* This addition cannot overflow 32 bits;
11569 * however it may overflow considered as a signed
11570 * operation, in which case we must set the Q flag.
11571 */
9ef39277 11572 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 11573 }
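                /* Worked example (illustrative): if both halfword products
                 * are 0x40000000 (e.g. -32768 * -32768 twice), their sum is
                 * 0x80000000, which overflows as a signed 32-bit addition,
                 * so gen_helper_add_setq() sets the Q flag in that case.
                 */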
7d1b0095 11574 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11575 if (rs != 15)
11576 {
d9ba4830 11577 tmp2 = load_reg(s, rs);
9ef39277 11578 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11579 tcg_temp_free_i32(tmp2);
9ee6e8bb 11580 }
9ee6e8bb
PB
11581 break;
11582 case 3: /* 32 * 16 -> 32msb */
11583 if (op)
d9ba4830 11584 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 11585 else
d9ba4830 11586 gen_sxth(tmp2);
a7812ae4
PB
11587 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11588 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 11589 tmp = tcg_temp_new_i32();
ecc7b3aa 11590 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 11591 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
11592 if (rs != 15)
11593 {
d9ba4830 11594 tmp2 = load_reg(s, rs);
9ef39277 11595 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11596 tcg_temp_free_i32(tmp2);
9ee6e8bb 11597 }
9ee6e8bb 11598 break;
838fa72d
AJ
11599 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11600 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11601 if (rs != 15) {
838fa72d
AJ
11602 tmp = load_reg(s, rs);
11603 if (insn & (1 << 20)) {
11604 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 11605 } else {
838fa72d 11606 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 11607 }
2c0262af 11608 }
838fa72d
AJ
11609 if (insn & (1 << 4)) {
11610 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
11611 }
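            /* Rounding note (illustrative): adding 0x80000000 before the
             * right shift by 32 rounds to nearest; e.g. a 64-bit value of
             * 0x00000001_80000000 yields 2 rather than the truncated 1.
             */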
11612 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 11613 tmp = tcg_temp_new_i32();
ecc7b3aa 11614 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 11615 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
11616 break;
11617 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 11618 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 11619 tcg_temp_free_i32(tmp2);
9ee6e8bb 11620 if (rs != 15) {
d9ba4830
PB
11621 tmp2 = load_reg(s, rs);
11622 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11623 tcg_temp_free_i32(tmp2);
5fd46862 11624 }
9ee6e8bb 11625 break;
2c0262af 11626 }
d9ba4830 11627 store_reg(s, rd, tmp);
2c0262af 11628 break;
9ee6e8bb
PB
11629 case 6: case 7: /* 64-bit multiply, Divide. */
11630 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
11631 tmp = load_reg(s, rn);
11632 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
11633 if ((op & 0x50) == 0x10) {
11634 /* sdiv, udiv */
7e0cf8b4 11635 if (!dc_isar_feature(thumb_div, s)) {
9ee6e8bb 11636 goto illegal_op;
47789990 11637 }
9ee6e8bb 11638 if (op & 0x20)
5e3f878a 11639 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 11640 else
5e3f878a 11641 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 11642 tcg_temp_free_i32(tmp2);
5e3f878a 11643 store_reg(s, rd, tmp);
9ee6e8bb
PB
11644 } else if ((op & 0xe) == 0xc) {
11645 /* Dual multiply accumulate long. */
62b44f05
AR
11646 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11647 tcg_temp_free_i32(tmp);
11648 tcg_temp_free_i32(tmp2);
11649 goto illegal_op;
11650 }
9ee6e8bb 11651 if (op & 1)
5e3f878a
PB
11652 gen_swap_half(tmp2);
11653 gen_smul_dual(tmp, tmp2);
9ee6e8bb 11654 if (op & 0x10) {
5e3f878a 11655 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 11656 } else {
5e3f878a 11657 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 11658 }
7d1b0095 11659 tcg_temp_free_i32(tmp2);
a7812ae4
PB
11660 /* BUGFIX */
11661 tmp64 = tcg_temp_new_i64();
11662 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11663 tcg_temp_free_i32(tmp);
a7812ae4
PB
11664 gen_addq(s, tmp64, rs, rd);
11665 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11666 tcg_temp_free_i64(tmp64);
2c0262af 11667 } else {
9ee6e8bb
PB
11668 if (op & 0x20) {
11669 /* Unsigned 64-bit multiply */
a7812ae4 11670 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 11671 } else {
9ee6e8bb
PB
11672 if (op & 8) {
11673 /* smlalxy */
62b44f05
AR
11674 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11675 tcg_temp_free_i32(tmp2);
11676 tcg_temp_free_i32(tmp);
11677 goto illegal_op;
11678 }
5e3f878a 11679 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11680 tcg_temp_free_i32(tmp2);
a7812ae4
PB
11681 tmp64 = tcg_temp_new_i64();
11682 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11683 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11684 } else {
11685 /* Signed 64-bit multiply */
a7812ae4 11686 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11687 }
b5ff1b31 11688 }
9ee6e8bb
PB
11689 if (op & 4) {
11690 /* umaal */
62b44f05
AR
11691 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11692 tcg_temp_free_i64(tmp64);
11693 goto illegal_op;
11694 }
a7812ae4
PB
11695 gen_addq_lo(s, tmp64, rs);
11696 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
11697 } else if (op & 0x40) {
11698 /* 64-bit accumulate. */
a7812ae4 11699 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 11700 }
a7812ae4 11701 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11702 tcg_temp_free_i64(tmp64);
5fd46862 11703 }
2c0262af 11704 break;
9ee6e8bb
PB
11705 }
11706 break;
11707 case 6: case 7: case 14: case 15:
11708 /* Coprocessor. */
7517748e
PM
11709 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11710 /* We don't currently implement M profile FP support,
b1e5336a
PM
11711 * so this entire space should give a NOCP fault, with
11712 * the exception of the v8M VLLDM and VLSTM insns, which
11713 * must be NOPs in Secure state and UNDEF in Nonsecure state.
7517748e 11714 */
b1e5336a
PM
11715 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
11716 (insn & 0xffa00f00) == 0xec200a00) {
11717 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11718 * - VLLDM, VLSTM
11719 * We choose to UNDEF if the RAZ bits are non-zero.
11720 */
11721 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
11722 goto illegal_op;
11723 }
11724 /* Just NOP since FP support is not implemented */
11725 break;
11726 }
11727 /* All other insns: NOCP */
7517748e
PM
11728 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
11729 default_exception_el(s));
11730 break;
11731 }
0052087e
RH
11732 if ((insn & 0xfe000a00) == 0xfc000800
11733 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11734 /* The Thumb2 and ARM encodings are identical. */
11735 if (disas_neon_insn_3same_ext(s, insn)) {
11736 goto illegal_op;
11737 }
11738 } else if ((insn & 0xff000a00) == 0xfe000800
11739 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11740 /* The Thumb2 and ARM encodings are identical. */
11741 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
11742 goto illegal_op;
11743 }
11744 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 11745 /* Translate into the equivalent ARM encoding. */
f06053e3 11746 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 11747 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 11748 goto illegal_op;
7dcc1f89 11749 }
6a57f3eb 11750 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 11751 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
11752 goto illegal_op;
11753 }
9ee6e8bb
PB
11754 } else {
11755 if (insn & (1 << 28))
11756 goto illegal_op;
7dcc1f89 11757 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 11758 goto illegal_op;
7dcc1f89 11759 }
9ee6e8bb
PB
11760 }
11761 break;
11762 case 8: case 9: case 10: case 11:
11763 if (insn & (1 << 15)) {
11764 /* Branches, misc control. */
11765 if (insn & 0x5000) {
11766 /* Unconditional branch. */
11767 /* signextend(hw1[10:0]) -> offset[31:12]. */
11768 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11769 /* hw2[10:0] -> offset[11:1]. */
11770 offset |= (insn & 0x7ff) << 1;
11771 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22];
11772 offset[24:22] already have the same value because of the
11773 sign extension above. */
11774 offset ^= ((~insn) & (1 << 13)) << 10;
11775 offset ^= ((~insn) & (1 << 11)) << 11;
11776
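            /* Worked example (illustrative): the two XORs implement
             * I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) from the ARM ARM
             * T4 encoding, with J1 = insn[13] and J2 = insn[11]. With
             * S = 0 and J1 = J2 = 0, the sign extension left
             * offset[23:22] = 0 and the XORs flip both bits to 1, giving
             * the correct forward branch offset.
             */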
9ee6e8bb
PB
11777 if (insn & (1 << 14)) {
11778 /* Branch and link. */
3174f8e9 11779 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 11780 }
3b46e624 11781
b0109805 11782 offset += s->pc;
9ee6e8bb
PB
11783 if (insn & (1 << 12)) {
11784 /* b/bl */
b0109805 11785 gen_jmp(s, offset);
9ee6e8bb
PB
11786 } else {
11787 /* blx */
b0109805 11788 offset &= ~(uint32_t)2;
be5e7a76 11789 /* thumb2 bx, no need to check */
b0109805 11790 gen_bx_im(s, offset);
2c0262af 11791 }
9ee6e8bb
PB
11792 } else if (((insn >> 23) & 7) == 7) {
11793 /* Misc control */
11794 if (insn & (1 << 13))
11795 goto illegal_op;
11796
11797 if (insn & (1 << 26)) {
001b3cab
PM
11798 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11799 goto illegal_op;
11800 }
37e6456e
PM
11801 if (!(insn & (1 << 20))) {
11802 /* Hypervisor call (v7) */
11803 int imm16 = extract32(insn, 16, 4) << 12
11804 | extract32(insn, 0, 12);
11805 ARCH(7);
11806 if (IS_USER(s)) {
11807 goto illegal_op;
11808 }
11809 gen_hvc(s, imm16);
11810 } else {
11811 /* Secure monitor call (v6+) */
11812 ARCH(6K);
11813 if (IS_USER(s)) {
11814 goto illegal_op;
11815 }
11816 gen_smc(s);
11817 }
2c0262af 11818 } else {
9ee6e8bb
PB
11819 op = (insn >> 20) & 7;
11820 switch (op) {
11821 case 0: /* msr cpsr. */
b53d8923 11822 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 11823 tmp = load_reg(s, rn);
b28b3377
PM
11824 /* the constant is the mask and SYSm fields */
11825 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 11826 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 11827 tcg_temp_free_i32(addr);
7d1b0095 11828 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11829 gen_lookup_tb(s);
11830 break;
11831 }
11832 /* fall through */
11833 case 1: /* msr spsr. */
b53d8923 11834 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11835 goto illegal_op;
b53d8923 11836 }
8bfd0550
PM
11837
11838 if (extract32(insn, 5, 1)) {
11839 /* MSR (banked) */
11840 int sysm = extract32(insn, 8, 4) |
11841 (extract32(insn, 4, 1) << 4);
11842 int r = op & 1;
11843
11844 gen_msr_banked(s, r, sysm, rm);
11845 break;
11846 }
11847
11848 /* MSR (for PSRs) */
2fbac54b
FN
11849 tmp = load_reg(s, rn);
11850 if (gen_set_psr(s,
7dcc1f89 11851 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 11852 op == 1, tmp))
9ee6e8bb
PB
11853 goto illegal_op;
11854 break;
11855 case 2: /* cps, nop-hint. */
11856 if (((insn >> 8) & 7) == 0) {
11857 gen_nop_hint(s, insn & 0xff);
11858 }
11859 /* Implemented as NOP in user mode. */
11860 if (IS_USER(s))
11861 break;
11862 offset = 0;
11863 imm = 0;
11864 if (insn & (1 << 10)) {
11865 if (insn & (1 << 7))
11866 offset |= CPSR_A;
11867 if (insn & (1 << 6))
11868 offset |= CPSR_I;
11869 if (insn & (1 << 5))
11870 offset |= CPSR_F;
11871 if (insn & (1 << 9))
11872 imm = CPSR_A | CPSR_I | CPSR_F;
11873 }
11874 if (insn & (1 << 8)) {
11875 offset |= 0x1f;
11876 imm |= (insn & 0x1f);
11877 }
11878 if (offset) {
2fbac54b 11879 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
11880 }
11881 break;
11882 case 3: /* Special control operations. */
14120108 11883 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 11884 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
11885 goto illegal_op;
11886 }
9ee6e8bb
PB
11887 op = (insn >> 4) & 0xf;
11888 switch (op) {
11889 case 2: /* clrex */
426f5abc 11890 gen_clrex(s);
9ee6e8bb
PB
11891 break;
11892 case 4: /* dsb */
11893 case 5: /* dmb */
61e4c432 11894 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 11895 break;
6df99dec
SS
11896 case 6: /* isb */
11897 /* We need to break the TB after this insn
11898 * to execute self-modifying code correctly
11899 * and also to take any pending interrupts
11900 * immediately.
11901 */
0b609cc1 11902 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 11903 break;
9888bd1e
RH
11904 case 7: /* sb */
11905 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
11906 goto illegal_op;
11907 }
11908 /*
11909 * TODO: There is no speculation barrier opcode
11910 * for TCG; MB and end the TB instead.
11911 */
11912 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
11913 gen_goto_tb(s, 0, s->pc & ~1);
11914 break;
9ee6e8bb
PB
11915 default:
11916 goto illegal_op;
11917 }
11918 break;
11919 case 4: /* bxj */
9d7c59c8
PM
11920 /* Trivial implementation equivalent to bx.
11921 * This instruction doesn't exist at all for M-profile.
11922 */
11923 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11924 goto illegal_op;
11925 }
d9ba4830
PB
11926 tmp = load_reg(s, rn);
11927 gen_bx(s, tmp);
9ee6e8bb
PB
11928 break;
11929 case 5: /* Exception return. */
b8b45b68
RV
11930 if (IS_USER(s)) {
11931 goto illegal_op;
11932 }
11933 if (rn != 14 || rd != 15) {
11934 goto illegal_op;
11935 }
55c544ed
PM
11936 if (s->current_el == 2) {
11937 /* ERET from Hyp uses ELR_Hyp, not LR */
11938 if (insn & 0xff) {
11939 goto illegal_op;
11940 }
11941 tmp = load_cpu_field(elr_el[2]);
11942 } else {
11943 tmp = load_reg(s, rn);
11944 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11945 }
b8b45b68
RV
11946 gen_exception_return(s, tmp);
11947 break;
8bfd0550 11948 case 6: /* MRS */
43ac6574
PM
11949 if (extract32(insn, 5, 1) &&
11950 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11951 /* MRS (banked) */
11952 int sysm = extract32(insn, 16, 4) |
11953 (extract32(insn, 4, 1) << 4);
11954
11955 gen_mrs_banked(s, 0, sysm, rd);
11956 break;
11957 }
11958
3d54026f
PM
11959 if (extract32(insn, 16, 4) != 0xf) {
11960 goto illegal_op;
11961 }
11962 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11963 extract32(insn, 0, 8) != 0) {
11964 goto illegal_op;
11965 }
11966
8bfd0550 11967 /* mrs cpsr */
7d1b0095 11968 tmp = tcg_temp_new_i32();
b53d8923 11969 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
11970 addr = tcg_const_i32(insn & 0xff);
11971 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 11972 tcg_temp_free_i32(addr);
9ee6e8bb 11973 } else {
9ef39277 11974 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 11975 }
8984bd2e 11976 store_reg(s, rd, tmp);
9ee6e8bb 11977 break;
8bfd0550 11978 case 7: /* MRS */
43ac6574
PM
11979 if (extract32(insn, 5, 1) &&
11980 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11981 /* MRS (banked) */
11982 int sysm = extract32(insn, 16, 4) |
11983 (extract32(insn, 4, 1) << 4);
11984
11985 gen_mrs_banked(s, 1, sysm, rd);
11986 break;
11987 }
11988
11989 /* mrs spsr. */
9ee6e8bb 11990 /* Not accessible in user mode. */
b53d8923 11991 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11992 goto illegal_op;
b53d8923 11993 }
3d54026f
PM
11994
11995 if (extract32(insn, 16, 4) != 0xf ||
11996 extract32(insn, 0, 8) != 0) {
11997 goto illegal_op;
11998 }
11999
d9ba4830
PB
12000 tmp = load_cpu_field(spsr);
12001 store_reg(s, rd, tmp);
9ee6e8bb 12002 break;
2c0262af
FB
12003 }
12004 }
9ee6e8bb
PB
12005 } else {
12006 /* Conditional branch. */
12007 op = (insn >> 22) & 0xf;
12008 /* Generate a conditional jump to next instruction. */
c2d9644e 12009 arm_skip_unless(s, op);
9ee6e8bb
PB
12010
12011 /* offset[11:1] = insn[10:0] */
12012 offset = (insn & 0x7ff) << 1;
12013 /* offset[17:12] = insn[21:16]. */
12014 offset |= (insn & 0x003f0000) >> 4;
12015 /* offset[31:20] = insn[26]. */
12016 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
12017 /* offset[18] = insn[13]. */
12018 offset |= (insn & (1 << 13)) << 5;
12019 /* offset[19] = insn[11]. */
12020 offset |= (insn & (1 << 11)) << 8;
12021
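            /* Summary (illustrative): B<cond>.W builds a 21-bit offset,
             * SignExtend(S:J2:J1:imm6:imm11:'0') in ARM ARM terms, from
             * S = insn[26] (-> offset[31:20] via sign extension),
             * J2 = insn[11] (-> offset[19]), J1 = insn[13] (-> offset[18]),
             * imm6 = insn[21:16] (-> offset[17:12]) and imm11 = insn[10:0]
             * (-> offset[11:1]); unlike the unconditional form there is no
             * I1/I2 XOR here, the J bits are used directly.
             */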
12022 /* jump to the offset */
b0109805 12023 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
12024 }
12025 } else {
55203189
PM
12026 /*
12027 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
12028 * - Data-processing (modified immediate, plain binary immediate)
12029 */
9ee6e8bb 12030 if (insn & (1 << 25)) {
55203189
PM
12031 /*
12032 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
12033 * - Data-processing (plain binary immediate)
12034 */
9ee6e8bb
PB
12035 if (insn & (1 << 24)) {
12036 if (insn & (1 << 20))
12037 goto illegal_op;
12038 /* Bitfield/Saturate. */
12039 op = (insn >> 21) & 7;
12040 imm = insn & 0x1f;
12041 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 12042 if (rn == 15) {
7d1b0095 12043 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
12044 tcg_gen_movi_i32(tmp, 0);
12045 } else {
12046 tmp = load_reg(s, rn);
12047 }
9ee6e8bb
PB
12048 switch (op) {
12049 case 2: /* Signed bitfield extract. */
12050 imm++;
12051 if (shift + imm > 32)
12052 goto illegal_op;
59a71b4c
RH
12053 if (imm < 32) {
12054 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
12055 }
9ee6e8bb
PB
12056 break;
12057 case 6: /* Unsigned bitfield extract. */
12058 imm++;
12059 if (shift + imm > 32)
12060 goto illegal_op;
59a71b4c
RH
12061 if (imm < 32) {
12062 tcg_gen_extract_i32(tmp, tmp, shift, imm);
12063 }
9ee6e8bb
PB
12064 break;
12065 case 3: /* Bitfield insert/clear. */
12066 if (imm < shift)
12067 goto illegal_op;
12068 imm = imm + 1 - shift;
12069 if (imm != 32) {
6ddbc6e4 12070 tmp2 = load_reg(s, rd);
d593c48e 12071 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 12072 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
12073 }
12074 break;
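                    /* Illustrative semantics (sketch) of the bitfield ops,
                     * with width = imm and mask = (1u << width) - 1:
                     *   SBFX: rd = (int32_t)(rn << (32 - shift - width))
                     *              >> (32 - width)
                     *   UBFX: rd = (rn >> shift) & mask
                     *   BFI:  rd = (rd & ~(mask << shift))
                     *              | ((rn & mask) << shift)
                     * which is what tcg_gen_sextract/extract/deposit_i32
                     * implement here.
                     */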
12075 case 7:
12076 goto illegal_op;
12077 default: /* Saturate. */
9ee6e8bb
PB
12078 if (shift) {
12079 if (op & 1)
6ddbc6e4 12080 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 12081 else
6ddbc6e4 12082 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 12083 }
6ddbc6e4 12084 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
12085 if (op & 4) {
12086 /* Unsigned. */
62b44f05
AR
12087 if ((op & 1) && shift == 0) {
12088 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
12089 tcg_temp_free_i32(tmp);
12090 tcg_temp_free_i32(tmp2);
12091 goto illegal_op;
12092 }
9ef39277 12093 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 12094 } else {
9ef39277 12095 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 12096 }
2c0262af 12097 } else {
9ee6e8bb 12098 /* Signed. */
62b44f05
AR
12099 if ((op & 1) && shift == 0) {
12100 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
12101 tcg_temp_free_i32(tmp);
12102 tcg_temp_free_i32(tmp2);
12103 goto illegal_op;
12104 }
9ef39277 12105 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 12106 } else {
9ef39277 12107 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 12108 }
2c0262af 12109 }
b75263d6 12110 tcg_temp_free_i32(tmp2);
9ee6e8bb 12111 break;
2c0262af 12112 }
6ddbc6e4 12113 store_reg(s, rd, tmp);
9ee6e8bb
PB
12114 } else {
12115 imm = ((insn & 0x04000000) >> 15)
12116 | ((insn & 0x7000) >> 4) | (insn & 0xff);
12117 if (insn & (1 << 22)) {
12118 /* 16-bit immediate. */
12119 imm |= (insn >> 4) & 0xf000;
12120 if (insn & (1 << 23)) {
12121 /* movt */
5e3f878a 12122 tmp = load_reg(s, rd);
86831435 12123 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 12124 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 12125 } else {
9ee6e8bb 12126 /* movw */
7d1b0095 12127 tmp = tcg_temp_new_i32();
5e3f878a 12128 tcg_gen_movi_i32(tmp, imm);
2c0262af 12129 }
55203189 12130 store_reg(s, rd, tmp);
2c0262af 12131 } else {
9ee6e8bb
PB
12132 /* Add/sub 12-bit immediate. */
12133 if (rn == 15) {
b0109805 12134 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 12135 if (insn & (1 << 23))
b0109805 12136 offset -= imm;
9ee6e8bb 12137 else
b0109805 12138 offset += imm;
7d1b0095 12139 tmp = tcg_temp_new_i32();
5e3f878a 12140 tcg_gen_movi_i32(tmp, offset);
55203189 12141 store_reg(s, rd, tmp);
2c0262af 12142 } else {
5e3f878a 12143 tmp = load_reg(s, rn);
9ee6e8bb 12144 if (insn & (1 << 23))
5e3f878a 12145 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 12146 else
5e3f878a 12147 tcg_gen_addi_i32(tmp, tmp, imm);
55203189
PM
12148 if (rn == 13 && rd == 13) {
12149 /* ADD SP, SP, imm or SUB SP, SP, imm */
12150 store_sp_checked(s, tmp);
12151 } else {
12152 store_reg(s, rd, tmp);
12153 }
2c0262af 12154 }
9ee6e8bb 12155 }
191abaa2 12156 }
9ee6e8bb 12157 } else {
55203189
PM
12158 /*
12159 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
12160 * - Data-processing (modified immediate)
12161 */
9ee6e8bb
PB
12162 int shifter_out = 0;
12163 /* modified 12-bit immediate. */
12164 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
12165 imm = (insn & 0xff);
12166 switch (shift) {
12167 case 0: /* XY */
12168 /* Nothing to do. */
12169 break;
12170 case 1: /* 00XY00XY */
12171 imm |= imm << 16;
12172 break;
12173 case 2: /* XY00XY00 */
12174 imm |= imm << 16;
12175 imm <<= 8;
12176 break;
12177 case 3: /* XYXYXYXY */
12178 imm |= imm << 16;
12179 imm |= imm << 8;
12180 break;
12181 default: /* Rotated constant. */
12182 shift = (shift << 1) | (imm >> 7);
12183 imm |= 0x80;
12184 imm = imm << (32 - shift);
12185 shifter_out = 1;
12186 break;
b5ff1b31 12187 }
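                /* Worked examples (illustrative): imm8 = 0xAB with shift
                 * field 0b0011 expands to 0xABABABAB; with shift field
                 * 0b1000 and imm8 = 0xAB (bit 7 set), shift becomes 17 and
                 * the constant is 0xAB << (32 - 17) = 0x00558000, i.e.
                 * 0x000000AB ROR 17.
                 */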
7d1b0095 12188 tmp2 = tcg_temp_new_i32();
3174f8e9 12189 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 12190 rn = (insn >> 16) & 0xf;
3174f8e9 12191 if (rn == 15) {
7d1b0095 12192 tmp = tcg_temp_new_i32();
3174f8e9
FN
12193 tcg_gen_movi_i32(tmp, 0);
12194 } else {
12195 tmp = load_reg(s, rn);
12196 }
9ee6e8bb
PB
12197 op = (insn >> 21) & 0xf;
12198 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 12199 shifter_out, tmp, tmp2))
9ee6e8bb 12200 goto illegal_op;
7d1b0095 12201 tcg_temp_free_i32(tmp2);
9ee6e8bb 12202 rd = (insn >> 8) & 0xf;
55203189
PM
12203 if (rd == 13 && rn == 13
12204 && (op == 8 || op == 13)) {
12205 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
12206 store_sp_checked(s, tmp);
12207 } else if (rd != 15) {
3174f8e9
FN
12208 store_reg(s, rd, tmp);
12209 } else {
7d1b0095 12210 tcg_temp_free_i32(tmp);
2c0262af 12211 }
2c0262af 12212 }
9ee6e8bb
PB
12213 }
12214 break;
12215 case 12: /* Load/store single data item. */
12216 {
12217 int postinc = 0;
12218 int writeback = 0;
a99caa48 12219 int memidx;
9bb6558a
PM
12220 ISSInfo issinfo;
12221
9ee6e8bb 12222 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 12223 if (disas_neon_ls_insn(s, insn)) {
c1713132 12224 goto illegal_op;
7dcc1f89 12225 }
9ee6e8bb
PB
12226 break;
12227 }
a2fdc890
PM
12228 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
12229 if (rs == 15) {
12230 if (!(insn & (1 << 20))) {
12231 goto illegal_op;
12232 }
12233 if (op != 2) {
12234 /* Byte or halfword load space with dest == r15: memory hints.
12235 * Catch them early so we don't emit pointless addressing code.
12236 * This space is a mix of:
12237 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
12238 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
12239 * cores)
12240 * unallocated hints, which must be treated as NOPs
12241 * UNPREDICTABLE space, which we NOP or UNDEF depending on
12242 * which is easiest for the decoding logic
12243 * Some space which must UNDEF
12244 */
12245 int op1 = (insn >> 23) & 3;
12246 int op2 = (insn >> 6) & 0x3f;
12247 if (op & 2) {
12248 goto illegal_op;
12249 }
12250 if (rn == 15) {
02afbf64
PM
12251 /* UNPREDICTABLE, unallocated hint or
12252 * PLD/PLDW/PLI (literal)
12253 */
2eea841c 12254 return;
a2fdc890
PM
12255 }
12256 if (op1 & 1) {
2eea841c 12257 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
12258 }
12259 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 12260 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
12261 }
12262 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 12263 goto illegal_op;
a2fdc890
PM
12264 }
12265 }
a99caa48 12266 memidx = get_mem_index(s);
9ee6e8bb 12267 if (rn == 15) {
7d1b0095 12268 addr = tcg_temp_new_i32();
9ee6e8bb
PB
12269 /* PC relative. */
12270 /* s->pc has already been incremented by 4. */
12271 imm = s->pc & 0xfffffffc;
12272 if (insn & (1 << 23))
12273 imm += insn & 0xfff;
12274 else
12275 imm -= insn & 0xfff;
b0109805 12276 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 12277 } else {
b0109805 12278 addr = load_reg(s, rn);
9ee6e8bb
PB
12279 if (insn & (1 << 23)) {
12280 /* Positive offset. */
12281 imm = insn & 0xfff;
b0109805 12282 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 12283 } else {
9ee6e8bb 12284 imm = insn & 0xff;
2a0308c5
PM
12285 switch ((insn >> 8) & 0xf) {
12286 case 0x0: /* Shifted Register. */
9ee6e8bb 12287 shift = (insn >> 4) & 0xf;
2a0308c5
PM
12288 if (shift > 3) {
12289 tcg_temp_free_i32(addr);
18c9b560 12290 goto illegal_op;
2a0308c5 12291 }
b26eefb6 12292 tmp = load_reg(s, rm);
9ee6e8bb 12293 if (shift)
b26eefb6 12294 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 12295 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12296 tcg_temp_free_i32(tmp);
9ee6e8bb 12297 break;
2a0308c5 12298 case 0xc: /* Negative offset. */
b0109805 12299 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 12300 break;
2a0308c5 12301 case 0xe: /* User privilege. */
b0109805 12302 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 12303 memidx = get_a32_user_mem_index(s);
9ee6e8bb 12304 break;
2a0308c5 12305 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
12306 imm = -imm;
12307 /* Fall through. */
2a0308c5 12308 case 0xb: /* Post-increment. */
9ee6e8bb
PB
12309 postinc = 1;
12310 writeback = 1;
12311 break;
2a0308c5 12312 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
12313 imm = -imm;
12314 /* Fall through. */
2a0308c5 12315 case 0xf: /* Pre-increment. */
9ee6e8bb
PB
12316 writeback = 1;
12317 break;
12318 default:
2a0308c5 12319 tcg_temp_free_i32(addr);
b7bcbe95 12320 goto illegal_op;
9ee6e8bb
PB
12321 }
12322 }
12323 }
9bb6558a
PM
12324
12325 issinfo = writeback ? ISSInvalid : rs;
12326
0bc003ba
PM
12327 if (s->v8m_stackcheck && rn == 13 && writeback) {
12328 /*
12329 * Stackcheck. Here we know 'addr' is the current SP;
12330 * if imm is +ve we're moving SP up, else down. It is
12331 * UNKNOWN whether the limit check triggers when SP starts
12332 * below the limit and ends up above it; we chose to do so.
12333 */
12334 if ((int32_t)imm < 0) {
12335 TCGv_i32 newsp = tcg_temp_new_i32();
12336
12337 tcg_gen_addi_i32(newsp, addr, imm);
12338 gen_helper_v8m_stackcheck(cpu_env, newsp);
12339 tcg_temp_free_i32(newsp);
12340 } else {
12341 gen_helper_v8m_stackcheck(cpu_env, addr);
12342 }
12343 }
12344
12345 if (writeback && !postinc) {
12346 tcg_gen_addi_i32(addr, addr, imm);
12347 }
12348
9ee6e8bb
PB
12349 if (insn & (1 << 20)) {
12350 /* Load. */
5a839c0d 12351 tmp = tcg_temp_new_i32();
a2fdc890 12352 switch (op) {
5a839c0d 12353 case 0:
9bb6558a 12354 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12355 break;
12356 case 4:
9bb6558a 12357 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12358 break;
12359 case 1:
9bb6558a 12360 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12361 break;
12362 case 5:
9bb6558a 12363 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12364 break;
12365 case 2:
9bb6558a 12366 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 12367 break;
2a0308c5 12368 default:
5a839c0d 12369 tcg_temp_free_i32(tmp);
2a0308c5
PM
12370 tcg_temp_free_i32(addr);
12371 goto illegal_op;
a2fdc890
PM
12372 }
12373 if (rs == 15) {
3bb8a96f 12374 gen_bx_excret(s, tmp);
9ee6e8bb 12375 } else {
a2fdc890 12376 store_reg(s, rs, tmp);
9ee6e8bb
PB
12377 }
12378 } else {
12379 /* Store. */
b0109805 12380 tmp = load_reg(s, rs);
9ee6e8bb 12381 switch (op) {
5a839c0d 12382 case 0:
9bb6558a 12383 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12384 break;
12385 case 1:
9bb6558a 12386 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
12387 break;
12388 case 2:
9bb6558a 12389 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 12390 break;
2a0308c5 12391 default:
5a839c0d 12392 tcg_temp_free_i32(tmp);
2a0308c5
PM
12393 tcg_temp_free_i32(addr);
12394 goto illegal_op;
b7bcbe95 12395 }
5a839c0d 12396 tcg_temp_free_i32(tmp);
2c0262af 12397 }
9ee6e8bb 12398 if (postinc)
b0109805
PB
12399 tcg_gen_addi_i32(addr, addr, imm);
12400 if (writeback) {
12401 store_reg(s, rn, addr);
12402 } else {
7d1b0095 12403 tcg_temp_free_i32(addr);
b0109805 12404 }
9ee6e8bb
PB
12405 }
12406 break;
12407 default:
12408 goto illegal_op;
2c0262af 12409 }
2eea841c 12410 return;
9ee6e8bb 12411illegal_op:
2eea841c
PM
12412 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
12413 default_exception_el(s));
2c0262af
FB
12414}
12415
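/* Illustrative note (a sketch of the caller, which lies outside this
 * excerpt): the translator combines the two halfwords as roughly
 *     uint32_t insn = (hw1 << 16) | hw2;
 * before calling disas_thumb2_insn(), so hw1 occupies insn[31:16] and hw2
 * occupies insn[15:0], which is why 'insn & 0x7ff' above extracts
 * hw2[10:0].
 */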
296e5a0a 12416static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 12417{
296e5a0a 12418 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
12419 int32_t offset;
12420 int i;
39d5492a
PM
12421 TCGv_i32 tmp;
12422 TCGv_i32 tmp2;
12423 TCGv_i32 addr;
99c475ab 12424
99c475ab
FB
12425 switch (insn >> 12) {
12426 case 0: case 1:
396e467c 12427
99c475ab
FB
12428 rd = insn & 7;
12429 op = (insn >> 11) & 3;
12430 if (op == 3) {
a2d12f0f
PM
12431 /*
12432 * 0b0001_1xxx_xxxx_xxxx
12433 * - Add, subtract (three low registers)
12434 * - Add, subtract (two low registers and immediate)
12435 */
99c475ab 12436 rn = (insn >> 3) & 7;
396e467c 12437 tmp = load_reg(s, rn);
99c475ab
FB
12438 if (insn & (1 << 10)) {
12439 /* immediate */
7d1b0095 12440 tmp2 = tcg_temp_new_i32();
396e467c 12441 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
12442 } else {
12443 /* reg */
12444 rm = (insn >> 6) & 7;
396e467c 12445 tmp2 = load_reg(s, rm);
99c475ab 12446 }
9ee6e8bb
PB
12447 if (insn & (1 << 9)) {
12448 if (s->condexec_mask)
396e467c 12449 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 12450 else
72485ec4 12451 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
12452 } else {
12453 if (s->condexec_mask)
396e467c 12454 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 12455 else
72485ec4 12456 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 12457 }
7d1b0095 12458 tcg_temp_free_i32(tmp2);
396e467c 12459 store_reg(s, rd, tmp);
99c475ab
FB
12460 } else {
12461 /* shift immediate */
12462 rm = (insn >> 3) & 7;
12463 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
12464 tmp = load_reg(s, rm);
12465 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
12466 if (!s->condexec_mask)
12467 gen_logic_CC(tmp);
12468 store_reg(s, rd, tmp);
99c475ab
FB
12469 }
12470 break;
12471 case 2: case 3:
a2d12f0f
PM
12472 /*
12473 * 0b001x_xxxx_xxxx_xxxx
12474 * - Add, subtract, compare, move (one low register and immediate)
12475 */
99c475ab
FB
12476 op = (insn >> 11) & 3;
12477 rd = (insn >> 8) & 0x7;
396e467c 12478 if (op == 0) { /* mov */
7d1b0095 12479 tmp = tcg_temp_new_i32();
396e467c 12480 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 12481 if (!s->condexec_mask)
396e467c
FN
12482 gen_logic_CC(tmp);
12483 store_reg(s, rd, tmp);
12484 } else {
12485 tmp = load_reg(s, rd);
7d1b0095 12486 tmp2 = tcg_temp_new_i32();
396e467c
FN
12487 tcg_gen_movi_i32(tmp2, insn & 0xff);
12488 switch (op) {
12489 case 1: /* cmp */
72485ec4 12490 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
12491 tcg_temp_free_i32(tmp);
12492 tcg_temp_free_i32(tmp2);
396e467c
FN
12493 break;
12494 case 2: /* add */
12495 if (s->condexec_mask)
12496 tcg_gen_add_i32(tmp, tmp, tmp2);
12497 else
72485ec4 12498 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 12499 tcg_temp_free_i32(tmp2);
396e467c
FN
12500 store_reg(s, rd, tmp);
12501 break;
12502 case 3: /* sub */
12503 if (s->condexec_mask)
12504 tcg_gen_sub_i32(tmp, tmp, tmp2);
12505 else
72485ec4 12506 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 12507 tcg_temp_free_i32(tmp2);
396e467c
FN
12508 store_reg(s, rd, tmp);
12509 break;
12510 }
99c475ab 12511 }
99c475ab
FB
12512 break;
12513 case 4:
12514 if (insn & (1 << 11)) {
12515 rd = (insn >> 8) & 7;
5899f386
FB
12516 /* load pc-relative. Bit 1 of PC is ignored. */
12517 val = s->pc + 2 + ((insn & 0xff) * 4);
12518 val &= ~(uint32_t)2;
7d1b0095 12519 addr = tcg_temp_new_i32();
b0109805 12520 tcg_gen_movi_i32(addr, val);
c40c8556 12521 tmp = tcg_temp_new_i32();
9bb6558a
PM
12522 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
12523 rd | ISSIs16Bit);
7d1b0095 12524 tcg_temp_free_i32(addr);
b0109805 12525 store_reg(s, rd, tmp);
99c475ab
FB
12526 break;
12527 }
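        /* Worked example (illustrative): for an LDR (literal) at address
         * 0x1002, s->pc is 0x1004, so the base is (0x1004 + 2) & ~2 =
         * 0x1004, the architectural Align(PC, 4); an insn at 0x1000 gets
         * the same base, (0x1002 + 2) & ~2 = 0x1004.
         */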
12528 if (insn & (1 << 10)) {
ebfe27c5
PM
12529 /* 0b0100_01xx_xxxx_xxxx
12530 * - data processing extended, branch and exchange
12531 */
99c475ab
FB
12532 rd = (insn & 7) | ((insn >> 4) & 8);
12533 rm = (insn >> 3) & 0xf;
12534 op = (insn >> 8) & 3;
12535 switch (op) {
12536 case 0: /* add */
396e467c
FN
12537 tmp = load_reg(s, rd);
12538 tmp2 = load_reg(s, rm);
12539 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 12540 tcg_temp_free_i32(tmp2);
55203189
PM
12541 if (rd == 13) {
12542 /* ADD SP, SP, reg */
12543 store_sp_checked(s, tmp);
12544 } else {
12545 store_reg(s, rd, tmp);
12546 }
99c475ab
FB
12547 break;
12548 case 1: /* cmp */
396e467c
FN
12549 tmp = load_reg(s, rd);
12550 tmp2 = load_reg(s, rm);
72485ec4 12551 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
12552 tcg_temp_free_i32(tmp2);
12553 tcg_temp_free_i32(tmp);
99c475ab
FB
12554 break;
12555 case 2: /* mov/cpy */
396e467c 12556 tmp = load_reg(s, rm);
55203189
PM
12557 if (rd == 13) {
12558 /* MOV SP, reg */
12559 store_sp_checked(s, tmp);
12560 } else {
12561 store_reg(s, rd, tmp);
12562 }
99c475ab 12563 break;
ebfe27c5
PM
12564 case 3:
12565 {
12566 /* 0b0100_0111_xxxx_xxxx
12567 * - branch [and link] exchange thumb register
12568 */
12569 bool link = insn & (1 << 7);
12570
fb602cb7 12571 if (insn & 3) {
ebfe27c5
PM
12572 goto undef;
12573 }
12574 if (link) {
be5e7a76 12575 ARCH(5);
ebfe27c5 12576 }
fb602cb7
PM
12577 if ((insn & 4)) {
12578 /* BXNS/BLXNS: only exists for v8M with the
12579 * security extensions, and always UNDEF if NonSecure.
12580 * We don't implement these in the user-only mode
12581 * either (in theory you can use them from Secure User
12582 * mode but they are too tied in to system emulation.)
12583 */
12584 if (!s->v8m_secure || IS_USER_ONLY) {
12585 goto undef;
12586 }
12587 if (link) {
3e3fa230 12588 gen_blxns(s, rm);
fb602cb7
PM
12589 } else {
12590 gen_bxns(s, rm);
12591 }
12592 break;
12593 }
12594 /* BLX/BX */
ebfe27c5
PM
12595 tmp = load_reg(s, rm);
12596 if (link) {
99c475ab 12597 val = (uint32_t)s->pc | 1;
7d1b0095 12598 tmp2 = tcg_temp_new_i32();
b0109805
PB
12599 tcg_gen_movi_i32(tmp2, val);
12600 store_reg(s, 14, tmp2);
3bb8a96f
PM
12601 gen_bx(s, tmp);
12602 } else {
12603 /* Only BX works as exception-return, not BLX */
                    gen_bx_excret(s, tmp);
                }
                break;
            }
            }
            break;
        }

        /*
         * 0b0100_00xx_xxxx_xxxx
         * - Data-processing (two low registers)
         */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }
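        /* From here on "val" doubles as a writeback selector: 1 means the
         * result is in tmp2 and goes back to rm (the swapped shift/rotate
         * ops, and mvn), 0 means it is in tmp and goes to rd.  rd == 16
         * is set below as a sentinel for "flags only, no writeback".
         */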

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            tmp = NULL;
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 1: /* strh */
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 2: /* strb */
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 4: /* ldr */
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
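        /* imm5 sits at bits [10:6]; ">> 4) & 0x7c" extracts it already
         * scaled by 4 (word offsets).
         */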
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         * - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
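            /* Clearing bit 1 of insn address + 4 word-aligns the base,
             * so ADR results are always multiples of 4.
             */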
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             * - ADD (SP plus immediate)
             * - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             * - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if this is a pop that starts below the limit and ends
                 * above it, it is UNKNOWN whether the limit check triggers;
                 * we choose to trigger.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

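            /* e.g. "push {r0, r2, lr}" sets bit 8 plus two list bits, so
             * offset = 12: SP was pre-decremented by 12 above and the
             * registers are now stored upwards from the new SP.
             */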
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
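            /* Reassemble the branch offset: imm5 (bits [7:3]) scaled by
             * 2, plus the i bit (bit 9) as bit 6; a forward branch of
             * 0-126 bytes from insn address + 4.
             */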
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
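            /* cond[3:1] is kept in condexec_cond; cond[0] rides along in
             * bit 4 of the mask and is shifted into place as the IT
             * block advances (see thumb_tr_translate_insn below).
             */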
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
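                /* Only emit the helper call if SETEND actually changes
                 * the current data endianness; a no-op SETEND does not
                 * need to end the TB.
                 */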
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
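                    /* 19 and 16 below are the v7M SYSm encodings of
                     * FAULTMASK and PRIMASK expected by the v7m_msr
                     * helper.
                     */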
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
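        /* "(insn << 24) >> 24" sign-extends imm8; e.g. imm8 == 0xfe is
         * -2 halfwords, i.e. a branch to the insn itself.
         */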
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            /* thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        /* thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

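            /* The prefix parks PC + 4 + (SignExtend(imm11) << 12) in LR;
             * the matching BL/BLX suffix (handled above) then adds its
             * own imm11 << 1 and swaps the return address into LR.
             */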
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
        }
        break;
    }
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->pc is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    return !thumb_insn_is_16bit(s, insn);
}

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
    dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
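        /* -(pc_first | TARGET_PAGE_MASK) is the number of bytes left on
         * the page; e.g. with 4K pages and a pc_first ending in 0xff0
         * the bound is 0x10 / 4 = 4 instructions.
         */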
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

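    /* The three payload words recorded here (pc, packed IT bits, and a
     * zero that is later patched with the insn syndrome through
     * dc->insn_start) are what restore_state_to_opc() at the bottom of
     * this file unpacks again.
     */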
    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
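    /* i.e. shift the next bit of the mask into cond[0]; once only the
     * terminating '1' bit is left the mask becomes 0 and the IT block
     * is over.
     */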
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start = arm_tr_tb_start,
    .insn_start = arm_tr_insn_start,
    .breakpoint_check = arm_tr_breakpoint_check,
    .translate_insn = arm_tr_translate_insn,
    .tb_stop = arm_tr_tb_stop,
    .disas_log = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start = arm_tr_tb_start,
    .insn_start = arm_tr_insn_start,
    .breakpoint_check = arm_tr_breakpoint_check,
    .translate_insn = thumb_tr_translate_insn,
    .tb_stop = arm_tr_tb_stop,
    .disas_log = arm_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb);
}

void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            qemu_fprintf(f, "\n");
        else
            qemu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
    }
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}