]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: Decode aa32 armv8.3 2-reg-index
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
36a71934 28#include "tcg-op-gvec.h"
1de7afc9 29#include "qemu/log.h"
534df156 30#include "qemu/bitops.h"
1d854765 31#include "arm_ldst.h"
19a6e31c 32#include "exec/semihost.h"
1497c961 33
2ef6175a
RH
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
2c0262af 36
a7e30d84 37#include "trace-tcg.h"
508127e2 38#include "exec/log.h"
a7e30d84
LV
39
40
2b51668f
PM
41#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 43/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 44#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
c99a55d3 45#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
2b51668f
PM
46#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 51
86753403 52#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d 55
b5ff1b31
FB
56#if defined(CONFIG_USER_ONLY)
57#define IS_USER(s) 1
58#else
59#define IS_USER(s) (s->user)
60#endif
61
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
b26eefb6 69/* FIXME: These should be removed. */
39d5492a 70static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 71static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 72
022c62cb 73#include "exec/gen-icount.h"
2e70f6ef 74
155c3eac
FN
75static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
61adacc8
RH
79/* Function prototypes for gen_ functions calling Neon helpers. */
80typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
81 TCGv_i32, TCGv_i32);
82
b26eefb6
PB
83/* initialize TCG globals. */
84void arm_translate_init(void)
85{
155c3eac
FN
86 int i;
87
155c3eac 88 for (i = 0; i < 16; i++) {
e1ccc054 89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 90 offsetof(CPUARMState, regs[i]),
155c3eac
FN
91 regnames[i]);
92 }
e1ccc054
RH
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 97
e1ccc054 98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
155c3eac 102
14ade10f 103 a64_translate_init();
b26eefb6
PB
104}
105
9bb6558a
PM
106/* Flags for the disas_set_da_iss info argument:
107 * lower bits hold the Rt register number, higher bits are flags.
108 */
109typedef enum ISSInfo {
110 ISSNone = 0,
111 ISSRegMask = 0x1f,
112 ISSInvalid = (1 << 5),
113 ISSIsAcqRel = (1 << 6),
114 ISSIsWrite = (1 << 7),
115 ISSIs16Bit = (1 << 8),
116} ISSInfo;
117
118/* Save the syndrome information for a Data Abort */
119static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
120{
121 uint32_t syn;
122 int sas = memop & MO_SIZE;
123 bool sse = memop & MO_SIGN;
124 bool is_acqrel = issinfo & ISSIsAcqRel;
125 bool is_write = issinfo & ISSIsWrite;
126 bool is_16bit = issinfo & ISSIs16Bit;
127 int srt = issinfo & ISSRegMask;
128
129 if (issinfo & ISSInvalid) {
130 /* Some callsites want to conditionally provide ISS info,
131 * eg "only if this was not a writeback"
132 */
133 return;
134 }
135
136 if (srt == 15) {
137 /* For AArch32, insns where the src/dest is R15 never generate
138 * ISS information. Catching that here saves checking at all
139 * the call sites.
140 */
141 return;
142 }
143
144 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
145 0, 0, 0, is_write, 0, is_16bit);
146 disas_set_insn_syndrome(s, syn);
147}
148
8bd5c820 149static inline int get_a32_user_mem_index(DisasContext *s)
579d21cc 150{
8bd5c820 151 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
579d21cc
PM
152 * insns:
153 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
154 * otherwise, access as if at PL0.
155 */
156 switch (s->mmu_idx) {
157 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
158 case ARMMMUIdx_S12NSE0:
159 case ARMMMUIdx_S12NSE1:
8bd5c820 160 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
579d21cc
PM
161 case ARMMMUIdx_S1E3:
162 case ARMMMUIdx_S1SE0:
163 case ARMMMUIdx_S1SE1:
8bd5c820 164 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
e7b921c2
PM
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
62593718
PM
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
b9f587d6
PM
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
b9f587d6 173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
62593718
PM
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
579d21cc
PM
177 case ARMMMUIdx_S2NS:
178 default:
179 g_assert_not_reached();
180 }
181}
182
39d5492a 183static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 184{
39d5492a 185 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
186 tcg_gen_ld_i32(tmp, cpu_env, offset);
187 return tmp;
188}
189
0ecb72a5 190#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 191
39d5492a 192static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
193{
194 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 195 tcg_temp_free_i32(var);
d9ba4830
PB
196}
197
198#define store_cpu_field(var, name) \
0ecb72a5 199 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 200
b26eefb6 201/* Set a variable to the value of a CPU register. */
39d5492a 202static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
203{
204 if (reg == 15) {
205 uint32_t addr;
b90372ad 206 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
207 if (s->thumb)
208 addr = (long)s->pc + 2;
209 else
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
212 } else {
155c3eac 213 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
214 }
215}
216
217/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 218static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 219{
39d5492a 220 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
221 load_reg_var(s, tmp, reg);
222 return tmp;
223}
224
225/* Set a CPU register. The source must be a temporary and will be
226 marked as dead. */
39d5492a 227static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
228{
229 if (reg == 15) {
9b6a3ea7
PM
230 /* In Thumb mode, we must ignore bit 0.
231 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
232 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
233 * We choose to ignore [1:0] in ARM mode for all architecture versions.
234 */
235 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
dcba3a8d 236 s->base.is_jmp = DISAS_JUMP;
b26eefb6 237 }
155c3eac 238 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 239 tcg_temp_free_i32(var);
b26eefb6
PB
240}
241
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual 8->16 extensions go via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 250
b26eefb6 251
39d5492a 252static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 253{
39d5492a 254 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 255 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
256 tcg_temp_free_i32(tmp_mask);
257}
d9ba4830
PB
258/* Set NZCV flags from the high 4 bits of var. */
259#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
260
d4a2dc67 261static void gen_exception_internal(int excp)
d9ba4830 262{
d4a2dc67
PM
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
264
265 assert(excp_is_internal(excp));
266 gen_helper_exception_internal(cpu_env, tcg_excp);
267 tcg_temp_free_i32(tcg_excp);
268}
269
73710361 270static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
271{
272 TCGv_i32 tcg_excp = tcg_const_i32(excp);
273 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 274 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 275
73710361
GB
276 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
277 tcg_syn, tcg_el);
278
279 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
280 tcg_temp_free_i32(tcg_syn);
281 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
282}
283
50225ad0
PM
284static void gen_ss_advance(DisasContext *s)
285{
286 /* If the singlestep state is Active-not-pending, advance to
287 * Active-pending.
288 */
289 if (s->ss_active) {
290 s->pstate_ss = 0;
291 gen_helper_clear_pstate_ss(cpu_env);
292 }
293}
294
295static void gen_step_complete_exception(DisasContext *s)
296{
297 /* We just completed step of an insn. Move from Active-not-pending
298 * to Active-pending, and then also take the swstep exception.
299 * This corresponds to making the (IMPDEF) choice to prioritize
300 * swstep exceptions over asynchronous exceptions taken to an exception
301 * level where debug is disabled. This choice has the advantage that
302 * we do not need to maintain internal state corresponding to the
303 * ISV/EX syndrome bits between completion of the step and generation
304 * of the exception, and our syndrome information is always correct.
305 */
306 gen_ss_advance(s);
73710361
GB
307 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
308 default_exception_el(s));
dcba3a8d 309 s->base.is_jmp = DISAS_NORETURN;
50225ad0
PM
310}
311
5425415e
PM
312static void gen_singlestep_exception(DisasContext *s)
313{
314 /* Generate the right kind of exception for singlestep, which is
315 * either the architectural singlestep or EXCP_DEBUG for QEMU's
316 * gdb singlestepping.
317 */
318 if (s->ss_active) {
319 gen_step_complete_exception(s);
320 } else {
321 gen_exception_internal(EXCP_DEBUG);
322 }
323}
324
b636649f
PM
325static inline bool is_singlestepping(DisasContext *s)
326{
327 /* Return true if we are singlestepping either because of
328 * architectural singlestep or QEMU gdbstub singlestep. This does
329 * not include the command line '-singlestep' mode which is rather
330 * misnamed as it only means "one instruction per TB" and doesn't
331 * affect the code we generate.
332 */
dcba3a8d 333 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
334}
335
39d5492a 336static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 337{
39d5492a
PM
338 TCGv_i32 tmp1 = tcg_temp_new_i32();
339 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
340 tcg_gen_ext16s_i32(tmp1, a);
341 tcg_gen_ext16s_i32(tmp2, b);
3670669c 342 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 343 tcg_temp_free_i32(tmp2);
3670669c
PB
344 tcg_gen_sari_i32(a, a, 16);
345 tcg_gen_sari_i32(b, b, 16);
346 tcg_gen_mul_i32(b, b, a);
347 tcg_gen_mov_i32(a, tmp1);
7d1b0095 348 tcg_temp_free_i32(tmp1);
3670669c
PB
349}
350
351/* Byteswap each halfword. */
39d5492a 352static void gen_rev16(TCGv_i32 var)
3670669c 353{
39d5492a 354 TCGv_i32 tmp = tcg_temp_new_i32();
68cedf73 355 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
3670669c 356 tcg_gen_shri_i32(tmp, var, 8);
68cedf73
AJ
357 tcg_gen_and_i32(tmp, tmp, mask);
358 tcg_gen_and_i32(var, var, mask);
3670669c 359 tcg_gen_shli_i32(var, var, 8);
3670669c 360 tcg_gen_or_i32(var, var, tmp);
68cedf73 361 tcg_temp_free_i32(mask);
7d1b0095 362 tcg_temp_free_i32(tmp);
3670669c
PB
363}
364
365/* Byteswap low halfword and sign extend. */
39d5492a 366static void gen_revsh(TCGv_i32 var)
3670669c 367{
1a855029
AJ
368 tcg_gen_ext16u_i32(var, var);
369 tcg_gen_bswap16_i32(var, var);
370 tcg_gen_ext16s_i32(var, var);
3670669c
PB
371}
372
838fa72d 373/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 374static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 375{
838fa72d
AJ
376 TCGv_i64 tmp64 = tcg_temp_new_i64();
377
378 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 379 tcg_temp_free_i32(b);
838fa72d
AJ
380 tcg_gen_shli_i64(tmp64, tmp64, 32);
381 tcg_gen_add_i64(a, tmp64, a);
382
383 tcg_temp_free_i64(tmp64);
384 return a;
385}
386
387/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 388static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
389{
390 TCGv_i64 tmp64 = tcg_temp_new_i64();
391
392 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 393 tcg_temp_free_i32(b);
838fa72d
AJ
394 tcg_gen_shli_i64(tmp64, tmp64, 32);
395 tcg_gen_sub_i64(a, tmp64, a);
396
397 tcg_temp_free_i64(tmp64);
398 return a;
3670669c
PB
399}
400
5e3f878a 401/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 402static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 403{
39d5492a
PM
404 TCGv_i32 lo = tcg_temp_new_i32();
405 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 406 TCGv_i64 ret;
5e3f878a 407
831d7fe8 408 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 409 tcg_temp_free_i32(a);
7d1b0095 410 tcg_temp_free_i32(b);
831d7fe8
RH
411
412 ret = tcg_temp_new_i64();
413 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
414 tcg_temp_free_i32(lo);
415 tcg_temp_free_i32(hi);
831d7fe8
RH
416
417 return ret;
5e3f878a
PB
418}
419
39d5492a 420static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 421{
39d5492a
PM
422 TCGv_i32 lo = tcg_temp_new_i32();
423 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 424 TCGv_i64 ret;
5e3f878a 425
831d7fe8 426 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 427 tcg_temp_free_i32(a);
7d1b0095 428 tcg_temp_free_i32(b);
831d7fe8
RH
429
430 ret = tcg_temp_new_i64();
431 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
432 tcg_temp_free_i32(lo);
433 tcg_temp_free_i32(hi);
831d7fe8
RH
434
435 return ret;
5e3f878a
PB
436}
437
8f01245e 438/* Swap low and high halfwords. */
39d5492a 439static void gen_swap_half(TCGv_i32 var)
8f01245e 440{
39d5492a 441 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
442 tcg_gen_shri_i32(tmp, var, 16);
443 tcg_gen_shli_i32(var, var, 16);
444 tcg_gen_or_i32(var, var, tmp);
7d1b0095 445 tcg_temp_free_i32(tmp);
8f01245e
PB
446}
447
b26eefb6
PB
448/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
449 tmp = (t0 ^ t1) & 0x8000;
450 t0 &= ~0x8000;
451 t1 &= ~0x8000;
452 t0 = (t0 + t1) ^ tmp;
453 */
454
39d5492a 455static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 456{
39d5492a 457 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
458 tcg_gen_xor_i32(tmp, t0, t1);
459 tcg_gen_andi_i32(tmp, tmp, 0x8000);
460 tcg_gen_andi_i32(t0, t0, ~0x8000);
461 tcg_gen_andi_i32(t1, t1, ~0x8000);
462 tcg_gen_add_i32(t0, t0, t1);
463 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
464 tcg_temp_free_i32(tmp);
465 tcg_temp_free_i32(t1);
b26eefb6
PB
466}
467
468/* Set CF to the top bit of var. */
39d5492a 469static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 470{
66c374de 471 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
472}
473
474/* Set N and Z flags from var. */
39d5492a 475static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 476{
66c374de
AJ
477 tcg_gen_mov_i32(cpu_NF, var);
478 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
479}
480
481/* T0 += T1 + CF. */
39d5492a 482static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 483{
396e467c 484 tcg_gen_add_i32(t0, t0, t1);
66c374de 485 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
486}
487
e9bb4aa9 488/* dest = T0 + T1 + CF. */
39d5492a 489static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 490{
e9bb4aa9 491 tcg_gen_add_i32(dest, t0, t1);
66c374de 492 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
493}
494
3670669c 495/* dest = T0 - T1 + CF - 1. */
39d5492a 496static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 497{
3670669c 498 tcg_gen_sub_i32(dest, t0, t1);
66c374de 499 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 500 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
501}
502
72485ec4 503/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 504static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 505{
39d5492a 506 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
507 tcg_gen_movi_i32(tmp, 0);
508 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 509 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 510 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
511 tcg_gen_xor_i32(tmp, t0, t1);
512 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
513 tcg_temp_free_i32(tmp);
514 tcg_gen_mov_i32(dest, cpu_NF);
515}
516
49b4c31e 517/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 518static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 519{
39d5492a 520 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
521 if (TCG_TARGET_HAS_add2_i32) {
522 tcg_gen_movi_i32(tmp, 0);
523 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 524 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
525 } else {
526 TCGv_i64 q0 = tcg_temp_new_i64();
527 TCGv_i64 q1 = tcg_temp_new_i64();
528 tcg_gen_extu_i32_i64(q0, t0);
529 tcg_gen_extu_i32_i64(q1, t1);
530 tcg_gen_add_i64(q0, q0, q1);
531 tcg_gen_extu_i32_i64(q1, cpu_CF);
532 tcg_gen_add_i64(q0, q0, q1);
533 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
534 tcg_temp_free_i64(q0);
535 tcg_temp_free_i64(q1);
536 }
537 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
538 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
539 tcg_gen_xor_i32(tmp, t0, t1);
540 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
541 tcg_temp_free_i32(tmp);
542 tcg_gen_mov_i32(dest, cpu_NF);
543}
544
72485ec4 545/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 546static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 547{
39d5492a 548 TCGv_i32 tmp;
72485ec4
AJ
549 tcg_gen_sub_i32(cpu_NF, t0, t1);
550 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
551 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
552 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
553 tmp = tcg_temp_new_i32();
554 tcg_gen_xor_i32(tmp, t0, t1);
555 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
556 tcg_temp_free_i32(tmp);
557 tcg_gen_mov_i32(dest, cpu_NF);
558}
559
e77f0832 560/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 561static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 562{
39d5492a 563 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
564 tcg_gen_not_i32(tmp, t1);
565 gen_adc_CC(dest, t0, tmp);
39d5492a 566 tcg_temp_free_i32(tmp);
2de68a49
RH
567}
568
365af80e 569#define GEN_SHIFT(name) \
39d5492a 570static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 571{ \
39d5492a 572 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
573 tmp1 = tcg_temp_new_i32(); \
574 tcg_gen_andi_i32(tmp1, t1, 0xff); \
575 tmp2 = tcg_const_i32(0); \
576 tmp3 = tcg_const_i32(0x1f); \
577 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
578 tcg_temp_free_i32(tmp3); \
579 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
580 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
581 tcg_temp_free_i32(tmp2); \
582 tcg_temp_free_i32(tmp1); \
583}
584GEN_SHIFT(shl)
585GEN_SHIFT(shr)
586#undef GEN_SHIFT
587
39d5492a 588static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 589{
39d5492a 590 TCGv_i32 tmp1, tmp2;
365af80e
AJ
591 tmp1 = tcg_temp_new_i32();
592 tcg_gen_andi_i32(tmp1, t1, 0xff);
593 tmp2 = tcg_const_i32(0x1f);
594 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
595 tcg_temp_free_i32(tmp2);
596 tcg_gen_sar_i32(dest, t0, tmp1);
597 tcg_temp_free_i32(tmp1);
598}
599
39d5492a 600static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 601{
39d5492a
PM
602 TCGv_i32 c0 = tcg_const_i32(0);
603 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
604 tcg_gen_neg_i32(tmp, src);
605 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
606 tcg_temp_free_i32(c0);
607 tcg_temp_free_i32(tmp);
608}
ad69471c 609
39d5492a 610static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 611{
9a119ff6 612 if (shift == 0) {
66c374de 613 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 614 } else {
66c374de
AJ
615 tcg_gen_shri_i32(cpu_CF, var, shift);
616 if (shift != 31) {
617 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
618 }
9a119ff6 619 }
9a119ff6 620}
b26eefb6 621
9a119ff6 622/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
623static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
624 int shift, int flags)
9a119ff6
PB
625{
626 switch (shiftop) {
627 case 0: /* LSL */
628 if (shift != 0) {
629 if (flags)
630 shifter_out_im(var, 32 - shift);
631 tcg_gen_shli_i32(var, var, shift);
632 }
633 break;
634 case 1: /* LSR */
635 if (shift == 0) {
636 if (flags) {
66c374de 637 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
638 }
639 tcg_gen_movi_i32(var, 0);
640 } else {
641 if (flags)
642 shifter_out_im(var, shift - 1);
643 tcg_gen_shri_i32(var, var, shift);
644 }
645 break;
646 case 2: /* ASR */
647 if (shift == 0)
648 shift = 32;
649 if (flags)
650 shifter_out_im(var, shift - 1);
651 if (shift == 32)
652 shift = 31;
653 tcg_gen_sari_i32(var, var, shift);
654 break;
655 case 3: /* ROR/RRX */
656 if (shift != 0) {
657 if (flags)
658 shifter_out_im(var, shift - 1);
f669df27 659 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 660 } else {
39d5492a 661 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 662 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
663 if (flags)
664 shifter_out_im(var, 0);
665 tcg_gen_shri_i32(var, var, 1);
b26eefb6 666 tcg_gen_or_i32(var, var, tmp);
7d1b0095 667 tcg_temp_free_i32(tmp);
b26eefb6
PB
668 }
669 }
670};
671
39d5492a
PM
672static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
673 TCGv_i32 shift, int flags)
8984bd2e
PB
674{
675 if (flags) {
676 switch (shiftop) {
9ef39277
BS
677 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
678 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
679 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
680 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
681 }
682 } else {
683 switch (shiftop) {
365af80e
AJ
684 case 0:
685 gen_shl(var, var, shift);
686 break;
687 case 1:
688 gen_shr(var, var, shift);
689 break;
690 case 2:
691 gen_sar(var, var, shift);
692 break;
f669df27
AJ
693 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
694 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
695 }
696 }
7d1b0095 697 tcg_temp_free_i32(shift);
8984bd2e
PB
698}
699
6ddbc6e4
PB
700#define PAS_OP(pfx) \
701 switch (op2) { \
702 case 0: gen_pas_helper(glue(pfx,add16)); break; \
703 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
704 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
705 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
706 case 4: gen_pas_helper(glue(pfx,add8)); break; \
707 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
708 }
39d5492a 709static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 710{
a7812ae4 711 TCGv_ptr tmp;
6ddbc6e4
PB
712
713 switch (op1) {
714#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
715 case 1:
a7812ae4 716 tmp = tcg_temp_new_ptr();
0ecb72a5 717 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 718 PAS_OP(s)
b75263d6 719 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
720 break;
721 case 5:
a7812ae4 722 tmp = tcg_temp_new_ptr();
0ecb72a5 723 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 724 PAS_OP(u)
b75263d6 725 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
726 break;
727#undef gen_pas_helper
728#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
729 case 2:
730 PAS_OP(q);
731 break;
732 case 3:
733 PAS_OP(sh);
734 break;
735 case 6:
736 PAS_OP(uq);
737 break;
738 case 7:
739 PAS_OP(uh);
740 break;
741#undef gen_pas_helper
742 }
743}
9ee6e8bb
PB
744#undef PAS_OP
745
6ddbc6e4
PB
746/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
747#define PAS_OP(pfx) \
ed89a2f1 748 switch (op1) { \
6ddbc6e4
PB
749 case 0: gen_pas_helper(glue(pfx,add8)); break; \
750 case 1: gen_pas_helper(glue(pfx,add16)); break; \
751 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
752 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
753 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
754 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
755 }
39d5492a 756static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 757{
a7812ae4 758 TCGv_ptr tmp;
6ddbc6e4 759
ed89a2f1 760 switch (op2) {
6ddbc6e4
PB
761#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
762 case 0:
a7812ae4 763 tmp = tcg_temp_new_ptr();
0ecb72a5 764 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 765 PAS_OP(s)
b75263d6 766 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
767 break;
768 case 4:
a7812ae4 769 tmp = tcg_temp_new_ptr();
0ecb72a5 770 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 771 PAS_OP(u)
b75263d6 772 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
773 break;
774#undef gen_pas_helper
775#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
776 case 1:
777 PAS_OP(q);
778 break;
779 case 2:
780 PAS_OP(sh);
781 break;
782 case 5:
783 PAS_OP(uq);
784 break;
785 case 6:
786 PAS_OP(uh);
787 break;
788#undef gen_pas_helper
789 }
790}
9ee6e8bb
PB
791#undef PAS_OP
792
39fb730a 793/*
6c2c63d3 794 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
795 * This is common between ARM and Aarch64 targets.
796 */
6c2c63d3 797void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 798{
6c2c63d3
RH
799 TCGv_i32 value;
800 TCGCond cond;
801 bool global = true;
d9ba4830 802
d9ba4830
PB
803 switch (cc) {
804 case 0: /* eq: Z */
d9ba4830 805 case 1: /* ne: !Z */
6c2c63d3
RH
806 cond = TCG_COND_EQ;
807 value = cpu_ZF;
d9ba4830 808 break;
6c2c63d3 809
d9ba4830 810 case 2: /* cs: C */
d9ba4830 811 case 3: /* cc: !C */
6c2c63d3
RH
812 cond = TCG_COND_NE;
813 value = cpu_CF;
d9ba4830 814 break;
6c2c63d3 815
d9ba4830 816 case 4: /* mi: N */
d9ba4830 817 case 5: /* pl: !N */
6c2c63d3
RH
818 cond = TCG_COND_LT;
819 value = cpu_NF;
d9ba4830 820 break;
6c2c63d3 821
d9ba4830 822 case 6: /* vs: V */
d9ba4830 823 case 7: /* vc: !V */
6c2c63d3
RH
824 cond = TCG_COND_LT;
825 value = cpu_VF;
d9ba4830 826 break;
6c2c63d3 827
d9ba4830 828 case 8: /* hi: C && !Z */
6c2c63d3
RH
829 case 9: /* ls: !C || Z -> !(C && !Z) */
830 cond = TCG_COND_NE;
831 value = tcg_temp_new_i32();
832 global = false;
833 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
834 ZF is non-zero for !Z; so AND the two subexpressions. */
835 tcg_gen_neg_i32(value, cpu_CF);
836 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 837 break;
6c2c63d3 838
d9ba4830 839 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 840 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
841 /* Since we're only interested in the sign bit, == 0 is >= 0. */
842 cond = TCG_COND_GE;
843 value = tcg_temp_new_i32();
844 global = false;
845 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 846 break;
6c2c63d3 847
d9ba4830 848 case 12: /* gt: !Z && N == V */
d9ba4830 849 case 13: /* le: Z || N != V */
6c2c63d3
RH
850 cond = TCG_COND_NE;
851 value = tcg_temp_new_i32();
852 global = false;
853 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
854 * the sign bit then AND with ZF to yield the result. */
855 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
856 tcg_gen_sari_i32(value, value, 31);
857 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 858 break;
6c2c63d3 859
9305eac0
RH
860 case 14: /* always */
861 case 15: /* always */
862 /* Use the ALWAYS condition, which will fold early.
863 * It doesn't matter what we use for the value. */
864 cond = TCG_COND_ALWAYS;
865 value = cpu_ZF;
866 goto no_invert;
867
d9ba4830
PB
868 default:
869 fprintf(stderr, "Bad condition code 0x%x\n", cc);
870 abort();
871 }
6c2c63d3
RH
872
873 if (cc & 1) {
874 cond = tcg_invert_cond(cond);
875 }
876
9305eac0 877 no_invert:
6c2c63d3
RH
878 cmp->cond = cond;
879 cmp->value = value;
880 cmp->value_global = global;
881}
882
883void arm_free_cc(DisasCompare *cmp)
884{
885 if (!cmp->value_global) {
886 tcg_temp_free_i32(cmp->value);
887 }
888}
889
890void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
891{
892 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
893}
894
895void arm_gen_test_cc(int cc, TCGLabel *label)
896{
897 DisasCompare cmp;
898 arm_test_cc(&cmp, cc);
899 arm_jump_cc(&cmp, label);
900 arm_free_cc(&cmp);
d9ba4830 901}
2c0262af 902
b1d8e52e 903static const uint8_t table_logic_cc[16] = {
2c0262af
FB
904 1, /* and */
905 1, /* xor */
906 0, /* sub */
907 0, /* rsb */
908 0, /* add */
909 0, /* adc */
910 0, /* sbc */
911 0, /* rsc */
912 1, /* andl */
913 1, /* xorl */
914 0, /* cmp */
915 0, /* cmn */
916 1, /* orr */
917 1, /* mov */
918 1, /* bic */
919 1, /* mvn */
920};
3b46e624 921
4d5e8c96
PM
922static inline void gen_set_condexec(DisasContext *s)
923{
924 if (s->condexec_mask) {
925 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
926 TCGv_i32 tmp = tcg_temp_new_i32();
927 tcg_gen_movi_i32(tmp, val);
928 store_cpu_field(tmp, condexec_bits);
929 }
930}
931
932static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
933{
934 tcg_gen_movi_i32(cpu_R[15], val);
935}
936
d9ba4830
PB
937/* Set PC and Thumb state from an immediate address. */
938static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 939{
39d5492a 940 TCGv_i32 tmp;
99c475ab 941
dcba3a8d 942 s->base.is_jmp = DISAS_JUMP;
d9ba4830 943 if (s->thumb != (addr & 1)) {
7d1b0095 944 tmp = tcg_temp_new_i32();
d9ba4830 945 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 946 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 947 tcg_temp_free_i32(tmp);
d9ba4830 948 }
155c3eac 949 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
950}
951
952/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 953static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 954{
dcba3a8d 955 s->base.is_jmp = DISAS_JUMP;
155c3eac
FN
956 tcg_gen_andi_i32(cpu_R[15], var, ~1);
957 tcg_gen_andi_i32(var, var, 1);
958 store_cpu_field(var, thumb);
d9ba4830
PB
959}
960
3bb8a96f
PM
961/* Set PC and Thumb state from var. var is marked as dead.
962 * For M-profile CPUs, include logic to detect exception-return
963 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
964 * and BX reg, and no others, and happens only for code in Handler mode.
965 */
966static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
967{
968 /* Generate the same code here as for a simple bx, but flag via
dcba3a8d 969 * s->base.is_jmp that we need to do the rest of the work later.
3bb8a96f
PM
970 */
971 gen_bx(s, var);
d02a8698
PM
972 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
973 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
dcba3a8d 974 s->base.is_jmp = DISAS_BX_EXCRET;
3bb8a96f
PM
975 }
976}
977
978static inline void gen_bx_excret_final_code(DisasContext *s)
979{
980 /* Generate the code to finish possible exception return and end the TB */
981 TCGLabel *excret_label = gen_new_label();
d02a8698
PM
982 uint32_t min_magic;
983
984 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
985 /* Covers FNC_RETURN and EXC_RETURN magic */
986 min_magic = FNC_RETURN_MIN_MAGIC;
987 } else {
988 /* EXC_RETURN magic only */
989 min_magic = EXC_RETURN_MIN_MAGIC;
990 }
3bb8a96f
PM
991
992 /* Is the new PC value in the magic range indicating exception return? */
d02a8698 993 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
3bb8a96f
PM
994 /* No: end the TB as we would for a DISAS_JMP */
995 if (is_singlestepping(s)) {
996 gen_singlestep_exception(s);
997 } else {
998 tcg_gen_exit_tb(0);
999 }
1000 gen_set_label(excret_label);
1001 /* Yes: this is an exception return.
1002 * At this point in runtime env->regs[15] and env->thumb will hold
1003 * the exception-return magic number, which do_v7m_exception_exit()
1004 * will read. Nothing else will be able to see those values because
1005 * the cpu-exec main loop guarantees that we will always go straight
1006 * from raising the exception to the exception-handling code.
1007 *
1008 * gen_ss_advance(s) does nothing on M profile currently but
1009 * calling it is conceptually the right thing as we have executed
1010 * this instruction (compare SWI, HVC, SMC handling).
1011 */
1012 gen_ss_advance(s);
1013 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1014}
1015
fb602cb7
PM
1016static inline void gen_bxns(DisasContext *s, int rm)
1017{
1018 TCGv_i32 var = load_reg(s, rm);
1019
1020 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1021 * we need to sync state before calling it, but:
1022 * - we don't need to do gen_set_pc_im() because the bxns helper will
1023 * always set the PC itself
1024 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1025 * unless it's outside an IT block or the last insn in an IT block,
1026 * so we know that condexec == 0 (already set at the top of the TB)
1027 * is correct in the non-UNPREDICTABLE cases, and we can choose
1028 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1029 */
1030 gen_helper_v7m_bxns(cpu_env, var);
1031 tcg_temp_free_i32(var);
ef475b5d 1032 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
1033}
1034
3e3fa230
PM
1035static inline void gen_blxns(DisasContext *s, int rm)
1036{
1037 TCGv_i32 var = load_reg(s, rm);
1038
1039 /* We don't need to sync condexec state, for the same reason as bxns.
1040 * We do however need to set the PC, because the blxns helper reads it.
1041 * The blxns helper may throw an exception.
1042 */
1043 gen_set_pc_im(s, s->pc);
1044 gen_helper_v7m_blxns(cpu_env, var);
1045 tcg_temp_free_i32(var);
1046 s->base.is_jmp = DISAS_EXIT;
1047}
1048
21aeb343
JR
1049/* Variant of store_reg which uses branch&exchange logic when storing
1050 to r15 in ARM architecture v7 and above. The source must be a temporary
1051 and will be marked as dead. */
7dcc1f89 1052static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1053{
1054 if (reg == 15 && ENABLE_ARCH_7) {
1055 gen_bx(s, var);
1056 } else {
1057 store_reg(s, reg, var);
1058 }
1059}
1060
be5e7a76
DES
1061/* Variant of store_reg which uses branch&exchange logic when storing
1062 * to r15 in ARM architecture v5T and above. This is used for storing
1063 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1064 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1065static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1066{
1067 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1068 gen_bx_excret(s, var);
be5e7a76
DES
1069 } else {
1070 store_reg(s, reg, var);
1071 }
1072}
1073
/* Compile-time flag: 1 when building the user-mode-only emulator. */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
1079
08307563
PM
1080/* Abstractions of "generate code to do a guest load/store for
1081 * AArch32", where a vaddr is always 32 bits (and is zero
1082 * extended if we're a 64 bit core) and data is also
1083 * 32 bits unless specifically doing a 64 bit access.
1084 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1085 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1086 */
08307563 1087
7f5616f5 1088static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
08307563 1089{
7f5616f5
RH
1090 TCGv addr = tcg_temp_new();
1091 tcg_gen_extu_i32_tl(addr, a32);
1092
e334bd31 1093 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1094 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1095 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1096 }
7f5616f5 1097 return addr;
08307563
PM
1098}
1099
7f5616f5
RH
1100static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1101 int index, TCGMemOp opc)
08307563 1102{
7f5616f5
RH
1103 TCGv addr = gen_aa32_addr(s, a32, opc);
1104 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1105 tcg_temp_free(addr);
08307563
PM
1106}
1107
7f5616f5
RH
1108static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1109 int index, TCGMemOp opc)
1110{
1111 TCGv addr = gen_aa32_addr(s, a32, opc);
1112 tcg_gen_qemu_st_i32(val, addr, index, opc);
1113 tcg_temp_free(addr);
1114}
08307563 1115
/* Emit gen_aa32_ld<SUFF>() plus an _iss variant which also records
 * syndrome (ISS) information for insns that report it on data aborts.
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}
1130
/* Store-side counterpart of DO_GEN_LD; the _iss variant additionally
 * marks the access as a write in the recorded syndrome information.
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1145
7f5616f5 1146static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1147{
e334bd31
PB
1148 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1149 if (!IS_USER_ONLY && s->sctlr_b) {
1150 tcg_gen_rotri_i64(val, val, 32);
1151 }
08307563
PM
1152}
1153
7f5616f5
RH
1154static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1155 int index, TCGMemOp opc)
08307563 1156{
7f5616f5
RH
1157 TCGv addr = gen_aa32_addr(s, a32, opc);
1158 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1159 gen_aa32_frob64(s, val);
1160 tcg_temp_free(addr);
1161}
1162
1163static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1164 TCGv_i32 a32, int index)
1165{
1166 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1167}
1168
1169static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1170 int index, TCGMemOp opc)
1171{
1172 TCGv addr = gen_aa32_addr(s, a32, opc);
e334bd31
PB
1173
1174 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1175 if (!IS_USER_ONLY && s->sctlr_b) {
7f5616f5 1176 TCGv_i64 tmp = tcg_temp_new_i64();
e334bd31 1177 tcg_gen_rotri_i64(tmp, val, 32);
7f5616f5
RH
1178 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1179 tcg_temp_free_i64(tmp);
e334bd31 1180 } else {
7f5616f5 1181 tcg_gen_qemu_st_i64(val, addr, index, opc);
e334bd31 1182 }
7f5616f5 1183 tcg_temp_free(addr);
08307563
PM
1184}
1185
7f5616f5
RH
1186static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1187 TCGv_i32 a32, int index)
1188{
1189 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1190}
08307563 1191
7f5616f5
RH
1192DO_GEN_LD(8s, MO_SB)
1193DO_GEN_LD(8u, MO_UB)
1194DO_GEN_LD(16s, MO_SW)
1195DO_GEN_LD(16u, MO_UW)
1196DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1197DO_GEN_ST(8, MO_UB)
1198DO_GEN_ST(16, MO_UW)
1199DO_GEN_ST(32, MO_UL)
08307563 1200
37e6456e
PM
1201static inline void gen_hvc(DisasContext *s, int imm16)
1202{
1203 /* The pre HVC helper handles cases when HVC gets trapped
1204 * as an undefined insn by runtime configuration (ie before
1205 * the insn really executes).
1206 */
1207 gen_set_pc_im(s, s->pc - 4);
1208 gen_helper_pre_hvc(cpu_env);
1209 /* Otherwise we will treat this as a real exception which
1210 * happens after execution of the insn. (The distinction matters
1211 * for the PC value reported to the exception handler and also
1212 * for single stepping.)
1213 */
1214 s->svc_imm = imm16;
1215 gen_set_pc_im(s, s->pc);
dcba3a8d 1216 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1217}
1218
1219static inline void gen_smc(DisasContext *s)
1220{
1221 /* As with HVC, we may take an exception either before or after
1222 * the insn executes.
1223 */
1224 TCGv_i32 tmp;
1225
1226 gen_set_pc_im(s, s->pc - 4);
1227 tmp = tcg_const_i32(syn_aa32_smc());
1228 gen_helper_pre_smc(cpu_env, tmp);
1229 tcg_temp_free_i32(tmp);
1230 gen_set_pc_im(s, s->pc);
dcba3a8d 1231 s->base.is_jmp = DISAS_SMC;
37e6456e
PM
1232}
1233
d4a2dc67
PM
1234static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1235{
1236 gen_set_condexec(s);
1237 gen_set_pc_im(s, s->pc - offset);
1238 gen_exception_internal(excp);
dcba3a8d 1239 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1240}
1241
73710361
GB
1242static void gen_exception_insn(DisasContext *s, int offset, int excp,
1243 int syn, uint32_t target_el)
d4a2dc67
PM
1244{
1245 gen_set_condexec(s);
1246 gen_set_pc_im(s, s->pc - offset);
73710361 1247 gen_exception(excp, syn, target_el);
dcba3a8d 1248 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1249}
1250
b5ff1b31
FB
1251/* Force a TB lookup after an instruction that changes the CPU state. */
1252static inline void gen_lookup_tb(DisasContext *s)
1253{
a6445c52 1254 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
dcba3a8d 1255 s->base.is_jmp = DISAS_EXIT;
b5ff1b31
FB
1256}
1257
19a6e31c
PM
1258static inline void gen_hlt(DisasContext *s, int imm)
1259{
1260 /* HLT. This has two purposes.
1261 * Architecturally, it is an external halting debug instruction.
1262 * Since QEMU doesn't implement external debug, we treat this as
1263 * it is required for halting debug disabled: it will UNDEF.
1264 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1265 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1266 * must trigger semihosting even for ARMv7 and earlier, where
1267 * HLT was an undefined encoding.
1268 * In system mode, we don't allow userspace access to
1269 * semihosting, to provide some semblance of security
1270 * (and for consistency with our 32-bit semihosting).
1271 */
1272 if (semihosting_enabled() &&
1273#ifndef CONFIG_USER_ONLY
1274 s->current_el != 0 &&
1275#endif
1276 (imm == (s->thumb ? 0x3c : 0xf000))) {
1277 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1278 return;
1279 }
1280
1281 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1282 default_exception_el(s));
1283}
1284
b0109805 1285static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1286 TCGv_i32 var)
2c0262af 1287{
1e8d4eec 1288 int val, rm, shift, shiftop;
39d5492a 1289 TCGv_i32 offset;
2c0262af
FB
1290
1291 if (!(insn & (1 << 25))) {
1292 /* immediate */
1293 val = insn & 0xfff;
1294 if (!(insn & (1 << 23)))
1295 val = -val;
537730b9 1296 if (val != 0)
b0109805 1297 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1298 } else {
1299 /* shift/register */
1300 rm = (insn) & 0xf;
1301 shift = (insn >> 7) & 0x1f;
1e8d4eec 1302 shiftop = (insn >> 5) & 3;
b26eefb6 1303 offset = load_reg(s, rm);
9a119ff6 1304 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1305 if (!(insn & (1 << 23)))
b0109805 1306 tcg_gen_sub_i32(var, var, offset);
2c0262af 1307 else
b0109805 1308 tcg_gen_add_i32(var, var, offset);
7d1b0095 1309 tcg_temp_free_i32(offset);
2c0262af
FB
1310 }
1311}
1312
191f9a93 1313static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1314 int extra, TCGv_i32 var)
2c0262af
FB
1315{
1316 int val, rm;
39d5492a 1317 TCGv_i32 offset;
3b46e624 1318
2c0262af
FB
1319 if (insn & (1 << 22)) {
1320 /* immediate */
1321 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1322 if (!(insn & (1 << 23)))
1323 val = -val;
18acad92 1324 val += extra;
537730b9 1325 if (val != 0)
b0109805 1326 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1327 } else {
1328 /* register */
191f9a93 1329 if (extra)
b0109805 1330 tcg_gen_addi_i32(var, var, extra);
2c0262af 1331 rm = (insn) & 0xf;
b26eefb6 1332 offset = load_reg(s, rm);
2c0262af 1333 if (!(insn & (1 << 23)))
b0109805 1334 tcg_gen_sub_i32(var, var, offset);
2c0262af 1335 else
b0109805 1336 tcg_gen_add_i32(var, var, offset);
7d1b0095 1337 tcg_temp_free_i32(offset);
2c0262af
FB
1338 }
1339}
1340
5aaebd13
PM
1341static TCGv_ptr get_fpstatus_ptr(int neon)
1342{
1343 TCGv_ptr statusptr = tcg_temp_new_ptr();
1344 int offset;
1345 if (neon) {
0ecb72a5 1346 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1347 } else {
0ecb72a5 1348 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1349 }
1350 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1351 return statusptr;
1352}
1353
4373f3ce
PB
1354#define VFP_OP2(name) \
1355static inline void gen_vfp_##name(int dp) \
1356{ \
ae1857ec
PM
1357 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1358 if (dp) { \
1359 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1360 } else { \
1361 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1362 } \
1363 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1364}
1365
4373f3ce
PB
1366VFP_OP2(add)
1367VFP_OP2(sub)
1368VFP_OP2(mul)
1369VFP_OP2(div)
1370
1371#undef VFP_OP2
1372
605a6aed
PM
1373static inline void gen_vfp_F1_mul(int dp)
1374{
1375 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1376 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1377 if (dp) {
ae1857ec 1378 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1379 } else {
ae1857ec 1380 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1381 }
ae1857ec 1382 tcg_temp_free_ptr(fpst);
605a6aed
PM
1383}
1384
1385static inline void gen_vfp_F1_neg(int dp)
1386{
1387 /* Like gen_vfp_neg() but put result in F1 */
1388 if (dp) {
1389 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1390 } else {
1391 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1392 }
1393}
1394
4373f3ce
PB
1395static inline void gen_vfp_abs(int dp)
1396{
1397 if (dp)
1398 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1399 else
1400 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1401}
1402
1403static inline void gen_vfp_neg(int dp)
1404{
1405 if (dp)
1406 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1407 else
1408 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1409}
1410
1411static inline void gen_vfp_sqrt(int dp)
1412{
1413 if (dp)
1414 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1415 else
1416 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1417}
1418
1419static inline void gen_vfp_cmp(int dp)
1420{
1421 if (dp)
1422 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1423 else
1424 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1425}
1426
1427static inline void gen_vfp_cmpe(int dp)
1428{
1429 if (dp)
1430 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1431 else
1432 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1433}
1434
1435static inline void gen_vfp_F1_ld0(int dp)
1436{
1437 if (dp)
5b340b51 1438 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1439 else
5b340b51 1440 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1441}
1442
5500b06c
PM
1443#define VFP_GEN_ITOF(name) \
1444static inline void gen_vfp_##name(int dp, int neon) \
1445{ \
5aaebd13 1446 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1447 if (dp) { \
1448 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1449 } else { \
1450 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1451 } \
b7fa9214 1452 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1453}
1454
5500b06c
PM
1455VFP_GEN_ITOF(uito)
1456VFP_GEN_ITOF(sito)
1457#undef VFP_GEN_ITOF
4373f3ce 1458
5500b06c
PM
1459#define VFP_GEN_FTOI(name) \
1460static inline void gen_vfp_##name(int dp, int neon) \
1461{ \
5aaebd13 1462 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1463 if (dp) { \
1464 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1465 } else { \
1466 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1467 } \
b7fa9214 1468 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1469}
1470
5500b06c
PM
1471VFP_GEN_FTOI(toui)
1472VFP_GEN_FTOI(touiz)
1473VFP_GEN_FTOI(tosi)
1474VFP_GEN_FTOI(tosiz)
1475#undef VFP_GEN_FTOI
4373f3ce 1476
16d5b3ca 1477#define VFP_GEN_FIX(name, round) \
5500b06c 1478static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1479{ \
39d5492a 1480 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1481 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1482 if (dp) { \
16d5b3ca
WN
1483 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1484 statusptr); \
5500b06c 1485 } else { \
16d5b3ca
WN
1486 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1487 statusptr); \
5500b06c 1488 } \
b75263d6 1489 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1490 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1491}
16d5b3ca
WN
1492VFP_GEN_FIX(tosh, _round_to_zero)
1493VFP_GEN_FIX(tosl, _round_to_zero)
1494VFP_GEN_FIX(touh, _round_to_zero)
1495VFP_GEN_FIX(toul, _round_to_zero)
1496VFP_GEN_FIX(shto, )
1497VFP_GEN_FIX(slto, )
1498VFP_GEN_FIX(uhto, )
1499VFP_GEN_FIX(ulto, )
4373f3ce 1500#undef VFP_GEN_FIX
9ee6e8bb 1501
39d5492a 1502static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1503{
08307563 1504 if (dp) {
12dcc321 1505 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1506 } else {
12dcc321 1507 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1508 }
b5ff1b31
FB
1509}
1510
39d5492a 1511static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1512{
08307563 1513 if (dp) {
12dcc321 1514 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1515 } else {
12dcc321 1516 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1517 }
b5ff1b31
FB
1518}
1519
c39c2b90 1520static inline long vfp_reg_offset(bool dp, unsigned reg)
8e96005d 1521{
9a2b5256 1522 if (dp) {
c39c2b90 1523 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
8e96005d 1524 } else {
c39c2b90 1525 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
9a2b5256
RH
1526 if (reg & 1) {
1527 ofs += offsetof(CPU_DoubleU, l.upper);
1528 } else {
1529 ofs += offsetof(CPU_DoubleU, l.lower);
1530 }
1531 return ofs;
8e96005d
FB
1532 }
1533}
9ee6e8bb
PB
1534
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    /* NEON D-register pieces map onto consecutive S-register slots. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1544
39d5492a 1545static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1546{
39d5492a 1547 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1548 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1549 return tmp;
1550}
1551
39d5492a 1552static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1553{
1554 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1555 tcg_temp_free_i32(var);
8f8e3aa4
PB
1556}
1557
a7812ae4 1558static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1559{
1560 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1561}
1562
a7812ae4 1563static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1564{
1565 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1566}
1567
1a66ac61
RH
1568static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1569{
1570 TCGv_ptr ret = tcg_temp_new_ptr();
1571 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1572 return ret;
1573}
1574
/* The VFP "F" working values are plain i32/i64 TCG temporaries. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1579
b7bcbe95
FB
1580static inline void gen_mov_F0_vreg(int dp, int reg)
1581{
1582 if (dp)
4373f3ce 1583 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1584 else
4373f3ce 1585 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1586}
1587
1588static inline void gen_mov_F1_vreg(int dp, int reg)
1589{
1590 if (dp)
4373f3ce 1591 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1592 else
4373f3ce 1593 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1594}
1595
1596static inline void gen_mov_vreg_F0(int dp, int reg)
1597{
1598 if (dp)
4373f3ce 1599 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1600 else
4373f3ce 1601 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1602}
1603
/* Bit 20 of a coprocessor insn encoding; set for the register-read
 * direction (used below to distinguish e.g. TMRRC from TMCRR).
 */
#define ARM_CP_RW_BIT (1 << 20)
1605
a7812ae4 1606static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1607{
0ecb72a5 1608 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1609}
1610
a7812ae4 1611static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1612{
0ecb72a5 1613 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1614}
1615
39d5492a 1616static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1617{
39d5492a 1618 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1619 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1620 return var;
e677137d
PB
1621}
1622
39d5492a 1623static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1624{
0ecb72a5 1625 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1626 tcg_temp_free_i32(var);
e677137d
PB
1627}
1628
1629static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1630{
1631 iwmmxt_store_reg(cpu_M0, rn);
1632}
1633
1634static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1635{
1636 iwmmxt_load_reg(cpu_M0, rn);
1637}
1638
1639static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1640{
1641 iwmmxt_load_reg(cpu_V1, rn);
1642 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1643}
1644
1645static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1646{
1647 iwmmxt_load_reg(cpu_V1, rn);
1648 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1649}
1650
1651static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1652{
1653 iwmmxt_load_reg(cpu_V1, rn);
1654 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1655}
1656
/* Emit gen_op_iwmmxt_<name>_M0_wRn(): M0 := helper(M0, wRn). */
#define IWMMXT_OP(name)                                              \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)             \
{                                                                    \
    iwmmxt_load_reg(cpu_V1, rn);                                     \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);                \
}

/* Same as IWMMXT_OP but the helper also takes cpu_env (it reads or
 * writes CPU state such as the iwMMXt condition flags).
 */
#define IWMMXT_OP_ENV(name)                                          \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)             \
{                                                                    \
    iwmmxt_load_reg(cpu_V1, rn);                                     \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);       \
}

/* Instantiate byte/word/long variants of an env-taking operation. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand, env-taking operation: M0 := helper(env, M0). */
#define IWMMXT_OP_ENV1(name)                                         \
static inline void gen_op_iwmmxt_##name##_M0(void)                   \
{                                                                    \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0);               \
}
1681
1682IWMMXT_OP(maddsq)
1683IWMMXT_OP(madduq)
1684IWMMXT_OP(sadb)
1685IWMMXT_OP(sadw)
1686IWMMXT_OP(mulslw)
1687IWMMXT_OP(mulshw)
1688IWMMXT_OP(mululw)
1689IWMMXT_OP(muluhw)
1690IWMMXT_OP(macsw)
1691IWMMXT_OP(macuw)
1692
477955bd
PM
1693IWMMXT_OP_ENV_SIZE(unpackl)
1694IWMMXT_OP_ENV_SIZE(unpackh)
1695
1696IWMMXT_OP_ENV1(unpacklub)
1697IWMMXT_OP_ENV1(unpackluw)
1698IWMMXT_OP_ENV1(unpacklul)
1699IWMMXT_OP_ENV1(unpackhub)
1700IWMMXT_OP_ENV1(unpackhuw)
1701IWMMXT_OP_ENV1(unpackhul)
1702IWMMXT_OP_ENV1(unpacklsb)
1703IWMMXT_OP_ENV1(unpacklsw)
1704IWMMXT_OP_ENV1(unpacklsl)
1705IWMMXT_OP_ENV1(unpackhsb)
1706IWMMXT_OP_ENV1(unpackhsw)
1707IWMMXT_OP_ENV1(unpackhsl)
1708
1709IWMMXT_OP_ENV_SIZE(cmpeq)
1710IWMMXT_OP_ENV_SIZE(cmpgtu)
1711IWMMXT_OP_ENV_SIZE(cmpgts)
1712
1713IWMMXT_OP_ENV_SIZE(mins)
1714IWMMXT_OP_ENV_SIZE(minu)
1715IWMMXT_OP_ENV_SIZE(maxs)
1716IWMMXT_OP_ENV_SIZE(maxu)
1717
1718IWMMXT_OP_ENV_SIZE(subn)
1719IWMMXT_OP_ENV_SIZE(addn)
1720IWMMXT_OP_ENV_SIZE(subu)
1721IWMMXT_OP_ENV_SIZE(addu)
1722IWMMXT_OP_ENV_SIZE(subs)
1723IWMMXT_OP_ENV_SIZE(adds)
1724
1725IWMMXT_OP_ENV(avgb0)
1726IWMMXT_OP_ENV(avgb1)
1727IWMMXT_OP_ENV(avgw0)
1728IWMMXT_OP_ENV(avgw1)
e677137d 1729
477955bd
PM
1730IWMMXT_OP_ENV(packuw)
1731IWMMXT_OP_ENV(packul)
1732IWMMXT_OP_ENV(packuq)
1733IWMMXT_OP_ENV(packsw)
1734IWMMXT_OP_ENV(packsl)
1735IWMMXT_OP_ENV(packsq)
e677137d 1736
e677137d
PB
1737static void gen_op_iwmmxt_set_mup(void)
1738{
39d5492a 1739 TCGv_i32 tmp;
e677137d
PB
1740 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1741 tcg_gen_ori_i32(tmp, tmp, 2);
1742 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1743}
1744
1745static void gen_op_iwmmxt_set_cup(void)
1746{
39d5492a 1747 TCGv_i32 tmp;
e677137d
PB
1748 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1749 tcg_gen_ori_i32(tmp, tmp, 1);
1750 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1751}
1752
1753static void gen_op_iwmmxt_setpsr_nz(void)
1754{
39d5492a 1755 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1756 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1757 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1758}
1759
1760static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1761{
1762 iwmmxt_load_reg(cpu_V1, rn);
86831435 1763 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1764 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1765}
1766
39d5492a
PM
1767static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1768 TCGv_i32 dest)
18c9b560
AZ
1769{
1770 int rd;
1771 uint32_t offset;
39d5492a 1772 TCGv_i32 tmp;
18c9b560
AZ
1773
1774 rd = (insn >> 16) & 0xf;
da6b5335 1775 tmp = load_reg(s, rd);
18c9b560
AZ
1776
1777 offset = (insn & 0xff) << ((insn >> 7) & 2);
1778 if (insn & (1 << 24)) {
1779 /* Pre indexed */
1780 if (insn & (1 << 23))
da6b5335 1781 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1782 else
da6b5335
FN
1783 tcg_gen_addi_i32(tmp, tmp, -offset);
1784 tcg_gen_mov_i32(dest, tmp);
18c9b560 1785 if (insn & (1 << 21))
da6b5335
FN
1786 store_reg(s, rd, tmp);
1787 else
7d1b0095 1788 tcg_temp_free_i32(tmp);
18c9b560
AZ
1789 } else if (insn & (1 << 21)) {
1790 /* Post indexed */
da6b5335 1791 tcg_gen_mov_i32(dest, tmp);
18c9b560 1792 if (insn & (1 << 23))
da6b5335 1793 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1794 else
da6b5335
FN
1795 tcg_gen_addi_i32(tmp, tmp, -offset);
1796 store_reg(s, rd, tmp);
18c9b560
AZ
1797 } else if (!(insn & (1 << 23)))
1798 return 1;
1799 return 0;
1800}
1801
39d5492a 1802static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1803{
1804 int rd = (insn >> 0) & 0xf;
39d5492a 1805 TCGv_i32 tmp;
18c9b560 1806
da6b5335
FN
1807 if (insn & (1 << 8)) {
1808 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1809 return 1;
da6b5335
FN
1810 } else {
1811 tmp = iwmmxt_load_creg(rd);
1812 }
1813 } else {
7d1b0095 1814 tmp = tcg_temp_new_i32();
da6b5335 1815 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1816 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1817 }
1818 tcg_gen_andi_i32(tmp, tmp, mask);
1819 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1820 tcg_temp_free_i32(tmp);
18c9b560
AZ
1821 return 0;
1822}
1823
a1c7273b 1824/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1825 (ie. an undefined instruction). */
7dcc1f89 1826static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1827{
1828 int rd, wrd;
1829 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1830 TCGv_i32 addr;
1831 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1832
1833 if ((insn & 0x0e000e00) == 0x0c000000) {
1834 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1835 wrd = insn & 0xf;
1836 rdlo = (insn >> 12) & 0xf;
1837 rdhi = (insn >> 16) & 0xf;
1838 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1839 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1840 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1841 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1842 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1843 } else { /* TMCRR */
da6b5335
FN
1844 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1845 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1846 gen_op_iwmmxt_set_mup();
1847 }
1848 return 0;
1849 }
1850
1851 wrd = (insn >> 12) & 0xf;
7d1b0095 1852 addr = tcg_temp_new_i32();
da6b5335 1853 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1854 tcg_temp_free_i32(addr);
18c9b560 1855 return 1;
da6b5335 1856 }
18c9b560
AZ
1857 if (insn & ARM_CP_RW_BIT) {
1858 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1859 tmp = tcg_temp_new_i32();
12dcc321 1860 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1861 iwmmxt_store_creg(wrd, tmp);
18c9b560 1862 } else {
e677137d
PB
1863 i = 1;
1864 if (insn & (1 << 8)) {
1865 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1866 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1867 i = 0;
1868 } else { /* WLDRW wRd */
29531141 1869 tmp = tcg_temp_new_i32();
12dcc321 1870 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1871 }
1872 } else {
29531141 1873 tmp = tcg_temp_new_i32();
e677137d 1874 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1875 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1876 } else { /* WLDRB */
12dcc321 1877 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1878 }
1879 }
1880 if (i) {
1881 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1882 tcg_temp_free_i32(tmp);
e677137d 1883 }
18c9b560
AZ
1884 gen_op_iwmmxt_movq_wRn_M0(wrd);
1885 }
1886 } else {
1887 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1888 tmp = iwmmxt_load_creg(wrd);
12dcc321 1889 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1890 } else {
1891 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1892 tmp = tcg_temp_new_i32();
e677137d
PB
1893 if (insn & (1 << 8)) {
1894 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1895 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1896 } else { /* WSTRW wRd */
ecc7b3aa 1897 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1898 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1899 }
1900 } else {
1901 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1902 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1903 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1904 } else { /* WSTRB */
ecc7b3aa 1905 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1906 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1907 }
1908 }
18c9b560 1909 }
29531141 1910 tcg_temp_free_i32(tmp);
18c9b560 1911 }
7d1b0095 1912 tcg_temp_free_i32(addr);
18c9b560
AZ
1913 return 0;
1914 }
1915
1916 if ((insn & 0x0f000000) != 0x0e000000)
1917 return 1;
1918
1919 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1920 case 0x000: /* WOR */
1921 wrd = (insn >> 12) & 0xf;
1922 rd0 = (insn >> 0) & 0xf;
1923 rd1 = (insn >> 16) & 0xf;
1924 gen_op_iwmmxt_movq_M0_wRn(rd0);
1925 gen_op_iwmmxt_orq_M0_wRn(rd1);
1926 gen_op_iwmmxt_setpsr_nz();
1927 gen_op_iwmmxt_movq_wRn_M0(wrd);
1928 gen_op_iwmmxt_set_mup();
1929 gen_op_iwmmxt_set_cup();
1930 break;
1931 case 0x011: /* TMCR */
1932 if (insn & 0xf)
1933 return 1;
1934 rd = (insn >> 12) & 0xf;
1935 wrd = (insn >> 16) & 0xf;
1936 switch (wrd) {
1937 case ARM_IWMMXT_wCID:
1938 case ARM_IWMMXT_wCASF:
1939 break;
1940 case ARM_IWMMXT_wCon:
1941 gen_op_iwmmxt_set_cup();
1942 /* Fall through. */
1943 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1944 tmp = iwmmxt_load_creg(wrd);
1945 tmp2 = load_reg(s, rd);
f669df27 1946 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1947 tcg_temp_free_i32(tmp2);
da6b5335 1948 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1949 break;
1950 case ARM_IWMMXT_wCGR0:
1951 case ARM_IWMMXT_wCGR1:
1952 case ARM_IWMMXT_wCGR2:
1953 case ARM_IWMMXT_wCGR3:
1954 gen_op_iwmmxt_set_cup();
da6b5335
FN
1955 tmp = load_reg(s, rd);
1956 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1957 break;
1958 default:
1959 return 1;
1960 }
1961 break;
1962 case 0x100: /* WXOR */
1963 wrd = (insn >> 12) & 0xf;
1964 rd0 = (insn >> 0) & 0xf;
1965 rd1 = (insn >> 16) & 0xf;
1966 gen_op_iwmmxt_movq_M0_wRn(rd0);
1967 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1968 gen_op_iwmmxt_setpsr_nz();
1969 gen_op_iwmmxt_movq_wRn_M0(wrd);
1970 gen_op_iwmmxt_set_mup();
1971 gen_op_iwmmxt_set_cup();
1972 break;
1973 case 0x111: /* TMRC */
1974 if (insn & 0xf)
1975 return 1;
1976 rd = (insn >> 12) & 0xf;
1977 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1978 tmp = iwmmxt_load_creg(wrd);
1979 store_reg(s, rd, tmp);
18c9b560
AZ
1980 break;
1981 case 0x300: /* WANDN */
1982 wrd = (insn >> 12) & 0xf;
1983 rd0 = (insn >> 0) & 0xf;
1984 rd1 = (insn >> 16) & 0xf;
1985 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1986 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1987 gen_op_iwmmxt_andq_M0_wRn(rd1);
1988 gen_op_iwmmxt_setpsr_nz();
1989 gen_op_iwmmxt_movq_wRn_M0(wrd);
1990 gen_op_iwmmxt_set_mup();
1991 gen_op_iwmmxt_set_cup();
1992 break;
1993 case 0x200: /* WAND */
1994 wrd = (insn >> 12) & 0xf;
1995 rd0 = (insn >> 0) & 0xf;
1996 rd1 = (insn >> 16) & 0xf;
1997 gen_op_iwmmxt_movq_M0_wRn(rd0);
1998 gen_op_iwmmxt_andq_M0_wRn(rd1);
1999 gen_op_iwmmxt_setpsr_nz();
2000 gen_op_iwmmxt_movq_wRn_M0(wrd);
2001 gen_op_iwmmxt_set_mup();
2002 gen_op_iwmmxt_set_cup();
2003 break;
2004 case 0x810: case 0xa10: /* WMADD */
2005 wrd = (insn >> 12) & 0xf;
2006 rd0 = (insn >> 0) & 0xf;
2007 rd1 = (insn >> 16) & 0xf;
2008 gen_op_iwmmxt_movq_M0_wRn(rd0);
2009 if (insn & (1 << 21))
2010 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2011 else
2012 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2013 gen_op_iwmmxt_movq_wRn_M0(wrd);
2014 gen_op_iwmmxt_set_mup();
2015 break;
2016 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2017 wrd = (insn >> 12) & 0xf;
2018 rd0 = (insn >> 16) & 0xf;
2019 rd1 = (insn >> 0) & 0xf;
2020 gen_op_iwmmxt_movq_M0_wRn(rd0);
2021 switch ((insn >> 22) & 3) {
2022 case 0:
2023 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2024 break;
2025 case 1:
2026 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2027 break;
2028 case 2:
2029 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2030 break;
2031 case 3:
2032 return 1;
2033 }
2034 gen_op_iwmmxt_movq_wRn_M0(wrd);
2035 gen_op_iwmmxt_set_mup();
2036 gen_op_iwmmxt_set_cup();
2037 break;
2038 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2039 wrd = (insn >> 12) & 0xf;
2040 rd0 = (insn >> 16) & 0xf;
2041 rd1 = (insn >> 0) & 0xf;
2042 gen_op_iwmmxt_movq_M0_wRn(rd0);
2043 switch ((insn >> 22) & 3) {
2044 case 0:
2045 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2046 break;
2047 case 1:
2048 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2049 break;
2050 case 2:
2051 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2052 break;
2053 case 3:
2054 return 1;
2055 }
2056 gen_op_iwmmxt_movq_wRn_M0(wrd);
2057 gen_op_iwmmxt_set_mup();
2058 gen_op_iwmmxt_set_cup();
2059 break;
2060 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2061 wrd = (insn >> 12) & 0xf;
2062 rd0 = (insn >> 16) & 0xf;
2063 rd1 = (insn >> 0) & 0xf;
2064 gen_op_iwmmxt_movq_M0_wRn(rd0);
2065 if (insn & (1 << 22))
2066 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2067 else
2068 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2069 if (!(insn & (1 << 20)))
2070 gen_op_iwmmxt_addl_M0_wRn(wrd);
2071 gen_op_iwmmxt_movq_wRn_M0(wrd);
2072 gen_op_iwmmxt_set_mup();
2073 break;
2074 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2075 wrd = (insn >> 12) & 0xf;
2076 rd0 = (insn >> 16) & 0xf;
2077 rd1 = (insn >> 0) & 0xf;
2078 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2079 if (insn & (1 << 21)) {
2080 if (insn & (1 << 20))
2081 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2082 else
2083 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2084 } else {
2085 if (insn & (1 << 20))
2086 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2087 else
2088 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2089 }
18c9b560
AZ
2090 gen_op_iwmmxt_movq_wRn_M0(wrd);
2091 gen_op_iwmmxt_set_mup();
2092 break;
2093 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2094 wrd = (insn >> 12) & 0xf;
2095 rd0 = (insn >> 16) & 0xf;
2096 rd1 = (insn >> 0) & 0xf;
2097 gen_op_iwmmxt_movq_M0_wRn(rd0);
2098 if (insn & (1 << 21))
2099 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2100 else
2101 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2102 if (!(insn & (1 << 20))) {
e677137d
PB
2103 iwmmxt_load_reg(cpu_V1, wrd);
2104 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2105 }
2106 gen_op_iwmmxt_movq_wRn_M0(wrd);
2107 gen_op_iwmmxt_set_mup();
2108 break;
2109 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2110 wrd = (insn >> 12) & 0xf;
2111 rd0 = (insn >> 16) & 0xf;
2112 rd1 = (insn >> 0) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 switch ((insn >> 22) & 3) {
2115 case 0:
2116 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2117 break;
2118 case 1:
2119 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2120 break;
2121 case 2:
2122 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2123 break;
2124 case 3:
2125 return 1;
2126 }
2127 gen_op_iwmmxt_movq_wRn_M0(wrd);
2128 gen_op_iwmmxt_set_mup();
2129 gen_op_iwmmxt_set_cup();
2130 break;
2131 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2132 wrd = (insn >> 12) & 0xf;
2133 rd0 = (insn >> 16) & 0xf;
2134 rd1 = (insn >> 0) & 0xf;
2135 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2136 if (insn & (1 << 22)) {
2137 if (insn & (1 << 20))
2138 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2141 } else {
2142 if (insn & (1 << 20))
2143 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2144 else
2145 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2146 }
18c9b560
AZ
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 gen_op_iwmmxt_set_cup();
2150 break;
2151 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2156 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2157 tcg_gen_andi_i32(tmp, tmp, 7);
2158 iwmmxt_load_reg(cpu_V1, rd1);
2159 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2160 tcg_temp_free_i32(tmp);
18c9b560
AZ
2161 gen_op_iwmmxt_movq_wRn_M0(wrd);
2162 gen_op_iwmmxt_set_mup();
2163 break;
2164 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2165 if (((insn >> 6) & 3) == 3)
2166 return 1;
18c9b560
AZ
2167 rd = (insn >> 12) & 0xf;
2168 wrd = (insn >> 16) & 0xf;
da6b5335 2169 tmp = load_reg(s, rd);
18c9b560
AZ
2170 gen_op_iwmmxt_movq_M0_wRn(wrd);
2171 switch ((insn >> 6) & 3) {
2172 case 0:
da6b5335
FN
2173 tmp2 = tcg_const_i32(0xff);
2174 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2175 break;
2176 case 1:
da6b5335
FN
2177 tmp2 = tcg_const_i32(0xffff);
2178 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2179 break;
2180 case 2:
da6b5335
FN
2181 tmp2 = tcg_const_i32(0xffffffff);
2182 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2183 break;
da6b5335 2184 default:
f764718d
RH
2185 tmp2 = NULL;
2186 tmp3 = NULL;
18c9b560 2187 }
da6b5335 2188 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2189 tcg_temp_free_i32(tmp3);
2190 tcg_temp_free_i32(tmp2);
7d1b0095 2191 tcg_temp_free_i32(tmp);
18c9b560
AZ
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 break;
2195 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2196 rd = (insn >> 12) & 0xf;
2197 wrd = (insn >> 16) & 0xf;
da6b5335 2198 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2199 return 1;
2200 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2201 tmp = tcg_temp_new_i32();
18c9b560
AZ
2202 switch ((insn >> 22) & 3) {
2203 case 0:
da6b5335 2204 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2205 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2206 if (insn & 8) {
2207 tcg_gen_ext8s_i32(tmp, tmp);
2208 } else {
2209 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2210 }
2211 break;
2212 case 1:
da6b5335 2213 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2214 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2215 if (insn & 8) {
2216 tcg_gen_ext16s_i32(tmp, tmp);
2217 } else {
2218 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2219 }
2220 break;
2221 case 2:
da6b5335 2222 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2223 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2224 break;
18c9b560 2225 }
da6b5335 2226 store_reg(s, rd, tmp);
18c9b560
AZ
2227 break;
2228 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2229 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2230 return 1;
da6b5335 2231 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2232 switch ((insn >> 22) & 3) {
2233 case 0:
da6b5335 2234 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2235 break;
2236 case 1:
da6b5335 2237 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2238 break;
2239 case 2:
da6b5335 2240 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2241 break;
18c9b560 2242 }
da6b5335
FN
2243 tcg_gen_shli_i32(tmp, tmp, 28);
2244 gen_set_nzcv(tmp);
7d1b0095 2245 tcg_temp_free_i32(tmp);
18c9b560
AZ
2246 break;
2247 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2248 if (((insn >> 6) & 3) == 3)
2249 return 1;
18c9b560
AZ
2250 rd = (insn >> 12) & 0xf;
2251 wrd = (insn >> 16) & 0xf;
da6b5335 2252 tmp = load_reg(s, rd);
18c9b560
AZ
2253 switch ((insn >> 6) & 3) {
2254 case 0:
da6b5335 2255 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2256 break;
2257 case 1:
da6b5335 2258 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2259 break;
2260 case 2:
da6b5335 2261 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2262 break;
18c9b560 2263 }
7d1b0095 2264 tcg_temp_free_i32(tmp);
18c9b560
AZ
2265 gen_op_iwmmxt_movq_wRn_M0(wrd);
2266 gen_op_iwmmxt_set_mup();
2267 break;
2268 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2269 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2270 return 1;
da6b5335 2271 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2272 tmp2 = tcg_temp_new_i32();
da6b5335 2273 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2274 switch ((insn >> 22) & 3) {
2275 case 0:
2276 for (i = 0; i < 7; i ++) {
da6b5335
FN
2277 tcg_gen_shli_i32(tmp2, tmp2, 4);
2278 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2279 }
2280 break;
2281 case 1:
2282 for (i = 0; i < 3; i ++) {
da6b5335
FN
2283 tcg_gen_shli_i32(tmp2, tmp2, 8);
2284 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2285 }
2286 break;
2287 case 2:
da6b5335
FN
2288 tcg_gen_shli_i32(tmp2, tmp2, 16);
2289 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2290 break;
18c9b560 2291 }
da6b5335 2292 gen_set_nzcv(tmp);
7d1b0095
PM
2293 tcg_temp_free_i32(tmp2);
2294 tcg_temp_free_i32(tmp);
18c9b560
AZ
2295 break;
2296 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2297 wrd = (insn >> 12) & 0xf;
2298 rd0 = (insn >> 16) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
2300 switch ((insn >> 22) & 3) {
2301 case 0:
e677137d 2302 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2303 break;
2304 case 1:
e677137d 2305 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2306 break;
2307 case 2:
e677137d 2308 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2309 break;
2310 case 3:
2311 return 1;
2312 }
2313 gen_op_iwmmxt_movq_wRn_M0(wrd);
2314 gen_op_iwmmxt_set_mup();
2315 break;
2316 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2317 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2318 return 1;
da6b5335 2319 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2320 tmp2 = tcg_temp_new_i32();
da6b5335 2321 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2322 switch ((insn >> 22) & 3) {
2323 case 0:
2324 for (i = 0; i < 7; i ++) {
da6b5335
FN
2325 tcg_gen_shli_i32(tmp2, tmp2, 4);
2326 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2327 }
2328 break;
2329 case 1:
2330 for (i = 0; i < 3; i ++) {
da6b5335
FN
2331 tcg_gen_shli_i32(tmp2, tmp2, 8);
2332 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2333 }
2334 break;
2335 case 2:
da6b5335
FN
2336 tcg_gen_shli_i32(tmp2, tmp2, 16);
2337 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2338 break;
18c9b560 2339 }
da6b5335 2340 gen_set_nzcv(tmp);
7d1b0095
PM
2341 tcg_temp_free_i32(tmp2);
2342 tcg_temp_free_i32(tmp);
18c9b560
AZ
2343 break;
2344 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2345 rd = (insn >> 12) & 0xf;
2346 rd0 = (insn >> 16) & 0xf;
da6b5335 2347 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2348 return 1;
2349 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2350 tmp = tcg_temp_new_i32();
18c9b560
AZ
2351 switch ((insn >> 22) & 3) {
2352 case 0:
da6b5335 2353 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2354 break;
2355 case 1:
da6b5335 2356 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2357 break;
2358 case 2:
da6b5335 2359 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2360 break;
18c9b560 2361 }
da6b5335 2362 store_reg(s, rd, tmp);
18c9b560
AZ
2363 break;
2364 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2365 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2366 wrd = (insn >> 12) & 0xf;
2367 rd0 = (insn >> 16) & 0xf;
2368 rd1 = (insn >> 0) & 0xf;
2369 gen_op_iwmmxt_movq_M0_wRn(rd0);
2370 switch ((insn >> 22) & 3) {
2371 case 0:
2372 if (insn & (1 << 21))
2373 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2374 else
2375 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2376 break;
2377 case 1:
2378 if (insn & (1 << 21))
2379 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2380 else
2381 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2382 break;
2383 case 2:
2384 if (insn & (1 << 21))
2385 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2386 else
2387 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2388 break;
2389 case 3:
2390 return 1;
2391 }
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 gen_op_iwmmxt_set_cup();
2395 break;
2396 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2397 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2398 wrd = (insn >> 12) & 0xf;
2399 rd0 = (insn >> 16) & 0xf;
2400 gen_op_iwmmxt_movq_M0_wRn(rd0);
2401 switch ((insn >> 22) & 3) {
2402 case 0:
2403 if (insn & (1 << 21))
2404 gen_op_iwmmxt_unpacklsb_M0();
2405 else
2406 gen_op_iwmmxt_unpacklub_M0();
2407 break;
2408 case 1:
2409 if (insn & (1 << 21))
2410 gen_op_iwmmxt_unpacklsw_M0();
2411 else
2412 gen_op_iwmmxt_unpackluw_M0();
2413 break;
2414 case 2:
2415 if (insn & (1 << 21))
2416 gen_op_iwmmxt_unpacklsl_M0();
2417 else
2418 gen_op_iwmmxt_unpacklul_M0();
2419 break;
2420 case 3:
2421 return 1;
2422 }
2423 gen_op_iwmmxt_movq_wRn_M0(wrd);
2424 gen_op_iwmmxt_set_mup();
2425 gen_op_iwmmxt_set_cup();
2426 break;
2427 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2428 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2429 wrd = (insn >> 12) & 0xf;
2430 rd0 = (insn >> 16) & 0xf;
2431 gen_op_iwmmxt_movq_M0_wRn(rd0);
2432 switch ((insn >> 22) & 3) {
2433 case 0:
2434 if (insn & (1 << 21))
2435 gen_op_iwmmxt_unpackhsb_M0();
2436 else
2437 gen_op_iwmmxt_unpackhub_M0();
2438 break;
2439 case 1:
2440 if (insn & (1 << 21))
2441 gen_op_iwmmxt_unpackhsw_M0();
2442 else
2443 gen_op_iwmmxt_unpackhuw_M0();
2444 break;
2445 case 2:
2446 if (insn & (1 << 21))
2447 gen_op_iwmmxt_unpackhsl_M0();
2448 else
2449 gen_op_iwmmxt_unpackhul_M0();
2450 break;
2451 case 3:
2452 return 1;
2453 }
2454 gen_op_iwmmxt_movq_wRn_M0(wrd);
2455 gen_op_iwmmxt_set_mup();
2456 gen_op_iwmmxt_set_cup();
2457 break;
2458 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2459 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2460 if (((insn >> 22) & 3) == 0)
2461 return 1;
18c9b560
AZ
2462 wrd = (insn >> 12) & 0xf;
2463 rd0 = (insn >> 16) & 0xf;
2464 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2465 tmp = tcg_temp_new_i32();
da6b5335 2466 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2467 tcg_temp_free_i32(tmp);
18c9b560 2468 return 1;
da6b5335 2469 }
18c9b560 2470 switch ((insn >> 22) & 3) {
18c9b560 2471 case 1:
477955bd 2472 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2473 break;
2474 case 2:
477955bd 2475 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2476 break;
2477 case 3:
477955bd 2478 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2479 break;
2480 }
7d1b0095 2481 tcg_temp_free_i32(tmp);
18c9b560
AZ
2482 gen_op_iwmmxt_movq_wRn_M0(wrd);
2483 gen_op_iwmmxt_set_mup();
2484 gen_op_iwmmxt_set_cup();
2485 break;
2486 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2487 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2488 if (((insn >> 22) & 3) == 0)
2489 return 1;
18c9b560
AZ
2490 wrd = (insn >> 12) & 0xf;
2491 rd0 = (insn >> 16) & 0xf;
2492 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2493 tmp = tcg_temp_new_i32();
da6b5335 2494 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2495 tcg_temp_free_i32(tmp);
18c9b560 2496 return 1;
da6b5335 2497 }
18c9b560 2498 switch ((insn >> 22) & 3) {
18c9b560 2499 case 1:
477955bd 2500 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2501 break;
2502 case 2:
477955bd 2503 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2504 break;
2505 case 3:
477955bd 2506 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2507 break;
2508 }
7d1b0095 2509 tcg_temp_free_i32(tmp);
18c9b560
AZ
2510 gen_op_iwmmxt_movq_wRn_M0(wrd);
2511 gen_op_iwmmxt_set_mup();
2512 gen_op_iwmmxt_set_cup();
2513 break;
2514 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2515 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2516 if (((insn >> 22) & 3) == 0)
2517 return 1;
18c9b560
AZ
2518 wrd = (insn >> 12) & 0xf;
2519 rd0 = (insn >> 16) & 0xf;
2520 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2521 tmp = tcg_temp_new_i32();
da6b5335 2522 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2523 tcg_temp_free_i32(tmp);
18c9b560 2524 return 1;
da6b5335 2525 }
18c9b560 2526 switch ((insn >> 22) & 3) {
18c9b560 2527 case 1:
477955bd 2528 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2529 break;
2530 case 2:
477955bd 2531 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2532 break;
2533 case 3:
477955bd 2534 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2535 break;
2536 }
7d1b0095 2537 tcg_temp_free_i32(tmp);
18c9b560
AZ
2538 gen_op_iwmmxt_movq_wRn_M0(wrd);
2539 gen_op_iwmmxt_set_mup();
2540 gen_op_iwmmxt_set_cup();
2541 break;
2542 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2543 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2544 if (((insn >> 22) & 3) == 0)
2545 return 1;
18c9b560
AZ
2546 wrd = (insn >> 12) & 0xf;
2547 rd0 = (insn >> 16) & 0xf;
2548 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2549 tmp = tcg_temp_new_i32();
18c9b560 2550 switch ((insn >> 22) & 3) {
18c9b560 2551 case 1:
da6b5335 2552 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2553 tcg_temp_free_i32(tmp);
18c9b560 2554 return 1;
da6b5335 2555 }
477955bd 2556 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2557 break;
2558 case 2:
da6b5335 2559 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2560 tcg_temp_free_i32(tmp);
18c9b560 2561 return 1;
da6b5335 2562 }
477955bd 2563 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2564 break;
2565 case 3:
da6b5335 2566 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2567 tcg_temp_free_i32(tmp);
18c9b560 2568 return 1;
da6b5335 2569 }
477955bd 2570 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2571 break;
2572 }
7d1b0095 2573 tcg_temp_free_i32(tmp);
18c9b560
AZ
2574 gen_op_iwmmxt_movq_wRn_M0(wrd);
2575 gen_op_iwmmxt_set_mup();
2576 gen_op_iwmmxt_set_cup();
2577 break;
2578 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2579 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2580 wrd = (insn >> 12) & 0xf;
2581 rd0 = (insn >> 16) & 0xf;
2582 rd1 = (insn >> 0) & 0xf;
2583 gen_op_iwmmxt_movq_M0_wRn(rd0);
2584 switch ((insn >> 22) & 3) {
2585 case 0:
2586 if (insn & (1 << 21))
2587 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2588 else
2589 gen_op_iwmmxt_minub_M0_wRn(rd1);
2590 break;
2591 case 1:
2592 if (insn & (1 << 21))
2593 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2594 else
2595 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2596 break;
2597 case 2:
2598 if (insn & (1 << 21))
2599 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2600 else
2601 gen_op_iwmmxt_minul_M0_wRn(rd1);
2602 break;
2603 case 3:
2604 return 1;
2605 }
2606 gen_op_iwmmxt_movq_wRn_M0(wrd);
2607 gen_op_iwmmxt_set_mup();
2608 break;
2609 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2610 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2611 wrd = (insn >> 12) & 0xf;
2612 rd0 = (insn >> 16) & 0xf;
2613 rd1 = (insn >> 0) & 0xf;
2614 gen_op_iwmmxt_movq_M0_wRn(rd0);
2615 switch ((insn >> 22) & 3) {
2616 case 0:
2617 if (insn & (1 << 21))
2618 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2619 else
2620 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2621 break;
2622 case 1:
2623 if (insn & (1 << 21))
2624 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2625 else
2626 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2627 break;
2628 case 2:
2629 if (insn & (1 << 21))
2630 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2631 else
2632 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2633 break;
2634 case 3:
2635 return 1;
2636 }
2637 gen_op_iwmmxt_movq_wRn_M0(wrd);
2638 gen_op_iwmmxt_set_mup();
2639 break;
2640 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2641 case 0x402: case 0x502: case 0x602: case 0x702:
2642 wrd = (insn >> 12) & 0xf;
2643 rd0 = (insn >> 16) & 0xf;
2644 rd1 = (insn >> 0) & 0xf;
2645 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2646 tmp = tcg_const_i32((insn >> 20) & 3);
2647 iwmmxt_load_reg(cpu_V1, rd1);
2648 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2649 tcg_temp_free_i32(tmp);
18c9b560
AZ
2650 gen_op_iwmmxt_movq_wRn_M0(wrd);
2651 gen_op_iwmmxt_set_mup();
2652 break;
2653 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2654 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2655 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2656 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2657 wrd = (insn >> 12) & 0xf;
2658 rd0 = (insn >> 16) & 0xf;
2659 rd1 = (insn >> 0) & 0xf;
2660 gen_op_iwmmxt_movq_M0_wRn(rd0);
2661 switch ((insn >> 20) & 0xf) {
2662 case 0x0:
2663 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2664 break;
2665 case 0x1:
2666 gen_op_iwmmxt_subub_M0_wRn(rd1);
2667 break;
2668 case 0x3:
2669 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2670 break;
2671 case 0x4:
2672 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2673 break;
2674 case 0x5:
2675 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2676 break;
2677 case 0x7:
2678 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2679 break;
2680 case 0x8:
2681 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2682 break;
2683 case 0x9:
2684 gen_op_iwmmxt_subul_M0_wRn(rd1);
2685 break;
2686 case 0xb:
2687 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2688 break;
2689 default:
2690 return 1;
2691 }
2692 gen_op_iwmmxt_movq_wRn_M0(wrd);
2693 gen_op_iwmmxt_set_mup();
2694 gen_op_iwmmxt_set_cup();
2695 break;
2696 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2697 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2698 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2699 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2700 wrd = (insn >> 12) & 0xf;
2701 rd0 = (insn >> 16) & 0xf;
2702 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2703 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2704 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2705 tcg_temp_free_i32(tmp);
18c9b560
AZ
2706 gen_op_iwmmxt_movq_wRn_M0(wrd);
2707 gen_op_iwmmxt_set_mup();
2708 gen_op_iwmmxt_set_cup();
2709 break;
2710 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2711 case 0x418: case 0x518: case 0x618: case 0x718:
2712 case 0x818: case 0x918: case 0xa18: case 0xb18:
2713 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2714 wrd = (insn >> 12) & 0xf;
2715 rd0 = (insn >> 16) & 0xf;
2716 rd1 = (insn >> 0) & 0xf;
2717 gen_op_iwmmxt_movq_M0_wRn(rd0);
2718 switch ((insn >> 20) & 0xf) {
2719 case 0x0:
2720 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2721 break;
2722 case 0x1:
2723 gen_op_iwmmxt_addub_M0_wRn(rd1);
2724 break;
2725 case 0x3:
2726 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2727 break;
2728 case 0x4:
2729 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2730 break;
2731 case 0x5:
2732 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2733 break;
2734 case 0x7:
2735 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2736 break;
2737 case 0x8:
2738 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2739 break;
2740 case 0x9:
2741 gen_op_iwmmxt_addul_M0_wRn(rd1);
2742 break;
2743 case 0xb:
2744 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2745 break;
2746 default:
2747 return 1;
2748 }
2749 gen_op_iwmmxt_movq_wRn_M0(wrd);
2750 gen_op_iwmmxt_set_mup();
2751 gen_op_iwmmxt_set_cup();
2752 break;
2753 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2754 case 0x408: case 0x508: case 0x608: case 0x708:
2755 case 0x808: case 0x908: case 0xa08: case 0xb08:
2756 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2757 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2758 return 1;
18c9b560
AZ
2759 wrd = (insn >> 12) & 0xf;
2760 rd0 = (insn >> 16) & 0xf;
2761 rd1 = (insn >> 0) & 0xf;
2762 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2763 switch ((insn >> 22) & 3) {
18c9b560
AZ
2764 case 1:
2765 if (insn & (1 << 21))
2766 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2767 else
2768 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2769 break;
2770 case 2:
2771 if (insn & (1 << 21))
2772 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2773 else
2774 gen_op_iwmmxt_packul_M0_wRn(rd1);
2775 break;
2776 case 3:
2777 if (insn & (1 << 21))
2778 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2779 else
2780 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2781 break;
2782 }
2783 gen_op_iwmmxt_movq_wRn_M0(wrd);
2784 gen_op_iwmmxt_set_mup();
2785 gen_op_iwmmxt_set_cup();
2786 break;
2787 case 0x201: case 0x203: case 0x205: case 0x207:
2788 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2789 case 0x211: case 0x213: case 0x215: case 0x217:
2790 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2791 wrd = (insn >> 5) & 0xf;
2792 rd0 = (insn >> 12) & 0xf;
2793 rd1 = (insn >> 0) & 0xf;
2794 if (rd0 == 0xf || rd1 == 0xf)
2795 return 1;
2796 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2797 tmp = load_reg(s, rd0);
2798 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2799 switch ((insn >> 16) & 0xf) {
2800 case 0x0: /* TMIA */
da6b5335 2801 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2802 break;
2803 case 0x8: /* TMIAPH */
da6b5335 2804 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2805 break;
2806 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2807 if (insn & (1 << 16))
da6b5335 2808 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2809 if (insn & (1 << 17))
da6b5335
FN
2810 tcg_gen_shri_i32(tmp2, tmp2, 16);
2811 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2812 break;
2813 default:
7d1b0095
PM
2814 tcg_temp_free_i32(tmp2);
2815 tcg_temp_free_i32(tmp);
18c9b560
AZ
2816 return 1;
2817 }
7d1b0095
PM
2818 tcg_temp_free_i32(tmp2);
2819 tcg_temp_free_i32(tmp);
18c9b560
AZ
2820 gen_op_iwmmxt_movq_wRn_M0(wrd);
2821 gen_op_iwmmxt_set_mup();
2822 break;
2823 default:
2824 return 1;
2825 }
2826
2827 return 0;
2828}
2829
a1c7273b 2830/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2831 (ie. an undefined instruction). */
7dcc1f89 2832static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2833{
2834 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2835 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2836
2837 if ((insn & 0x0ff00f10) == 0x0e200010) {
2838 /* Multiply with Internal Accumulate Format */
2839 rd0 = (insn >> 12) & 0xf;
2840 rd1 = insn & 0xf;
2841 acc = (insn >> 5) & 7;
2842
2843 if (acc != 0)
2844 return 1;
2845
3a554c0f
FN
2846 tmp = load_reg(s, rd0);
2847 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2848 switch ((insn >> 16) & 0xf) {
2849 case 0x0: /* MIA */
3a554c0f 2850 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2851 break;
2852 case 0x8: /* MIAPH */
3a554c0f 2853 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2854 break;
2855 case 0xc: /* MIABB */
2856 case 0xd: /* MIABT */
2857 case 0xe: /* MIATB */
2858 case 0xf: /* MIATT */
18c9b560 2859 if (insn & (1 << 16))
3a554c0f 2860 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2861 if (insn & (1 << 17))
3a554c0f
FN
2862 tcg_gen_shri_i32(tmp2, tmp2, 16);
2863 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2864 break;
2865 default:
2866 return 1;
2867 }
7d1b0095
PM
2868 tcg_temp_free_i32(tmp2);
2869 tcg_temp_free_i32(tmp);
18c9b560
AZ
2870
2871 gen_op_iwmmxt_movq_wRn_M0(acc);
2872 return 0;
2873 }
2874
2875 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2876 /* Internal Accumulator Access Format */
2877 rdhi = (insn >> 16) & 0xf;
2878 rdlo = (insn >> 12) & 0xf;
2879 acc = insn & 7;
2880
2881 if (acc != 0)
2882 return 1;
2883
2884 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2885 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2886 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2887 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2888 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2889 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2890 } else { /* MAR */
3a554c0f
FN
2891 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2892 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2893 }
2894 return 0;
2895 }
2896
2897 return 1;
2898}
2899
/* VFP register-number extraction helpers.
 *
 * VFP_REG_SHR(x, n) shifts x right by n, treating a negative n as a
 * left shift by -n.
 *
 * VFP_SREG reassembles a single-precision register number from the
 * 4-bit field at 'bigbit' (placed in bits [4:1]) and the extra low bit
 * at 'smallbit' (placed in bit 0).
 *
 * VFP_DREG extracts a double-precision register number.  On VFP3-capable
 * cores the bit at 'smallbit' supplies a 5th (high) register bit; on
 * pre-VFP3 cores only D0-D15 exist, so a set 'smallbit' makes the
 * *enclosing function* return 1 (UNDEF) — note the hidden control flow.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience wrappers for the standard D/N/M operand field positions.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2919
4373f3ce 2920/* Move between integer and VFP cores. */
39d5492a 2921static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2922{
39d5492a 2923 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2924 tcg_gen_mov_i32(tmp, cpu_F0s);
2925 return tmp;
2926}
2927
39d5492a 2928static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2929{
2930 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2931 tcg_temp_free_i32(tmp);
4373f3ce
PB
2932}
2933
39d5492a 2934static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2935{
39d5492a 2936 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2937 if (shift)
2938 tcg_gen_shri_i32(var, var, shift);
86831435 2939 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2940 tcg_gen_shli_i32(tmp, var, 8);
2941 tcg_gen_or_i32(var, var, tmp);
2942 tcg_gen_shli_i32(tmp, var, 16);
2943 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2944 tcg_temp_free_i32(tmp);
ad69471c
PB
2945}
2946
39d5492a 2947static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2948{
39d5492a 2949 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2950 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2951 tcg_gen_shli_i32(tmp, var, 16);
2952 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2953 tcg_temp_free_i32(tmp);
ad69471c
PB
2954}
2955
39d5492a 2956static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2957{
39d5492a 2958 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2959 tcg_gen_andi_i32(var, var, 0xffff0000);
2960 tcg_gen_shri_i32(tmp, var, 16);
2961 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2962 tcg_temp_free_i32(tmp);
ad69471c
PB
2963}
2964
39d5492a 2965static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2966{
2967 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2968 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2969 switch (size) {
2970 case 0:
12dcc321 2971 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2972 gen_neon_dup_u8(tmp, 0);
2973 break;
2974 case 1:
12dcc321 2975 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2976 gen_neon_dup_low16(tmp);
2977 break;
2978 case 2:
12dcc321 2979 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2980 break;
2981 default: /* Avoid compiler warnings. */
2982 abort();
2983 }
2984 return tmp;
2985}
2986
04731fb5
WN
/* VSEL: select between Vn and Vm according to the condition encoded in
 * insn[21:20] (0 = EQ, 1 = VS, 2 = GE, 3 = GT), evaluated against the
 * current CPSR NZCV flags, and write the result to Vd.
 * 'dp' selects double (1) vs single (0) precision.
 * Returns 0 (this encoding always decodes successfully here).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* The 64-bit movconds need 64-bit flag values. ZF is only ever
         * compared EQ/NE against zero, so zero-extension suffices; NF
         * and VF are tested with signed LT/GE, so they must be
         * sign-extended to keep bit 31 as the sign.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            /* V set <=> sign bit of (sign-extended) VF set <=> vf < 0 */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two chained movconds: first select on Z, then refine on
             * N ^ V. The second reuses 'dest' as its own true-input,
             * so the statement order here is significant.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: the 32-bit flag registers can be used
         * directly as movcond operands, no widening needed.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Same two-step select as the dp case; order matters. */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
3095
40cfacdd
WN
3096static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3097 uint32_t rm, uint32_t dp)
3098{
3099 uint32_t vmin = extract32(insn, 6, 1);
3100 TCGv_ptr fpst = get_fpstatus_ptr(0);
3101
3102 if (dp) {
3103 TCGv_i64 frn, frm, dest;
3104
3105 frn = tcg_temp_new_i64();
3106 frm = tcg_temp_new_i64();
3107 dest = tcg_temp_new_i64();
3108
3109 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3110 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3111 if (vmin) {
f71a2ae5 3112 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3113 } else {
f71a2ae5 3114 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3115 }
3116 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3117 tcg_temp_free_i64(frn);
3118 tcg_temp_free_i64(frm);
3119 tcg_temp_free_i64(dest);
3120 } else {
3121 TCGv_i32 frn, frm, dest;
3122
3123 frn = tcg_temp_new_i32();
3124 frm = tcg_temp_new_i32();
3125 dest = tcg_temp_new_i32();
3126
3127 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3128 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3129 if (vmin) {
f71a2ae5 3130 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3131 } else {
f71a2ae5 3132 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3133 }
3134 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3135 tcg_temp_free_i32(frn);
3136 tcg_temp_free_i32(frm);
3137 tcg_temp_free_i32(dest);
3138 }
3139
3140 tcg_temp_free_ptr(fpst);
3141 return 0;
3142}
3143
7655f39b
WN
3144static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3145 int rounding)
3146{
3147 TCGv_ptr fpst = get_fpstatus_ptr(0);
3148 TCGv_i32 tcg_rmode;
3149
3150 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3151 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3152
3153 if (dp) {
3154 TCGv_i64 tcg_op;
3155 TCGv_i64 tcg_res;
3156 tcg_op = tcg_temp_new_i64();
3157 tcg_res = tcg_temp_new_i64();
3158 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3159 gen_helper_rintd(tcg_res, tcg_op, fpst);
3160 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3161 tcg_temp_free_i64(tcg_op);
3162 tcg_temp_free_i64(tcg_res);
3163 } else {
3164 TCGv_i32 tcg_op;
3165 TCGv_i32 tcg_res;
3166 tcg_op = tcg_temp_new_i32();
3167 tcg_res = tcg_temp_new_i32();
3168 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3169 gen_helper_rints(tcg_res, tcg_op, fpst);
3170 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3171 tcg_temp_free_i32(tcg_op);
3172 tcg_temp_free_i32(tcg_res);
3173 }
3174
9b049916 3175 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3176 tcg_temp_free_i32(tcg_rmode);
3177
3178 tcg_temp_free_ptr(fpst);
3179 return 0;
3180}
3181
c9975a83
WN
/* VCVT{A,N,P,M}: convert Vm (float) to a 32-bit integer in Vd, using
 * the explicit rounding mode 'rounding' instead of the FPSCR one.
 * insn bit 7 selects a signed (1) or unsigned (0) result; 'dp' selects
 * a double- vs single-precision source. The destination is always a
 * single-precision register. Returns 0 (always succeeds).
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* The to-fixed-point helpers are reused with 0 fractional bits to
     * get a plain integer conversion.
     */
    tcg_shift = tcg_const_i32(0);

    /* set_rmode installs the new mode and leaves the previous mode in
     * tcg_rmode, so the second call below restores the original setting.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision: remap the D-register field to S-register
         * numbering (low bit moves from bit 4 to bit 0).
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The helper produces a 64-bit value; only the low 32 bits are
         * the result, stored into the single-precision destination.
         */
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the previous FP rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3239
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by the 2-bit RM field of the instruction (insn[17:16]
 * in the VRINT*/VCVT* encodings decoded below).
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3250
/* Decode the ARMv8-only VFP encodings: VSEL, VMINNM/VMAXNM, and the
 * VRINT*/VCVT* variants with an explicit rounding mode. These live in
 * the "unconditional" (cond == 0xf) space in ARM mode / T=1 in Thumb.
 * Returns 0 on success, nonzero to UNDEF.
 */
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Extract the register numbers; note VFP_DREG_* may "return 1"
     * directly for invalid encodings on pre-VFP3 cores.
     */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    /* Dispatch on the fixed bits of each encoding. */
    if ((insn & 0x0f800e50) == 0x0e000a00) {
        /* VSEL */
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        /* VMINNM, VMAXNM */
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3284
a1c7273b 3285/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3286 (ie. an undefined instruction). */
7dcc1f89 3287static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3288{
3289 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3290 int dp, veclen;
39d5492a
PM
3291 TCGv_i32 addr;
3292 TCGv_i32 tmp;
3293 TCGv_i32 tmp2;
b7bcbe95 3294
d614a513 3295 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3296 return 1;
d614a513 3297 }
40f137e1 3298
2c7ffc41
PM
3299 /* FIXME: this access check should not take precedence over UNDEF
3300 * for invalid encodings; we will generate incorrect syndrome information
3301 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3302 */
9dbbc748 3303 if (s->fp_excp_el) {
2c7ffc41 3304 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3305 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3306 return 0;
3307 }
3308
5df8bac1 3309 if (!s->vfp_enabled) {
9ee6e8bb 3310 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3311 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3312 return 1;
3313 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3314 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3315 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3316 return 1;
a50c0f51 3317 }
40f137e1 3318 }
6a57f3eb
WN
3319
3320 if (extract32(insn, 28, 4) == 0xf) {
3321 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3322 * only used in v8 and above.
3323 */
7dcc1f89 3324 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3325 }
3326
b7bcbe95
FB
3327 dp = ((insn & 0xf00) == 0xb00);
3328 switch ((insn >> 24) & 0xf) {
3329 case 0xe:
3330 if (insn & (1 << 4)) {
3331 /* single register transfer */
b7bcbe95
FB
3332 rd = (insn >> 12) & 0xf;
3333 if (dp) {
9ee6e8bb
PB
3334 int size;
3335 int pass;
3336
3337 VFP_DREG_N(rn, insn);
3338 if (insn & 0xf)
b7bcbe95 3339 return 1;
9ee6e8bb 3340 if (insn & 0x00c00060
d614a513 3341 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3342 return 1;
d614a513 3343 }
9ee6e8bb
PB
3344
3345 pass = (insn >> 21) & 1;
3346 if (insn & (1 << 22)) {
3347 size = 0;
3348 offset = ((insn >> 5) & 3) * 8;
3349 } else if (insn & (1 << 5)) {
3350 size = 1;
3351 offset = (insn & (1 << 6)) ? 16 : 0;
3352 } else {
3353 size = 2;
3354 offset = 0;
3355 }
18c9b560 3356 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3357 /* vfp->arm */
ad69471c 3358 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3359 switch (size) {
3360 case 0:
9ee6e8bb 3361 if (offset)
ad69471c 3362 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3363 if (insn & (1 << 23))
ad69471c 3364 gen_uxtb(tmp);
9ee6e8bb 3365 else
ad69471c 3366 gen_sxtb(tmp);
9ee6e8bb
PB
3367 break;
3368 case 1:
9ee6e8bb
PB
3369 if (insn & (1 << 23)) {
3370 if (offset) {
ad69471c 3371 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3372 } else {
ad69471c 3373 gen_uxth(tmp);
9ee6e8bb
PB
3374 }
3375 } else {
3376 if (offset) {
ad69471c 3377 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3378 } else {
ad69471c 3379 gen_sxth(tmp);
9ee6e8bb
PB
3380 }
3381 }
3382 break;
3383 case 2:
9ee6e8bb
PB
3384 break;
3385 }
ad69471c 3386 store_reg(s, rd, tmp);
b7bcbe95
FB
3387 } else {
3388 /* arm->vfp */
ad69471c 3389 tmp = load_reg(s, rd);
9ee6e8bb
PB
3390 if (insn & (1 << 23)) {
3391 /* VDUP */
3392 if (size == 0) {
ad69471c 3393 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3394 } else if (size == 1) {
ad69471c 3395 gen_neon_dup_low16(tmp);
9ee6e8bb 3396 }
cbbccffc 3397 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3398 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3399 tcg_gen_mov_i32(tmp2, tmp);
3400 neon_store_reg(rn, n, tmp2);
3401 }
3402 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3403 } else {
3404 /* VMOV */
3405 switch (size) {
3406 case 0:
ad69471c 3407 tmp2 = neon_load_reg(rn, pass);
d593c48e 3408 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3409 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3410 break;
3411 case 1:
ad69471c 3412 tmp2 = neon_load_reg(rn, pass);
d593c48e 3413 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3414 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3415 break;
3416 case 2:
9ee6e8bb
PB
3417 break;
3418 }
ad69471c 3419 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3420 }
b7bcbe95 3421 }
9ee6e8bb
PB
3422 } else { /* !dp */
3423 if ((insn & 0x6f) != 0x00)
3424 return 1;
3425 rn = VFP_SREG_N(insn);
18c9b560 3426 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3427 /* vfp->arm */
3428 if (insn & (1 << 21)) {
3429 /* system register */
40f137e1 3430 rn >>= 1;
9ee6e8bb 3431
b7bcbe95 3432 switch (rn) {
40f137e1 3433 case ARM_VFP_FPSID:
4373f3ce 3434 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3435 VFP3 restricts all id registers to privileged
3436 accesses. */
3437 if (IS_USER(s)
d614a513 3438 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3439 return 1;
d614a513 3440 }
4373f3ce 3441 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3442 break;
40f137e1 3443 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3444 if (IS_USER(s))
3445 return 1;
4373f3ce 3446 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3447 break;
40f137e1
PB
3448 case ARM_VFP_FPINST:
3449 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3450 /* Not present in VFP3. */
3451 if (IS_USER(s)
d614a513 3452 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3453 return 1;
d614a513 3454 }
4373f3ce 3455 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3456 break;
40f137e1 3457 case ARM_VFP_FPSCR:
601d70b9 3458 if (rd == 15) {
4373f3ce
PB
3459 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3460 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3461 } else {
7d1b0095 3462 tmp = tcg_temp_new_i32();
4373f3ce
PB
3463 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3464 }
b7bcbe95 3465 break;
a50c0f51 3466 case ARM_VFP_MVFR2:
d614a513 3467 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3468 return 1;
3469 }
3470 /* fall through */
9ee6e8bb
PB
3471 case ARM_VFP_MVFR0:
3472 case ARM_VFP_MVFR1:
3473 if (IS_USER(s)
d614a513 3474 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3475 return 1;
d614a513 3476 }
4373f3ce 3477 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3478 break;
b7bcbe95
FB
3479 default:
3480 return 1;
3481 }
3482 } else {
3483 gen_mov_F0_vreg(0, rn);
4373f3ce 3484 tmp = gen_vfp_mrs();
b7bcbe95
FB
3485 }
3486 if (rd == 15) {
b5ff1b31 3487 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3488 gen_set_nzcv(tmp);
7d1b0095 3489 tcg_temp_free_i32(tmp);
4373f3ce
PB
3490 } else {
3491 store_reg(s, rd, tmp);
3492 }
b7bcbe95
FB
3493 } else {
3494 /* arm->vfp */
b7bcbe95 3495 if (insn & (1 << 21)) {
40f137e1 3496 rn >>= 1;
b7bcbe95
FB
3497 /* system register */
3498 switch (rn) {
40f137e1 3499 case ARM_VFP_FPSID:
9ee6e8bb
PB
3500 case ARM_VFP_MVFR0:
3501 case ARM_VFP_MVFR1:
b7bcbe95
FB
3502 /* Writes are ignored. */
3503 break;
40f137e1 3504 case ARM_VFP_FPSCR:
e4c1cfa5 3505 tmp = load_reg(s, rd);
4373f3ce 3506 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3507 tcg_temp_free_i32(tmp);
b5ff1b31 3508 gen_lookup_tb(s);
b7bcbe95 3509 break;
40f137e1 3510 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3511 if (IS_USER(s))
3512 return 1;
71b3c3de
JR
3513 /* TODO: VFP subarchitecture support.
3514 * For now, keep the EN bit only */
e4c1cfa5 3515 tmp = load_reg(s, rd);
71b3c3de 3516 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3517 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3518 gen_lookup_tb(s);
3519 break;
3520 case ARM_VFP_FPINST:
3521 case ARM_VFP_FPINST2:
23adb861
PM
3522 if (IS_USER(s)) {
3523 return 1;
3524 }
e4c1cfa5 3525 tmp = load_reg(s, rd);
4373f3ce 3526 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3527 break;
b7bcbe95
FB
3528 default:
3529 return 1;
3530 }
3531 } else {
e4c1cfa5 3532 tmp = load_reg(s, rd);
4373f3ce 3533 gen_vfp_msr(tmp);
b7bcbe95
FB
3534 gen_mov_vreg_F0(0, rn);
3535 }
3536 }
3537 }
3538 } else {
3539 /* data processing */
3540 /* The opcode is in bits 23, 21, 20 and 6. */
3541 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3542 if (dp) {
3543 if (op == 15) {
3544 /* rn is opcode */
3545 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3546 } else {
3547 /* rn is register number */
9ee6e8bb 3548 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3549 }
3550
239c20c7
WN
3551 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3552 ((rn & 0x1e) == 0x6))) {
3553 /* Integer or single/half precision destination. */
9ee6e8bb 3554 rd = VFP_SREG_D(insn);
b7bcbe95 3555 } else {
9ee6e8bb 3556 VFP_DREG_D(rd, insn);
b7bcbe95 3557 }
04595bf6 3558 if (op == 15 &&
239c20c7
WN
3559 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3560 ((rn & 0x1e) == 0x4))) {
3561 /* VCVT from int or half precision is always from S reg
3562 * regardless of dp bit. VCVT with immediate frac_bits
3563 * has same format as SREG_M.
04595bf6
PM
3564 */
3565 rm = VFP_SREG_M(insn);
b7bcbe95 3566 } else {
9ee6e8bb 3567 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3568 }
3569 } else {
9ee6e8bb 3570 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3571 if (op == 15 && rn == 15) {
3572 /* Double precision destination. */
9ee6e8bb
PB
3573 VFP_DREG_D(rd, insn);
3574 } else {
3575 rd = VFP_SREG_D(insn);
3576 }
04595bf6
PM
3577 /* NB that we implicitly rely on the encoding for the frac_bits
3578 * in VCVT of fixed to float being the same as that of an SREG_M
3579 */
9ee6e8bb 3580 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3581 }
3582
69d1fc22 3583 veclen = s->vec_len;
b7bcbe95
FB
3584 if (op == 15 && rn > 3)
3585 veclen = 0;
3586
3587 /* Shut up compiler warnings. */
3588 delta_m = 0;
3589 delta_d = 0;
3590 bank_mask = 0;
3b46e624 3591
b7bcbe95
FB
3592 if (veclen > 0) {
3593 if (dp)
3594 bank_mask = 0xc;
3595 else
3596 bank_mask = 0x18;
3597
3598 /* Figure out what type of vector operation this is. */
3599 if ((rd & bank_mask) == 0) {
3600 /* scalar */
3601 veclen = 0;
3602 } else {
3603 if (dp)
69d1fc22 3604 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3605 else
69d1fc22 3606 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3607
3608 if ((rm & bank_mask) == 0) {
3609 /* mixed scalar/vector */
3610 delta_m = 0;
3611 } else {
3612 /* vector */
3613 delta_m = delta_d;
3614 }
3615 }
3616 }
3617
3618 /* Load the initial operands. */
3619 if (op == 15) {
3620 switch (rn) {
3621 case 16:
3622 case 17:
3623 /* Integer source */
3624 gen_mov_F0_vreg(0, rm);
3625 break;
3626 case 8:
3627 case 9:
3628 /* Compare */
3629 gen_mov_F0_vreg(dp, rd);
3630 gen_mov_F1_vreg(dp, rm);
3631 break;
3632 case 10:
3633 case 11:
3634 /* Compare with zero */
3635 gen_mov_F0_vreg(dp, rd);
3636 gen_vfp_F1_ld0(dp);
3637 break;
9ee6e8bb
PB
3638 case 20:
3639 case 21:
3640 case 22:
3641 case 23:
644ad806
PB
3642 case 28:
3643 case 29:
3644 case 30:
3645 case 31:
9ee6e8bb
PB
3646 /* Source and destination the same. */
3647 gen_mov_F0_vreg(dp, rd);
3648 break;
6e0c0ed1
PM
3649 case 4:
3650 case 5:
3651 case 6:
3652 case 7:
239c20c7
WN
3653 /* VCVTB, VCVTT: only present with the halfprec extension
3654 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3655 * (we choose to UNDEF)
6e0c0ed1 3656 */
d614a513
PM
3657 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3658 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3659 return 1;
3660 }
239c20c7
WN
3661 if (!extract32(rn, 1, 1)) {
3662 /* Half precision source. */
3663 gen_mov_F0_vreg(0, rm);
3664 break;
3665 }
6e0c0ed1 3666 /* Otherwise fall through */
b7bcbe95
FB
3667 default:
3668 /* One source operand. */
3669 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3670 break;
b7bcbe95
FB
3671 }
3672 } else {
3673 /* Two source operands. */
3674 gen_mov_F0_vreg(dp, rn);
3675 gen_mov_F1_vreg(dp, rm);
3676 }
3677
3678 for (;;) {
3679 /* Perform the calculation. */
3680 switch (op) {
605a6aed
PM
3681 case 0: /* VMLA: fd + (fn * fm) */
3682 /* Note that order of inputs to the add matters for NaNs */
3683 gen_vfp_F1_mul(dp);
3684 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3685 gen_vfp_add(dp);
3686 break;
605a6aed 3687 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3688 gen_vfp_mul(dp);
605a6aed
PM
3689 gen_vfp_F1_neg(dp);
3690 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3691 gen_vfp_add(dp);
3692 break;
605a6aed
PM
3693 case 2: /* VNMLS: -fd + (fn * fm) */
3694 /* Note that it isn't valid to replace (-A + B) with (B - A)
3695 * or similar plausible looking simplifications
3696 * because this will give wrong results for NaNs.
3697 */
3698 gen_vfp_F1_mul(dp);
3699 gen_mov_F0_vreg(dp, rd);
3700 gen_vfp_neg(dp);
3701 gen_vfp_add(dp);
b7bcbe95 3702 break;
605a6aed 3703 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3704 gen_vfp_mul(dp);
605a6aed
PM
3705 gen_vfp_F1_neg(dp);
3706 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3707 gen_vfp_neg(dp);
605a6aed 3708 gen_vfp_add(dp);
b7bcbe95
FB
3709 break;
3710 case 4: /* mul: fn * fm */
3711 gen_vfp_mul(dp);
3712 break;
3713 case 5: /* nmul: -(fn * fm) */
3714 gen_vfp_mul(dp);
3715 gen_vfp_neg(dp);
3716 break;
3717 case 6: /* add: fn + fm */
3718 gen_vfp_add(dp);
3719 break;
3720 case 7: /* sub: fn - fm */
3721 gen_vfp_sub(dp);
3722 break;
3723 case 8: /* div: fn / fm */
3724 gen_vfp_div(dp);
3725 break;
da97f52c
PM
3726 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3727 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3728 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3729 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3730 /* These are fused multiply-add, and must be done as one
3731 * floating point operation with no rounding between the
3732 * multiplication and addition steps.
3733 * NB that doing the negations here as separate steps is
3734 * correct : an input NaN should come out with its sign bit
3735 * flipped if it is a negated-input.
3736 */
d614a513 3737 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3738 return 1;
3739 }
3740 if (dp) {
3741 TCGv_ptr fpst;
3742 TCGv_i64 frd;
3743 if (op & 1) {
3744 /* VFNMS, VFMS */
3745 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3746 }
3747 frd = tcg_temp_new_i64();
3748 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3749 if (op & 2) {
3750 /* VFNMA, VFNMS */
3751 gen_helper_vfp_negd(frd, frd);
3752 }
3753 fpst = get_fpstatus_ptr(0);
3754 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3755 cpu_F1d, frd, fpst);
3756 tcg_temp_free_ptr(fpst);
3757 tcg_temp_free_i64(frd);
3758 } else {
3759 TCGv_ptr fpst;
3760 TCGv_i32 frd;
3761 if (op & 1) {
3762 /* VFNMS, VFMS */
3763 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3764 }
3765 frd = tcg_temp_new_i32();
3766 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3767 if (op & 2) {
3768 gen_helper_vfp_negs(frd, frd);
3769 }
3770 fpst = get_fpstatus_ptr(0);
3771 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3772 cpu_F1s, frd, fpst);
3773 tcg_temp_free_ptr(fpst);
3774 tcg_temp_free_i32(frd);
3775 }
3776 break;
9ee6e8bb 3777 case 14: /* fconst */
d614a513
PM
3778 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3779 return 1;
3780 }
9ee6e8bb
PB
3781
3782 n = (insn << 12) & 0x80000000;
3783 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3784 if (dp) {
3785 if (i & 0x40)
3786 i |= 0x3f80;
3787 else
3788 i |= 0x4000;
3789 n |= i << 16;
4373f3ce 3790 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3791 } else {
3792 if (i & 0x40)
3793 i |= 0x780;
3794 else
3795 i |= 0x800;
3796 n |= i << 19;
5b340b51 3797 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3798 }
9ee6e8bb 3799 break;
b7bcbe95
FB
3800 case 15: /* extension space */
3801 switch (rn) {
3802 case 0: /* cpy */
3803 /* no-op */
3804 break;
3805 case 1: /* abs */
3806 gen_vfp_abs(dp);
3807 break;
3808 case 2: /* neg */
3809 gen_vfp_neg(dp);
3810 break;
3811 case 3: /* sqrt */
3812 gen_vfp_sqrt(dp);
3813 break;
239c20c7 3814 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3815 tmp = gen_vfp_mrs();
3816 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3817 if (dp) {
3818 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3819 cpu_env);
3820 } else {
3821 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3822 cpu_env);
3823 }
7d1b0095 3824 tcg_temp_free_i32(tmp);
60011498 3825 break;
239c20c7 3826 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3827 tmp = gen_vfp_mrs();
3828 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3829 if (dp) {
3830 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3831 cpu_env);
3832 } else {
3833 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3834 cpu_env);
3835 }
7d1b0095 3836 tcg_temp_free_i32(tmp);
60011498 3837 break;
239c20c7 3838 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3839 tmp = tcg_temp_new_i32();
239c20c7
WN
3840 if (dp) {
3841 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3842 cpu_env);
3843 } else {
3844 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3845 cpu_env);
3846 }
60011498
PB
3847 gen_mov_F0_vreg(0, rd);
3848 tmp2 = gen_vfp_mrs();
3849 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3850 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3851 tcg_temp_free_i32(tmp2);
60011498
PB
3852 gen_vfp_msr(tmp);
3853 break;
239c20c7 3854 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3855 tmp = tcg_temp_new_i32();
239c20c7
WN
3856 if (dp) {
3857 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3858 cpu_env);
3859 } else {
3860 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3861 cpu_env);
3862 }
60011498
PB
3863 tcg_gen_shli_i32(tmp, tmp, 16);
3864 gen_mov_F0_vreg(0, rd);
3865 tmp2 = gen_vfp_mrs();
3866 tcg_gen_ext16u_i32(tmp2, tmp2);
3867 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3868 tcg_temp_free_i32(tmp2);
60011498
PB
3869 gen_vfp_msr(tmp);
3870 break;
b7bcbe95
FB
3871 case 8: /* cmp */
3872 gen_vfp_cmp(dp);
3873 break;
3874 case 9: /* cmpe */
3875 gen_vfp_cmpe(dp);
3876 break;
3877 case 10: /* cmpz */
3878 gen_vfp_cmp(dp);
3879 break;
3880 case 11: /* cmpez */
3881 gen_vfp_F1_ld0(dp);
3882 gen_vfp_cmpe(dp);
3883 break;
664c6733
WN
3884 case 12: /* vrintr */
3885 {
3886 TCGv_ptr fpst = get_fpstatus_ptr(0);
3887 if (dp) {
3888 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3889 } else {
3890 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3891 }
3892 tcg_temp_free_ptr(fpst);
3893 break;
3894 }
a290c62a
WN
3895 case 13: /* vrintz */
3896 {
3897 TCGv_ptr fpst = get_fpstatus_ptr(0);
3898 TCGv_i32 tcg_rmode;
3899 tcg_rmode = tcg_const_i32(float_round_to_zero);
9b049916 3900 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3901 if (dp) {
3902 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3903 } else {
3904 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3905 }
9b049916 3906 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3907 tcg_temp_free_i32(tcg_rmode);
3908 tcg_temp_free_ptr(fpst);
3909 break;
3910 }
4e82bc01
WN
3911 case 14: /* vrintx */
3912 {
3913 TCGv_ptr fpst = get_fpstatus_ptr(0);
3914 if (dp) {
3915 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3916 } else {
3917 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3918 }
3919 tcg_temp_free_ptr(fpst);
3920 break;
3921 }
b7bcbe95
FB
3922 case 15: /* single<->double conversion */
3923 if (dp)
4373f3ce 3924 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3925 else
4373f3ce 3926 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3927 break;
3928 case 16: /* fuito */
5500b06c 3929 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3930 break;
3931 case 17: /* fsito */
5500b06c 3932 gen_vfp_sito(dp, 0);
b7bcbe95 3933 break;
9ee6e8bb 3934 case 20: /* fshto */
d614a513
PM
3935 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3936 return 1;
3937 }
5500b06c 3938 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3939 break;
3940 case 21: /* fslto */
d614a513
PM
3941 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3942 return 1;
3943 }
5500b06c 3944 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3945 break;
3946 case 22: /* fuhto */
d614a513
PM
3947 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3948 return 1;
3949 }
5500b06c 3950 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3951 break;
3952 case 23: /* fulto */
d614a513
PM
3953 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3954 return 1;
3955 }
5500b06c 3956 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3957 break;
b7bcbe95 3958 case 24: /* ftoui */
5500b06c 3959 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3960 break;
3961 case 25: /* ftouiz */
5500b06c 3962 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3963 break;
3964 case 26: /* ftosi */
5500b06c 3965 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3966 break;
3967 case 27: /* ftosiz */
5500b06c 3968 gen_vfp_tosiz(dp, 0);
b7bcbe95 3969 break;
9ee6e8bb 3970 case 28: /* ftosh */
d614a513
PM
3971 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3972 return 1;
3973 }
5500b06c 3974 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3975 break;
3976 case 29: /* ftosl */
d614a513
PM
3977 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3978 return 1;
3979 }
5500b06c 3980 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3981 break;
3982 case 30: /* ftouh */
d614a513
PM
3983 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3984 return 1;
3985 }
5500b06c 3986 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3987 break;
3988 case 31: /* ftoul */
d614a513
PM
3989 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3990 return 1;
3991 }
5500b06c 3992 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3993 break;
b7bcbe95 3994 default: /* undefined */
b7bcbe95
FB
3995 return 1;
3996 }
3997 break;
3998 default: /* undefined */
b7bcbe95
FB
3999 return 1;
4000 }
4001
4002 /* Write back the result. */
239c20c7
WN
4003 if (op == 15 && (rn >= 8 && rn <= 11)) {
4004 /* Comparison, do nothing. */
4005 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4006 (rn & 0x1e) == 0x6)) {
4007 /* VCVT double to int: always integer result.
4008 * VCVT double to half precision is always a single
4009 * precision result.
4010 */
b7bcbe95 4011 gen_mov_vreg_F0(0, rd);
239c20c7 4012 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
4013 /* conversion */
4014 gen_mov_vreg_F0(!dp, rd);
239c20c7 4015 } else {
b7bcbe95 4016 gen_mov_vreg_F0(dp, rd);
239c20c7 4017 }
b7bcbe95
FB
4018
4019 /* break out of the loop if we have finished */
4020 if (veclen == 0)
4021 break;
4022
4023 if (op == 15 && delta_m == 0) {
4024 /* single source one-many */
4025 while (veclen--) {
4026 rd = ((rd + delta_d) & (bank_mask - 1))
4027 | (rd & bank_mask);
4028 gen_mov_vreg_F0(dp, rd);
4029 }
4030 break;
4031 }
4032 /* Setup the next operands. */
4033 veclen--;
4034 rd = ((rd + delta_d) & (bank_mask - 1))
4035 | (rd & bank_mask);
4036
4037 if (op == 15) {
4038 /* One source operand. */
4039 rm = ((rm + delta_m) & (bank_mask - 1))
4040 | (rm & bank_mask);
4041 gen_mov_F0_vreg(dp, rm);
4042 } else {
4043 /* Two source operands. */
4044 rn = ((rn + delta_d) & (bank_mask - 1))
4045 | (rn & bank_mask);
4046 gen_mov_F0_vreg(dp, rn);
4047 if (delta_m) {
4048 rm = ((rm + delta_m) & (bank_mask - 1))
4049 | (rm & bank_mask);
4050 gen_mov_F1_vreg(dp, rm);
4051 }
4052 }
4053 }
4054 }
4055 break;
4056 case 0xc:
4057 case 0xd:
8387da81 4058 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4059 /* two-register transfer */
4060 rn = (insn >> 16) & 0xf;
4061 rd = (insn >> 12) & 0xf;
4062 if (dp) {
9ee6e8bb
PB
4063 VFP_DREG_M(rm, insn);
4064 } else {
4065 rm = VFP_SREG_M(insn);
4066 }
b7bcbe95 4067
18c9b560 4068 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4069 /* vfp->arm */
4070 if (dp) {
4373f3ce
PB
4071 gen_mov_F0_vreg(0, rm * 2);
4072 tmp = gen_vfp_mrs();
4073 store_reg(s, rd, tmp);
4074 gen_mov_F0_vreg(0, rm * 2 + 1);
4075 tmp = gen_vfp_mrs();
4076 store_reg(s, rn, tmp);
b7bcbe95
FB
4077 } else {
4078 gen_mov_F0_vreg(0, rm);
4373f3ce 4079 tmp = gen_vfp_mrs();
8387da81 4080 store_reg(s, rd, tmp);
b7bcbe95 4081 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4082 tmp = gen_vfp_mrs();
8387da81 4083 store_reg(s, rn, tmp);
b7bcbe95
FB
4084 }
4085 } else {
4086 /* arm->vfp */
4087 if (dp) {
4373f3ce
PB
4088 tmp = load_reg(s, rd);
4089 gen_vfp_msr(tmp);
4090 gen_mov_vreg_F0(0, rm * 2);
4091 tmp = load_reg(s, rn);
4092 gen_vfp_msr(tmp);
4093 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4094 } else {
8387da81 4095 tmp = load_reg(s, rd);
4373f3ce 4096 gen_vfp_msr(tmp);
b7bcbe95 4097 gen_mov_vreg_F0(0, rm);
8387da81 4098 tmp = load_reg(s, rn);
4373f3ce 4099 gen_vfp_msr(tmp);
b7bcbe95
FB
4100 gen_mov_vreg_F0(0, rm + 1);
4101 }
4102 }
4103 } else {
4104 /* Load/store */
4105 rn = (insn >> 16) & 0xf;
4106 if (dp)
9ee6e8bb 4107 VFP_DREG_D(rd, insn);
b7bcbe95 4108 else
9ee6e8bb 4109 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4110 if ((insn & 0x01200000) == 0x01000000) {
4111 /* Single load/store */
4112 offset = (insn & 0xff) << 2;
4113 if ((insn & (1 << 23)) == 0)
4114 offset = -offset;
934814f1
PM
4115 if (s->thumb && rn == 15) {
4116 /* This is actually UNPREDICTABLE */
4117 addr = tcg_temp_new_i32();
4118 tcg_gen_movi_i32(addr, s->pc & ~2);
4119 } else {
4120 addr = load_reg(s, rn);
4121 }
312eea9f 4122 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4123 if (insn & (1 << 20)) {
312eea9f 4124 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4125 gen_mov_vreg_F0(dp, rd);
4126 } else {
4127 gen_mov_F0_vreg(dp, rd);
312eea9f 4128 gen_vfp_st(s, dp, addr);
b7bcbe95 4129 }
7d1b0095 4130 tcg_temp_free_i32(addr);
b7bcbe95
FB
4131 } else {
4132 /* load/store multiple */
934814f1 4133 int w = insn & (1 << 21);
b7bcbe95
FB
4134 if (dp)
4135 n = (insn >> 1) & 0x7f;
4136 else
4137 n = insn & 0xff;
4138
934814f1
PM
4139 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4140 /* P == U , W == 1 => UNDEF */
4141 return 1;
4142 }
4143 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4144 /* UNPREDICTABLE cases for bad immediates: we choose to
4145 * UNDEF to avoid generating huge numbers of TCG ops
4146 */
4147 return 1;
4148 }
4149 if (rn == 15 && w) {
4150 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4151 return 1;
4152 }
4153
4154 if (s->thumb && rn == 15) {
4155 /* This is actually UNPREDICTABLE */
4156 addr = tcg_temp_new_i32();
4157 tcg_gen_movi_i32(addr, s->pc & ~2);
4158 } else {
4159 addr = load_reg(s, rn);
4160 }
b7bcbe95 4161 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4162 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4163
4164 if (dp)
4165 offset = 8;
4166 else
4167 offset = 4;
4168 for (i = 0; i < n; i++) {
18c9b560 4169 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4170 /* load */
312eea9f 4171 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4172 gen_mov_vreg_F0(dp, rd + i);
4173 } else {
4174 /* store */
4175 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4176 gen_vfp_st(s, dp, addr);
b7bcbe95 4177 }
312eea9f 4178 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4179 }
934814f1 4180 if (w) {
b7bcbe95
FB
4181 /* writeback */
4182 if (insn & (1 << 24))
4183 offset = -offset * n;
4184 else if (dp && (insn & 1))
4185 offset = 4;
4186 else
4187 offset = 0;
4188
4189 if (offset != 0)
312eea9f
FN
4190 tcg_gen_addi_i32(addr, addr, offset);
4191 store_reg(s, rn, addr);
4192 } else {
7d1b0095 4193 tcg_temp_free_i32(addr);
b7bcbe95
FB
4194 }
4195 }
4196 }
4197 break;
4198 default:
4199 /* Should never happen. */
4200 return 1;
4201 }
4202 return 0;
4203}
4204
90aa39a1 4205static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4206{
90aa39a1 4207#ifndef CONFIG_USER_ONLY
dcba3a8d 4208 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4209 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4210#else
4211 return true;
4212#endif
4213}
6e256c93 4214
8a6b28c7
EC
/* Jump to the next TB via the run-time TB lookup-and-goto helper. */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
4219
4cae8f56
AB
/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chaining: emit the goto_tb slot, then set the PC so the
         * target TB sees the right state, then exit encoding (tb | n) so
         * the main loop can patch this slot.
         */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
    } else {
        /* Cross-page jump: set the PC and fall back to the lookup helper. */
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
4236
8aaca4c0
FB
4237static inline void gen_jmp (DisasContext *s, uint32_t dest)
4238{
b636649f 4239 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4240 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4241 if (s->thumb)
d9ba4830
PB
4242 dest |= 1;
4243 gen_bx_im(s, dest);
8aaca4c0 4244 } else {
6e256c93 4245 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4246 }
4247}
4248
39d5492a 4249static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4250{
ee097184 4251 if (x)
d9ba4830 4252 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4253 else
d9ba4830 4254 gen_sxth(t0);
ee097184 4255 if (y)
d9ba4830 4256 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4257 else
d9ba4830
PB
4258 gen_sxth(t1);
4259 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4260}
4261
4262/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4263static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4264{
b5ff1b31
FB
4265 uint32_t mask;
4266
4267 mask = 0;
4268 if (flags & (1 << 0))
4269 mask |= 0xff;
4270 if (flags & (1 << 1))
4271 mask |= 0xff00;
4272 if (flags & (1 << 2))
4273 mask |= 0xff0000;
4274 if (flags & (1 << 3))
4275 mask |= 0xff000000;
9ee6e8bb 4276
2ae23e75 4277 /* Mask out undefined bits. */
9ee6e8bb 4278 mask &= ~CPSR_RESERVED;
d614a513 4279 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4280 mask &= ~CPSR_T;
d614a513
PM
4281 }
4282 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4283 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4284 }
4285 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4286 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4287 }
4288 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4289 mask &= ~CPSR_IT;
d614a513 4290 }
4051e12c
PM
4291 /* Mask out execution state and reserved bits. */
4292 if (!spsr) {
4293 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4294 }
b5ff1b31
FB
4295 /* Mask out privileged bits. */
4296 if (IS_USER(s))
9ee6e8bb 4297 mask &= CPSR_USER;
b5ff1b31
FB
4298 return mask;
4299}
4300
2fbac54b 4301/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4302static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4303{
39d5492a 4304 TCGv_i32 tmp;
b5ff1b31
FB
4305 if (spsr) {
4306 /* ??? This is also undefined in system mode. */
4307 if (IS_USER(s))
4308 return 1;
d9ba4830
PB
4309
4310 tmp = load_cpu_field(spsr);
4311 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4312 tcg_gen_andi_i32(t0, t0, mask);
4313 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4314 store_cpu_field(tmp, spsr);
b5ff1b31 4315 } else {
2fbac54b 4316 gen_set_cpsr(t0, mask);
b5ff1b31 4317 }
7d1b0095 4318 tcg_temp_free_i32(t0);
b5ff1b31
FB
4319 gen_lookup_tb(s);
4320 return 0;
4321}
4322
2fbac54b
FN
4323/* Returns nonzero if access to the PSR is not permitted. */
4324static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4325{
39d5492a 4326 TCGv_i32 tmp;
7d1b0095 4327 tmp = tcg_temp_new_i32();
2fbac54b
FN
4328 tcg_gen_movi_i32(tmp, val);
4329 return gen_set_psr(s, mask, spsr, tmp);
4330}
4331
8bfd0550
PM
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    /* EL that the UNDEF exception is routed to; may be raised to 3 below. */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4476
/* MSR (banked): write general-purpose register rn into the banked
 * register of another mode selected by (r, sysm).
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    /* On failure the decoder has already emitted the UNDEF exception. */
    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    /* The helper may have changed mode state; end the TB. */
    s->base.is_jmp = DISAS_UPDATE;
}
4498
/* MRS (banked): read the banked register of another mode selected by
 * (r, sysm) into general-purpose register rn.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    /* On failure the decoder has already emitted the UNDEF exception. */
    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    /* The helper may have raised an exception; end the TB. */
    s->base.is_jmp = DISAS_UPDATE;
}
4520
fb0e8e79
PM
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    /* Write the unmasked value straight into R15; pc is consumed here. */
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
4530
/* Generate a v6 exception return.  Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 4544
fb0e8e79
PM
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* Restore CPSR from the current mode's SPSR and jump to pc. */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4550
c22edfeb
AB
4551/*
4552 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4553 * only call the helper when running single threaded TCG code to ensure
4554 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4555 * just skip this instruction. Currently the SEV/SEVL instructions
4556 * which are *one* of many ways to wake the CPU from WFE are not
4557 * implemented so we can't sleep like WFI does.
4558 */
9ee6e8bb
PB
4559static void gen_nop_hint(DisasContext *s, int val)
4560{
4561 switch (val) {
2399d4e7
EC
4562 /* When running in MTTCG we don't generate jumps to the yield and
4563 * WFE helpers as it won't affect the scheduling of other vCPUs.
4564 * If we wanted to more completely model WFE/SEV so we don't busy
4565 * spin unnecessarily we would need to do something more involved.
4566 */
c87e5a61 4567 case 1: /* yield */
2399d4e7 4568 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4569 gen_set_pc_im(s, s->pc);
dcba3a8d 4570 s->base.is_jmp = DISAS_YIELD;
c22edfeb 4571 }
c87e5a61 4572 break;
9ee6e8bb 4573 case 3: /* wfi */
eaed129d 4574 gen_set_pc_im(s, s->pc);
dcba3a8d 4575 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
4576 break;
4577 case 2: /* wfe */
2399d4e7 4578 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4579 gen_set_pc_im(s, s->pc);
dcba3a8d 4580 s->base.is_jmp = DISAS_WFE;
c22edfeb 4581 }
72c1d3af 4582 break;
9ee6e8bb 4583 case 4: /* sev */
12b10571
MR
4584 case 5: /* sevl */
4585 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4586 default: /* nop */
4587 break;
4588 }
4589}
99c475ab 4590
ad69471c 4591#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4592
39d5492a 4593static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4594{
4595 switch (size) {
dd8fbd78
FN
4596 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4597 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4598 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4599 default: abort();
9ee6e8bb 4600 }
9ee6e8bb
PB
4601}
4602
39d5492a 4603static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4604{
4605 switch (size) {
dd8fbd78
FN
4606 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4607 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4608 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4609 default: return;
4610 }
4611}
4612
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Emit the signed/unsigned <name> Neon helper chosen by the 'size' and
 * 'u' variables in scope at the expansion site, passing cpu_env.
 * Operates on tmp/tmp2 and writes the result back into tmp; makes the
 * enclosing function return 1 for an unhandled size/u combination.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4641
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env:
 * emit the signed/unsigned <name> helper chosen by (size, u), operating
 * on tmp/tmp2 with the result in tmp.  Makes the enclosing function
 * return 1 for an unhandled size/u combination.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4664
39d5492a 4665static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4666{
39d5492a 4667 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4668 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4669 return tmp;
9ee6e8bb
PB
4670}
4671
39d5492a 4672static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4673{
dd8fbd78 4674 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4675 tcg_temp_free_i32(var);
9ee6e8bb
PB
4676}
4677
39d5492a 4678static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4679{
39d5492a 4680 TCGv_i32 tmp;
9ee6e8bb 4681 if (size == 1) {
0fad6efc
PM
4682 tmp = neon_load_reg(reg & 7, reg >> 4);
4683 if (reg & 8) {
dd8fbd78 4684 gen_neon_dup_high16(tmp);
0fad6efc
PM
4685 } else {
4686 gen_neon_dup_low16(tmp);
dd8fbd78 4687 }
0fad6efc
PM
4688 } else {
4689 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4690 }
dd8fbd78 4691 return tmp;
9ee6e8bb
PB
4692}
4693
02acedf9 4694static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4695{
b13708bb
RH
4696 TCGv_ptr pd, pm;
4697
600b828c 4698 if (!q && size == 2) {
02acedf9
PM
4699 return 1;
4700 }
b13708bb
RH
4701 pd = vfp_reg_ptr(true, rd);
4702 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
4703 if (q) {
4704 switch (size) {
4705 case 0:
b13708bb 4706 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
4707 break;
4708 case 1:
b13708bb 4709 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
4710 break;
4711 case 2:
b13708bb 4712 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
4713 break;
4714 default:
4715 abort();
4716 }
4717 } else {
4718 switch (size) {
4719 case 0:
b13708bb 4720 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
4721 break;
4722 case 1:
b13708bb 4723 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
4724 break;
4725 default:
4726 abort();
4727 }
4728 }
b13708bb
RH
4729 tcg_temp_free_ptr(pd);
4730 tcg_temp_free_ptr(pm);
02acedf9 4731 return 0;
19457615
FN
4732}
4733
d68a6f3a 4734static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4735{
b13708bb
RH
4736 TCGv_ptr pd, pm;
4737
600b828c 4738 if (!q && size == 2) {
d68a6f3a
PM
4739 return 1;
4740 }
b13708bb
RH
4741 pd = vfp_reg_ptr(true, rd);
4742 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4743 if (q) {
4744 switch (size) {
4745 case 0:
b13708bb 4746 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4747 break;
4748 case 1:
b13708bb 4749 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4750 break;
4751 case 2:
b13708bb 4752 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4753 break;
4754 default:
4755 abort();
4756 }
4757 } else {
4758 switch (size) {
4759 case 0:
b13708bb 4760 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4761 break;
4762 case 1:
b13708bb 4763 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4764 break;
4765 default:
4766 abort();
4767 }
4768 }
b13708bb
RH
4769 tcg_temp_free_ptr(pd);
4770 tcg_temp_free_ptr(pm);
d68a6f3a 4771 return 0;
19457615
FN
4772}
4773
39d5492a 4774static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4775{
39d5492a 4776 TCGv_i32 rd, tmp;
19457615 4777
7d1b0095
PM
4778 rd = tcg_temp_new_i32();
4779 tmp = tcg_temp_new_i32();
19457615
FN
4780
4781 tcg_gen_shli_i32(rd, t0, 8);
4782 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4783 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4784 tcg_gen_or_i32(rd, rd, tmp);
4785
4786 tcg_gen_shri_i32(t1, t1, 8);
4787 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4788 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4789 tcg_gen_or_i32(t1, t1, tmp);
4790 tcg_gen_mov_i32(t0, rd);
4791
7d1b0095
PM
4792 tcg_temp_free_i32(tmp);
4793 tcg_temp_free_i32(rd);
19457615
FN
4794}
4795
39d5492a 4796static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4797{
39d5492a 4798 TCGv_i32 rd, tmp;
19457615 4799
7d1b0095
PM
4800 rd = tcg_temp_new_i32();
4801 tmp = tcg_temp_new_i32();
19457615
FN
4802
4803 tcg_gen_shli_i32(rd, t0, 16);
4804 tcg_gen_andi_i32(tmp, t1, 0xffff);
4805 tcg_gen_or_i32(rd, rd, tmp);
4806 tcg_gen_shri_i32(t1, t1, 16);
4807 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4808 tcg_gen_or_i32(t1, t1, tmp);
4809 tcg_gen_mov_i32(t0, rd);
4810
7d1b0095
PM
4811 tcg_temp_free_i32(tmp);
4812 tcg_temp_free_i32(rd);
19457615
FN
4813}
4814
4815
9ee6e8bb
PB
/* Layout table for the Neon "load/store all elements" forms, indexed by
 * the insn's op field (see disas_neon_ls_insn below).
 */
static struct {
    int nregs;      /* number of D registers transferred */
    int interleave; /* element interleave factor in memory */
    int spacing;    /* register-number step between the registers */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4833
4834/* Translate a NEON load/store element instruction. Return nonzero if the
4835 instruction is invalid. */
7dcc1f89 4836static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4837{
4838 int rd, rn, rm;
4839 int op;
4840 int nregs;
4841 int interleave;
84496233 4842 int spacing;
9ee6e8bb
PB
4843 int stride;
4844 int size;
4845 int reg;
4846 int pass;
4847 int load;
4848 int shift;
9ee6e8bb 4849 int n;
39d5492a
PM
4850 TCGv_i32 addr;
4851 TCGv_i32 tmp;
4852 TCGv_i32 tmp2;
84496233 4853 TCGv_i64 tmp64;
9ee6e8bb 4854
2c7ffc41
PM
4855 /* FIXME: this access check should not take precedence over UNDEF
4856 * for invalid encodings; we will generate incorrect syndrome information
4857 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4858 */
9dbbc748 4859 if (s->fp_excp_el) {
2c7ffc41 4860 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4861 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4862 return 0;
4863 }
4864
5df8bac1 4865 if (!s->vfp_enabled)
9ee6e8bb
PB
4866 return 1;
4867 VFP_DREG_D(rd, insn);
4868 rn = (insn >> 16) & 0xf;
4869 rm = insn & 0xf;
4870 load = (insn & (1 << 21)) != 0;
4871 if ((insn & (1 << 23)) == 0) {
4872 /* Load store all elements. */
4873 op = (insn >> 8) & 0xf;
4874 size = (insn >> 6) & 3;
84496233 4875 if (op > 10)
9ee6e8bb 4876 return 1;
f2dd89d0
PM
4877 /* Catch UNDEF cases for bad values of align field */
4878 switch (op & 0xc) {
4879 case 4:
4880 if (((insn >> 5) & 1) == 1) {
4881 return 1;
4882 }
4883 break;
4884 case 8:
4885 if (((insn >> 4) & 3) == 3) {
4886 return 1;
4887 }
4888 break;
4889 default:
4890 break;
4891 }
9ee6e8bb
PB
4892 nregs = neon_ls_element_type[op].nregs;
4893 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4894 spacing = neon_ls_element_type[op].spacing;
4895 if (size == 3 && (interleave | spacing) != 1)
4896 return 1;
e318a60b 4897 addr = tcg_temp_new_i32();
dcc65026 4898 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4899 stride = (1 << size) * interleave;
4900 for (reg = 0; reg < nregs; reg++) {
4901 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4902 load_reg_var(s, addr, rn);
4903 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4904 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4905 load_reg_var(s, addr, rn);
4906 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4907 }
84496233 4908 if (size == 3) {
8ed1237d 4909 tmp64 = tcg_temp_new_i64();
84496233 4910 if (load) {
12dcc321 4911 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 4912 neon_store_reg64(tmp64, rd);
84496233 4913 } else {
84496233 4914 neon_load_reg64(tmp64, rd);
12dcc321 4915 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 4916 }
8ed1237d 4917 tcg_temp_free_i64(tmp64);
84496233
JR
4918 tcg_gen_addi_i32(addr, addr, stride);
4919 } else {
4920 for (pass = 0; pass < 2; pass++) {
4921 if (size == 2) {
4922 if (load) {
58ab8e96 4923 tmp = tcg_temp_new_i32();
12dcc321 4924 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
4925 neon_store_reg(rd, pass, tmp);
4926 } else {
4927 tmp = neon_load_reg(rd, pass);
12dcc321 4928 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 4929 tcg_temp_free_i32(tmp);
84496233 4930 }
1b2b1e54 4931 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4932 } else if (size == 1) {
4933 if (load) {
58ab8e96 4934 tmp = tcg_temp_new_i32();
12dcc321 4935 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 4936 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 4937 tmp2 = tcg_temp_new_i32();
12dcc321 4938 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 4939 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4940 tcg_gen_shli_i32(tmp2, tmp2, 16);
4941 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4942 tcg_temp_free_i32(tmp2);
84496233
JR
4943 neon_store_reg(rd, pass, tmp);
4944 } else {
4945 tmp = neon_load_reg(rd, pass);
7d1b0095 4946 tmp2 = tcg_temp_new_i32();
84496233 4947 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 4948 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 4949 tcg_temp_free_i32(tmp);
84496233 4950 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 4951 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 4952 tcg_temp_free_i32(tmp2);
1b2b1e54 4953 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4954 }
84496233
JR
4955 } else /* size == 0 */ {
4956 if (load) {
f764718d 4957 tmp2 = NULL;
84496233 4958 for (n = 0; n < 4; n++) {
58ab8e96 4959 tmp = tcg_temp_new_i32();
12dcc321 4960 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
4961 tcg_gen_addi_i32(addr, addr, stride);
4962 if (n == 0) {
4963 tmp2 = tmp;
4964 } else {
41ba8341
PB
4965 tcg_gen_shli_i32(tmp, tmp, n * 8);
4966 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4967 tcg_temp_free_i32(tmp);
84496233 4968 }
9ee6e8bb 4969 }
84496233
JR
4970 neon_store_reg(rd, pass, tmp2);
4971 } else {
4972 tmp2 = neon_load_reg(rd, pass);
4973 for (n = 0; n < 4; n++) {
7d1b0095 4974 tmp = tcg_temp_new_i32();
84496233
JR
4975 if (n == 0) {
4976 tcg_gen_mov_i32(tmp, tmp2);
4977 } else {
4978 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4979 }
12dcc321 4980 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 4981 tcg_temp_free_i32(tmp);
84496233
JR
4982 tcg_gen_addi_i32(addr, addr, stride);
4983 }
7d1b0095 4984 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4985 }
4986 }
4987 }
4988 }
84496233 4989 rd += spacing;
9ee6e8bb 4990 }
e318a60b 4991 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4992 stride = nregs * 8;
4993 } else {
4994 size = (insn >> 10) & 3;
4995 if (size == 3) {
4996 /* Load single element to all lanes. */
8e18cde3
PM
4997 int a = (insn >> 4) & 1;
4998 if (!load) {
9ee6e8bb 4999 return 1;
8e18cde3 5000 }
9ee6e8bb
PB
5001 size = (insn >> 6) & 3;
5002 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5003
5004 if (size == 3) {
5005 if (nregs != 4 || a == 0) {
9ee6e8bb 5006 return 1;
99c475ab 5007 }
8e18cde3
PM
5008 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5009 size = 2;
5010 }
5011 if (nregs == 1 && a == 1 && size == 0) {
5012 return 1;
5013 }
5014 if (nregs == 3 && a == 1) {
5015 return 1;
5016 }
e318a60b 5017 addr = tcg_temp_new_i32();
8e18cde3
PM
5018 load_reg_var(s, addr, rn);
5019 if (nregs == 1) {
5020 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5021 tmp = gen_load_and_replicate(s, addr, size);
5022 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5023 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5024 if (insn & (1 << 5)) {
5025 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5026 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5027 }
5028 tcg_temp_free_i32(tmp);
5029 } else {
5030 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5031 stride = (insn & (1 << 5)) ? 2 : 1;
5032 for (reg = 0; reg < nregs; reg++) {
5033 tmp = gen_load_and_replicate(s, addr, size);
5034 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5035 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5036 tcg_temp_free_i32(tmp);
5037 tcg_gen_addi_i32(addr, addr, 1 << size);
5038 rd += stride;
5039 }
9ee6e8bb 5040 }
e318a60b 5041 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5042 stride = (1 << size) * nregs;
5043 } else {
5044 /* Single element. */
93262b16 5045 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5046 pass = (insn >> 7) & 1;
5047 switch (size) {
5048 case 0:
5049 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5050 stride = 1;
5051 break;
5052 case 1:
5053 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5054 stride = (insn & (1 << 5)) ? 2 : 1;
5055 break;
5056 case 2:
5057 shift = 0;
9ee6e8bb
PB
5058 stride = (insn & (1 << 6)) ? 2 : 1;
5059 break;
5060 default:
5061 abort();
5062 }
5063 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5064 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5065 switch (nregs) {
5066 case 1:
5067 if (((idx & (1 << size)) != 0) ||
5068 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5069 return 1;
5070 }
5071 break;
5072 case 3:
5073 if ((idx & 1) != 0) {
5074 return 1;
5075 }
5076 /* fall through */
5077 case 2:
5078 if (size == 2 && (idx & 2) != 0) {
5079 return 1;
5080 }
5081 break;
5082 case 4:
5083 if ((size == 2) && ((idx & 3) == 3)) {
5084 return 1;
5085 }
5086 break;
5087 default:
5088 abort();
5089 }
5090 if ((rd + stride * (nregs - 1)) > 31) {
5091 /* Attempts to write off the end of the register file
5092 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5093 * the neon_load_reg() would write off the end of the array.
5094 */
5095 return 1;
5096 }
e318a60b 5097 addr = tcg_temp_new_i32();
dcc65026 5098 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5099 for (reg = 0; reg < nregs; reg++) {
5100 if (load) {
58ab8e96 5101 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5102 switch (size) {
5103 case 0:
12dcc321 5104 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5105 break;
5106 case 1:
12dcc321 5107 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5108 break;
5109 case 2:
12dcc321 5110 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5111 break;
a50f5b91
PB
5112 default: /* Avoid compiler warnings. */
5113 abort();
9ee6e8bb
PB
5114 }
5115 if (size != 2) {
8f8e3aa4 5116 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5117 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5118 shift, size ? 16 : 8);
7d1b0095 5119 tcg_temp_free_i32(tmp2);
9ee6e8bb 5120 }
8f8e3aa4 5121 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5122 } else { /* Store */
8f8e3aa4
PB
5123 tmp = neon_load_reg(rd, pass);
5124 if (shift)
5125 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5126 switch (size) {
5127 case 0:
12dcc321 5128 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5129 break;
5130 case 1:
12dcc321 5131 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5132 break;
5133 case 2:
12dcc321 5134 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5135 break;
99c475ab 5136 }
58ab8e96 5137 tcg_temp_free_i32(tmp);
99c475ab 5138 }
9ee6e8bb 5139 rd += stride;
1b2b1e54 5140 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5141 }
e318a60b 5142 tcg_temp_free_i32(addr);
9ee6e8bb 5143 stride = nregs * (1 << size);
99c475ab 5144 }
9ee6e8bb
PB
5145 }
5146 if (rm != 15) {
39d5492a 5147 TCGv_i32 base;
b26eefb6
PB
5148
5149 base = load_reg(s, rn);
9ee6e8bb 5150 if (rm == 13) {
b26eefb6 5151 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5152 } else {
39d5492a 5153 TCGv_i32 index;
b26eefb6
PB
5154 index = load_reg(s, rm);
5155 tcg_gen_add_i32(base, base, index);
7d1b0095 5156 tcg_temp_free_i32(index);
9ee6e8bb 5157 }
b26eefb6 5158 store_reg(s, rn, base);
9ee6e8bb
PB
5159 }
5160 return 0;
5161}
3b46e624 5162
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* Select bits from t where c is 1 and from f where c is 0:
     * dest = (t & c) | (f & ~c).  Note t and f are overwritten
     * with the intermediate results, hence "clobbers" above.
     */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
5170
39d5492a 5171static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5172{
5173 switch (size) {
5174 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5175 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5176 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5177 default: abort();
5178 }
5179}
5180
39d5492a 5181static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5182{
5183 switch (size) {
02da0b2d
PM
5184 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5185 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5186 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5187 default: abort();
5188 }
5189}
5190
39d5492a 5191static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5192{
5193 switch (size) {
02da0b2d
PM
5194 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5195 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5196 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5197 default: abort();
5198 }
5199}
5200
39d5492a 5201static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5202{
5203 switch (size) {
02da0b2d
PM
5204 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5205 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5206 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5207 default: abort();
5208 }
5209}
5210
39d5492a 5211static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5212 int q, int u)
5213{
5214 if (q) {
5215 if (u) {
5216 switch (size) {
5217 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5218 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5219 default: abort();
5220 }
5221 } else {
5222 switch (size) {
5223 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5224 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5225 default: abort();
5226 }
5227 }
5228 } else {
5229 if (u) {
5230 switch (size) {
b408a9b0
CL
5231 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5232 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5233 default: abort();
5234 }
5235 } else {
5236 switch (size) {
5237 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5238 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5239 default: abort();
5240 }
5241 }
5242 }
5243}
5244
39d5492a 5245static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5246{
5247 if (u) {
5248 switch (size) {
5249 case 0: gen_helper_neon_widen_u8(dest, src); break;
5250 case 1: gen_helper_neon_widen_u16(dest, src); break;
5251 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5252 default: abort();
5253 }
5254 } else {
5255 switch (size) {
5256 case 0: gen_helper_neon_widen_s8(dest, src); break;
5257 case 1: gen_helper_neon_widen_s16(dest, src); break;
5258 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5259 default: abort();
5260 }
5261 }
7d1b0095 5262 tcg_temp_free_i32(src);
ad69471c
PB
5263}
5264
5265static inline void gen_neon_addl(int size)
5266{
5267 switch (size) {
5268 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5269 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5270 case 2: tcg_gen_add_i64(CPU_V001); break;
5271 default: abort();
5272 }
5273}
5274
5275static inline void gen_neon_subl(int size)
5276{
5277 switch (size) {
5278 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5279 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5280 case 2: tcg_gen_sub_i64(CPU_V001); break;
5281 default: abort();
5282 }
5283}
5284
a7812ae4 5285static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5286{
5287 switch (size) {
5288 case 0: gen_helper_neon_negl_u16(var, var); break;
5289 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5290 case 2:
5291 tcg_gen_neg_i64(var, var);
5292 break;
ad69471c
PB
5293 default: abort();
5294 }
5295}
5296
a7812ae4 5297static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5298{
5299 switch (size) {
02da0b2d
PM
5300 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5301 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5302 default: abort();
5303 }
5304}
5305
/* Widening multiply: dest (64-bit elements) = a * b (32-bit elements),
 * signed/unsigned per u, element size per size.  For the 8/16-bit
 * cases a and b are freed here; see the note below.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    /* Combined (size, signedness) selector. */
    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4: /* 32x32 -> 64 signed */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5: /* 32x32 -> 64 unsigned */
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
5336
39d5492a
PM
5337static void gen_neon_narrow_op(int op, int u, int size,
5338 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5339{
5340 if (op) {
5341 if (u) {
5342 gen_neon_unarrow_sats(size, dest, src);
5343 } else {
5344 gen_neon_narrow(size, dest, src);
5345 }
5346 } else {
5347 if (u) {
5348 gen_neon_narrow_satu(size, dest, src);
5349 } else {
5350 gen_neon_narrow_sats(size, dest, src);
5351 }
5352 }
5353}
5354
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Allowed size values per op: bit n set means size value n is legal
 * for that op; the decoder UNDEFs when the bit is clear.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5426
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
/* value 3 is an unallocated encoding in this table */
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5493
5494static int neon_2rm_is_float_op(int op)
5495{
5496 /* Return true if this neon 2reg-misc op is float-to-float */
5497 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5498 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5499 op == NEON_2RM_VRINTM ||
5500 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5501 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5502}
5503
fe8fcf3d
PM
5504static bool neon_2rm_is_v8_op(int op)
5505{
5506 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5507 switch (op) {
5508 case NEON_2RM_VRINTN:
5509 case NEON_2RM_VRINTA:
5510 case NEON_2RM_VRINTM:
5511 case NEON_2RM_VRINTP:
5512 case NEON_2RM_VRINTZ:
5513 case NEON_2RM_VRINTX:
5514 case NEON_2RM_VCVTAU:
5515 case NEON_2RM_VCVTAS:
5516 case NEON_2RM_VCVTNU:
5517 case NEON_2RM_VCVTNS:
5518 case NEON_2RM_VCVTPU:
5519 case NEON_2RM_VCVTPS:
5520 case NEON_2RM_VCVTMU:
5521 case NEON_2RM_VCVTMS:
5522 return true;
5523 default:
5524 return false;
5525 }
5526}
5527
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5596
36a71934
RH
5597
5598/* Expand v8.1 simd helper. */
5599static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5600 int q, int rd, int rn, int rm)
5601{
5602 if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
5603 int opr_sz = (1 + q) * 8;
5604 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5605 vfp_reg_offset(1, rn),
5606 vfp_reg_offset(1, rm), cpu_env,
5607 opr_sz, opr_sz, 0, fn);
5608 return 0;
5609 }
5610 return 1;
5611}
5612
9ee6e8bb
PB
5613/* Translate a NEON data processing instruction. Return nonzero if the
5614 instruction is invalid.
ad69471c
PB
5615 We process data in a mixture of 32-bit and 64-bit chunks.
5616 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5617
7dcc1f89 5618static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5619{
5620 int op;
5621 int q;
5622 int rd, rn, rm;
5623 int size;
5624 int shift;
5625 int pass;
5626 int count;
5627 int pairwise;
5628 int u;
ca9a32e4 5629 uint32_t imm, mask;
39d5492a 5630 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 5631 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 5632 TCGv_i64 tmp64;
9ee6e8bb 5633
2c7ffc41
PM
5634 /* FIXME: this access check should not take precedence over UNDEF
5635 * for invalid encodings; we will generate incorrect syndrome information
5636 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5637 */
9dbbc748 5638 if (s->fp_excp_el) {
2c7ffc41 5639 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5640 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5641 return 0;
5642 }
5643
5df8bac1 5644 if (!s->vfp_enabled)
9ee6e8bb
PB
5645 return 1;
5646 q = (insn & (1 << 6)) != 0;
5647 u = (insn >> 24) & 1;
5648 VFP_DREG_D(rd, insn);
5649 VFP_DREG_N(rn, insn);
5650 VFP_DREG_M(rm, insn);
5651 size = (insn >> 20) & 3;
5652 if ((insn & (1 << 23)) == 0) {
5653 /* Three register same length. */
5654 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5655 /* Catch invalid op and bad size combinations: UNDEF */
5656 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5657 return 1;
5658 }
25f84f79
PM
5659 /* All insns of this form UNDEF for either this condition or the
5660 * superset of cases "Q==1"; we catch the latter later.
5661 */
5662 if (q && ((rd | rn | rm) & 1)) {
5663 return 1;
5664 }
36a71934
RH
5665 switch (op) {
5666 case NEON_3R_SHA:
5667 /* The SHA-1/SHA-256 3-register instructions require special
5668 * treatment here, as their size field is overloaded as an
5669 * op type selector, and they all consume their input in a
5670 * single pass.
5671 */
f1ecb913
AB
5672 if (!q) {
5673 return 1;
5674 }
5675 if (!u) { /* SHA-1 */
d614a513 5676 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5677 return 1;
5678 }
1a66ac61
RH
5679 ptr1 = vfp_reg_ptr(true, rd);
5680 ptr2 = vfp_reg_ptr(true, rn);
5681 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 5682 tmp4 = tcg_const_i32(size);
1a66ac61 5683 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
5684 tcg_temp_free_i32(tmp4);
5685 } else { /* SHA-256 */
d614a513 5686 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5687 return 1;
5688 }
1a66ac61
RH
5689 ptr1 = vfp_reg_ptr(true, rd);
5690 ptr2 = vfp_reg_ptr(true, rn);
5691 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
5692 switch (size) {
5693 case 0:
1a66ac61 5694 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
5695 break;
5696 case 1:
1a66ac61 5697 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
5698 break;
5699 case 2:
1a66ac61 5700 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
5701 break;
5702 }
5703 }
1a66ac61
RH
5704 tcg_temp_free_ptr(ptr1);
5705 tcg_temp_free_ptr(ptr2);
5706 tcg_temp_free_ptr(ptr3);
f1ecb913 5707 return 0;
36a71934
RH
5708
5709 case NEON_3R_VPADD_VQRDMLAH:
5710 if (!u) {
5711 break; /* VPADD */
5712 }
5713 /* VQRDMLAH */
5714 switch (size) {
5715 case 1:
5716 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
5717 q, rd, rn, rm);
5718 case 2:
5719 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
5720 q, rd, rn, rm);
5721 }
5722 return 1;
5723
5724 case NEON_3R_VFM_VQRDMLSH:
5725 if (!u) {
5726 /* VFM, VFMS */
5727 if (size == 1) {
5728 return 1;
5729 }
5730 break;
5731 }
5732 /* VQRDMLSH */
5733 switch (size) {
5734 case 1:
5735 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
5736 q, rd, rn, rm);
5737 case 2:
5738 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
5739 q, rd, rn, rm);
5740 }
5741 return 1;
f1ecb913 5742 }
62698be3
PM
5743 if (size == 3 && op != NEON_3R_LOGIC) {
5744 /* 64-bit element instructions. */
9ee6e8bb 5745 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5746 neon_load_reg64(cpu_V0, rn + pass);
5747 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5748 switch (op) {
62698be3 5749 case NEON_3R_VQADD:
9ee6e8bb 5750 if (u) {
02da0b2d
PM
5751 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5752 cpu_V0, cpu_V1);
2c0262af 5753 } else {
02da0b2d
PM
5754 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5755 cpu_V0, cpu_V1);
2c0262af 5756 }
9ee6e8bb 5757 break;
62698be3 5758 case NEON_3R_VQSUB:
9ee6e8bb 5759 if (u) {
02da0b2d
PM
5760 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5761 cpu_V0, cpu_V1);
ad69471c 5762 } else {
02da0b2d
PM
5763 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5764 cpu_V0, cpu_V1);
ad69471c
PB
5765 }
5766 break;
62698be3 5767 case NEON_3R_VSHL:
ad69471c
PB
5768 if (u) {
5769 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5770 } else {
5771 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5772 }
5773 break;
62698be3 5774 case NEON_3R_VQSHL:
ad69471c 5775 if (u) {
02da0b2d
PM
5776 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5777 cpu_V1, cpu_V0);
ad69471c 5778 } else {
02da0b2d
PM
5779 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5780 cpu_V1, cpu_V0);
ad69471c
PB
5781 }
5782 break;
62698be3 5783 case NEON_3R_VRSHL:
ad69471c
PB
5784 if (u) {
5785 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5786 } else {
ad69471c
PB
5787 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5788 }
5789 break;
62698be3 5790 case NEON_3R_VQRSHL:
ad69471c 5791 if (u) {
02da0b2d
PM
5792 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5793 cpu_V1, cpu_V0);
ad69471c 5794 } else {
02da0b2d
PM
5795 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5796 cpu_V1, cpu_V0);
1e8d4eec 5797 }
9ee6e8bb 5798 break;
62698be3 5799 case NEON_3R_VADD_VSUB:
9ee6e8bb 5800 if (u) {
ad69471c 5801 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5802 } else {
ad69471c 5803 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5804 }
5805 break;
5806 default:
5807 abort();
2c0262af 5808 }
ad69471c 5809 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5810 }
9ee6e8bb 5811 return 0;
2c0262af 5812 }
25f84f79 5813 pairwise = 0;
9ee6e8bb 5814 switch (op) {
62698be3
PM
5815 case NEON_3R_VSHL:
5816 case NEON_3R_VQSHL:
5817 case NEON_3R_VRSHL:
5818 case NEON_3R_VQRSHL:
9ee6e8bb 5819 {
ad69471c
PB
5820 int rtmp;
5821 /* Shift instruction operands are reversed. */
5822 rtmp = rn;
9ee6e8bb 5823 rn = rm;
ad69471c 5824 rm = rtmp;
9ee6e8bb 5825 }
2c0262af 5826 break;
36a71934 5827 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5828 case NEON_3R_VPMAX:
5829 case NEON_3R_VPMIN:
9ee6e8bb 5830 pairwise = 1;
2c0262af 5831 break;
25f84f79
PM
5832 case NEON_3R_FLOAT_ARITH:
5833 pairwise = (u && size < 2); /* if VPADD (float) */
5834 break;
5835 case NEON_3R_FLOAT_MINMAX:
5836 pairwise = u; /* if VPMIN/VPMAX (float) */
5837 break;
5838 case NEON_3R_FLOAT_CMP:
5839 if (!u && size) {
5840 /* no encoding for U=0 C=1x */
5841 return 1;
5842 }
5843 break;
5844 case NEON_3R_FLOAT_ACMP:
5845 if (!u) {
5846 return 1;
5847 }
5848 break;
505935fc
WN
5849 case NEON_3R_FLOAT_MISC:
5850 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5851 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5852 return 1;
5853 }
2c0262af 5854 break;
25f84f79
PM
5855 case NEON_3R_VMUL:
5856 if (u && (size != 0)) {
5857 /* UNDEF on invalid size for polynomial subcase */
5858 return 1;
5859 }
2c0262af 5860 break;
36a71934
RH
5861 case NEON_3R_VFM_VQRDMLSH:
5862 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5863 return 1;
5864 }
5865 break;
9ee6e8bb 5866 default:
2c0262af 5867 break;
9ee6e8bb 5868 }
dd8fbd78 5869
25f84f79
PM
5870 if (pairwise && q) {
5871 /* All the pairwise insns UNDEF if Q is set */
5872 return 1;
5873 }
5874
9ee6e8bb
PB
5875 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5876
5877 if (pairwise) {
5878 /* Pairwise. */
a5a14945
JR
5879 if (pass < 1) {
5880 tmp = neon_load_reg(rn, 0);
5881 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5882 } else {
a5a14945
JR
5883 tmp = neon_load_reg(rm, 0);
5884 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5885 }
5886 } else {
5887 /* Elementwise. */
dd8fbd78
FN
5888 tmp = neon_load_reg(rn, pass);
5889 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5890 }
5891 switch (op) {
62698be3 5892 case NEON_3R_VHADD:
9ee6e8bb
PB
5893 GEN_NEON_INTEGER_OP(hadd);
5894 break;
62698be3 5895 case NEON_3R_VQADD:
02da0b2d 5896 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5897 break;
62698be3 5898 case NEON_3R_VRHADD:
9ee6e8bb 5899 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5900 break;
62698be3 5901 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5902 switch ((u << 2) | size) {
5903 case 0: /* VAND */
dd8fbd78 5904 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5905 break;
5906 case 1: /* BIC */
f669df27 5907 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5908 break;
5909 case 2: /* VORR */
dd8fbd78 5910 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5911 break;
5912 case 3: /* VORN */
f669df27 5913 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5914 break;
5915 case 4: /* VEOR */
dd8fbd78 5916 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5917 break;
5918 case 5: /* VBSL */
dd8fbd78
FN
5919 tmp3 = neon_load_reg(rd, pass);
5920 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5921 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5922 break;
5923 case 6: /* VBIT */
dd8fbd78
FN
5924 tmp3 = neon_load_reg(rd, pass);
5925 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5926 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5927 break;
5928 case 7: /* VBIF */
dd8fbd78
FN
5929 tmp3 = neon_load_reg(rd, pass);
5930 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5931 tcg_temp_free_i32(tmp3);
9ee6e8bb 5932 break;
2c0262af
FB
5933 }
5934 break;
62698be3 5935 case NEON_3R_VHSUB:
9ee6e8bb
PB
5936 GEN_NEON_INTEGER_OP(hsub);
5937 break;
62698be3 5938 case NEON_3R_VQSUB:
02da0b2d 5939 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5940 break;
62698be3 5941 case NEON_3R_VCGT:
9ee6e8bb
PB
5942 GEN_NEON_INTEGER_OP(cgt);
5943 break;
62698be3 5944 case NEON_3R_VCGE:
9ee6e8bb
PB
5945 GEN_NEON_INTEGER_OP(cge);
5946 break;
62698be3 5947 case NEON_3R_VSHL:
ad69471c 5948 GEN_NEON_INTEGER_OP(shl);
2c0262af 5949 break;
62698be3 5950 case NEON_3R_VQSHL:
02da0b2d 5951 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5952 break;
62698be3 5953 case NEON_3R_VRSHL:
ad69471c 5954 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5955 break;
62698be3 5956 case NEON_3R_VQRSHL:
02da0b2d 5957 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5958 break;
62698be3 5959 case NEON_3R_VMAX:
9ee6e8bb
PB
5960 GEN_NEON_INTEGER_OP(max);
5961 break;
62698be3 5962 case NEON_3R_VMIN:
9ee6e8bb
PB
5963 GEN_NEON_INTEGER_OP(min);
5964 break;
62698be3 5965 case NEON_3R_VABD:
9ee6e8bb
PB
5966 GEN_NEON_INTEGER_OP(abd);
5967 break;
62698be3 5968 case NEON_3R_VABA:
9ee6e8bb 5969 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5970 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5971 tmp2 = neon_load_reg(rd, pass);
5972 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5973 break;
62698be3 5974 case NEON_3R_VADD_VSUB:
9ee6e8bb 5975 if (!u) { /* VADD */
62698be3 5976 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5977 } else { /* VSUB */
5978 switch (size) {
dd8fbd78
FN
5979 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5980 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5981 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5982 default: abort();
9ee6e8bb
PB
5983 }
5984 }
5985 break;
62698be3 5986 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5987 if (!u) { /* VTST */
5988 switch (size) {
dd8fbd78
FN
5989 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5990 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5991 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5992 default: abort();
9ee6e8bb
PB
5993 }
5994 } else { /* VCEQ */
5995 switch (size) {
dd8fbd78
FN
5996 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5997 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5998 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5999 default: abort();
9ee6e8bb
PB
6000 }
6001 }
6002 break;
62698be3 6003 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 6004 switch (size) {
dd8fbd78
FN
6005 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6006 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6007 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6008 default: abort();
9ee6e8bb 6009 }
7d1b0095 6010 tcg_temp_free_i32(tmp2);
dd8fbd78 6011 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6012 if (u) { /* VMLS */
dd8fbd78 6013 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 6014 } else { /* VMLA */
dd8fbd78 6015 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6016 }
6017 break;
62698be3 6018 case NEON_3R_VMUL:
9ee6e8bb 6019 if (u) { /* polynomial */
dd8fbd78 6020 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
6021 } else { /* Integer */
6022 switch (size) {
dd8fbd78
FN
6023 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6024 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6025 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6026 default: abort();
9ee6e8bb
PB
6027 }
6028 }
6029 break;
62698be3 6030 case NEON_3R_VPMAX:
9ee6e8bb
PB
6031 GEN_NEON_INTEGER_OP(pmax);
6032 break;
62698be3 6033 case NEON_3R_VPMIN:
9ee6e8bb
PB
6034 GEN_NEON_INTEGER_OP(pmin);
6035 break;
62698be3 6036 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
6037 if (!u) { /* VQDMULH */
6038 switch (size) {
02da0b2d
PM
6039 case 1:
6040 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6041 break;
6042 case 2:
6043 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6044 break;
62698be3 6045 default: abort();
9ee6e8bb 6046 }
62698be3 6047 } else { /* VQRDMULH */
9ee6e8bb 6048 switch (size) {
02da0b2d
PM
6049 case 1:
6050 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6051 break;
6052 case 2:
6053 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6054 break;
62698be3 6055 default: abort();
9ee6e8bb
PB
6056 }
6057 }
6058 break;
36a71934 6059 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 6060 switch (size) {
dd8fbd78
FN
6061 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6062 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6063 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 6064 default: abort();
9ee6e8bb
PB
6065 }
6066 break;
62698be3 6067 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6068 {
6069 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6070 switch ((u << 2) | size) {
6071 case 0: /* VADD */
aa47cfdd
PM
6072 case 4: /* VPADD */
6073 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6074 break;
6075 case 2: /* VSUB */
aa47cfdd 6076 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6077 break;
6078 case 6: /* VABD */
aa47cfdd 6079 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6080 break;
6081 default:
62698be3 6082 abort();
9ee6e8bb 6083 }
aa47cfdd 6084 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6085 break;
aa47cfdd 6086 }
62698be3 6087 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6088 {
6089 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6090 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6091 if (!u) {
7d1b0095 6092 tcg_temp_free_i32(tmp2);
dd8fbd78 6093 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6094 if (size == 0) {
aa47cfdd 6095 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6096 } else {
aa47cfdd 6097 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6098 }
6099 }
aa47cfdd 6100 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6101 break;
aa47cfdd 6102 }
62698be3 6103 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6104 {
6105 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6106 if (!u) {
aa47cfdd 6107 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6108 } else {
aa47cfdd
PM
6109 if (size == 0) {
6110 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6111 } else {
6112 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6113 }
b5ff1b31 6114 }
aa47cfdd 6115 tcg_temp_free_ptr(fpstatus);
2c0262af 6116 break;
aa47cfdd 6117 }
62698be3 6118 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6119 {
6120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6121 if (size == 0) {
6122 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6123 } else {
6124 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6125 }
6126 tcg_temp_free_ptr(fpstatus);
2c0262af 6127 break;
aa47cfdd 6128 }
62698be3 6129 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6130 {
6131 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6132 if (size == 0) {
f71a2ae5 6133 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6134 } else {
f71a2ae5 6135 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6136 }
6137 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6138 break;
aa47cfdd 6139 }
505935fc
WN
6140 case NEON_3R_FLOAT_MISC:
6141 if (u) {
6142 /* VMAXNM/VMINNM */
6143 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6144 if (size == 0) {
f71a2ae5 6145 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6146 } else {
f71a2ae5 6147 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6148 }
6149 tcg_temp_free_ptr(fpstatus);
6150 } else {
6151 if (size == 0) {
6152 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6153 } else {
6154 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6155 }
6156 }
2c0262af 6157 break;
36a71934 6158 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
6159 {
6160 /* VFMA, VFMS: fused multiply-add */
6161 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6162 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6163 if (size) {
6164 /* VFMS */
6165 gen_helper_vfp_negs(tmp, tmp);
6166 }
6167 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6168 tcg_temp_free_i32(tmp3);
6169 tcg_temp_free_ptr(fpstatus);
6170 break;
6171 }
9ee6e8bb
PB
6172 default:
6173 abort();
2c0262af 6174 }
7d1b0095 6175 tcg_temp_free_i32(tmp2);
dd8fbd78 6176
9ee6e8bb
PB
6177 /* Save the result. For elementwise operations we can put it
6178 straight into the destination register. For pairwise operations
6179 we have to be careful to avoid clobbering the source operands. */
6180 if (pairwise && rd == rm) {
dd8fbd78 6181 neon_store_scratch(pass, tmp);
9ee6e8bb 6182 } else {
dd8fbd78 6183 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6184 }
6185
6186 } /* for pass */
6187 if (pairwise && rd == rm) {
6188 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6189 tmp = neon_load_scratch(pass);
6190 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6191 }
6192 }
ad69471c 6193 /* End of 3 register same size operations. */
9ee6e8bb
PB
6194 } else if (insn & (1 << 4)) {
6195 if ((insn & 0x00380080) != 0) {
6196 /* Two registers and shift. */
6197 op = (insn >> 8) & 0xf;
6198 if (insn & (1 << 7)) {
cc13115b
PM
6199 /* 64-bit shift. */
6200 if (op > 7) {
6201 return 1;
6202 }
9ee6e8bb
PB
6203 size = 3;
6204 } else {
6205 size = 2;
6206 while ((insn & (1 << (size + 19))) == 0)
6207 size--;
6208 }
6209 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6210 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6211 by immediate using the variable shift operations. */
6212 if (op < 8) {
6213 /* Shift by immediate:
6214 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6215 if (q && ((rd | rm) & 1)) {
6216 return 1;
6217 }
6218 if (!u && (op == 4 || op == 6)) {
6219 return 1;
6220 }
9ee6e8bb
PB
6221 /* Right shifts are encoded as N - shift, where N is the
6222 element size in bits. */
6223 if (op <= 4)
6224 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6225 if (size == 3) {
6226 count = q + 1;
6227 } else {
6228 count = q ? 4: 2;
6229 }
6230 switch (size) {
6231 case 0:
6232 imm = (uint8_t) shift;
6233 imm |= imm << 8;
6234 imm |= imm << 16;
6235 break;
6236 case 1:
6237 imm = (uint16_t) shift;
6238 imm |= imm << 16;
6239 break;
6240 case 2:
6241 case 3:
6242 imm = shift;
6243 break;
6244 default:
6245 abort();
6246 }
6247
6248 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6249 if (size == 3) {
6250 neon_load_reg64(cpu_V0, rm + pass);
6251 tcg_gen_movi_i64(cpu_V1, imm);
6252 switch (op) {
6253 case 0: /* VSHR */
6254 case 1: /* VSRA */
6255 if (u)
6256 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6257 else
ad69471c 6258 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6259 break;
ad69471c
PB
6260 case 2: /* VRSHR */
6261 case 3: /* VRSRA */
6262 if (u)
6263 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6264 else
ad69471c 6265 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6266 break;
ad69471c 6267 case 4: /* VSRI */
ad69471c
PB
6268 case 5: /* VSHL, VSLI */
6269 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6270 break;
0322b26e 6271 case 6: /* VQSHLU */
02da0b2d
PM
6272 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6273 cpu_V0, cpu_V1);
ad69471c 6274 break;
0322b26e
PM
6275 case 7: /* VQSHL */
6276 if (u) {
02da0b2d 6277 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6278 cpu_V0, cpu_V1);
6279 } else {
02da0b2d 6280 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6281 cpu_V0, cpu_V1);
6282 }
9ee6e8bb 6283 break;
9ee6e8bb 6284 }
ad69471c
PB
6285 if (op == 1 || op == 3) {
6286 /* Accumulate. */
5371cb81 6287 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6288 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6289 } else if (op == 4 || (op == 5 && u)) {
6290 /* Insert */
923e6509
CL
6291 neon_load_reg64(cpu_V1, rd + pass);
6292 uint64_t mask;
6293 if (shift < -63 || shift > 63) {
6294 mask = 0;
6295 } else {
6296 if (op == 4) {
6297 mask = 0xffffffffffffffffull >> -shift;
6298 } else {
6299 mask = 0xffffffffffffffffull << shift;
6300 }
6301 }
6302 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6303 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6304 }
6305 neon_store_reg64(cpu_V0, rd + pass);
6306 } else { /* size < 3 */
6307 /* Operands in T0 and T1. */
dd8fbd78 6308 tmp = neon_load_reg(rm, pass);
7d1b0095 6309 tmp2 = tcg_temp_new_i32();
dd8fbd78 6310 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6311 switch (op) {
6312 case 0: /* VSHR */
6313 case 1: /* VSRA */
6314 GEN_NEON_INTEGER_OP(shl);
6315 break;
6316 case 2: /* VRSHR */
6317 case 3: /* VRSRA */
6318 GEN_NEON_INTEGER_OP(rshl);
6319 break;
6320 case 4: /* VSRI */
ad69471c
PB
6321 case 5: /* VSHL, VSLI */
6322 switch (size) {
dd8fbd78
FN
6323 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6324 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6325 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6326 default: abort();
ad69471c
PB
6327 }
6328 break;
0322b26e 6329 case 6: /* VQSHLU */
ad69471c 6330 switch (size) {
0322b26e 6331 case 0:
02da0b2d
PM
6332 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6333 tmp, tmp2);
0322b26e
PM
6334 break;
6335 case 1:
02da0b2d
PM
6336 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6337 tmp, tmp2);
0322b26e
PM
6338 break;
6339 case 2:
02da0b2d
PM
6340 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6341 tmp, tmp2);
0322b26e
PM
6342 break;
6343 default:
cc13115b 6344 abort();
ad69471c
PB
6345 }
6346 break;
0322b26e 6347 case 7: /* VQSHL */
02da0b2d 6348 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6349 break;
ad69471c 6350 }
7d1b0095 6351 tcg_temp_free_i32(tmp2);
ad69471c
PB
6352
6353 if (op == 1 || op == 3) {
6354 /* Accumulate. */
dd8fbd78 6355 tmp2 = neon_load_reg(rd, pass);
5371cb81 6356 gen_neon_add(size, tmp, tmp2);
7d1b0095 6357 tcg_temp_free_i32(tmp2);
ad69471c
PB
6358 } else if (op == 4 || (op == 5 && u)) {
6359 /* Insert */
6360 switch (size) {
6361 case 0:
6362 if (op == 4)
ca9a32e4 6363 mask = 0xff >> -shift;
ad69471c 6364 else
ca9a32e4
JR
6365 mask = (uint8_t)(0xff << shift);
6366 mask |= mask << 8;
6367 mask |= mask << 16;
ad69471c
PB
6368 break;
6369 case 1:
6370 if (op == 4)
ca9a32e4 6371 mask = 0xffff >> -shift;
ad69471c 6372 else
ca9a32e4
JR
6373 mask = (uint16_t)(0xffff << shift);
6374 mask |= mask << 16;
ad69471c
PB
6375 break;
6376 case 2:
ca9a32e4
JR
6377 if (shift < -31 || shift > 31) {
6378 mask = 0;
6379 } else {
6380 if (op == 4)
6381 mask = 0xffffffffu >> -shift;
6382 else
6383 mask = 0xffffffffu << shift;
6384 }
ad69471c
PB
6385 break;
6386 default:
6387 abort();
6388 }
dd8fbd78 6389 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6390 tcg_gen_andi_i32(tmp, tmp, mask);
6391 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6392 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6393 tcg_temp_free_i32(tmp2);
ad69471c 6394 }
dd8fbd78 6395 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6396 }
6397 } /* for pass */
6398 } else if (op < 10) {
ad69471c 6399 /* Shift by immediate and narrow:
9ee6e8bb 6400 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6401 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6402 if (rm & 1) {
6403 return 1;
6404 }
9ee6e8bb
PB
6405 shift = shift - (1 << (size + 3));
6406 size++;
92cdfaeb 6407 if (size == 3) {
a7812ae4 6408 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6409 neon_load_reg64(cpu_V0, rm);
6410 neon_load_reg64(cpu_V1, rm + 1);
6411 for (pass = 0; pass < 2; pass++) {
6412 TCGv_i64 in;
6413 if (pass == 0) {
6414 in = cpu_V0;
6415 } else {
6416 in = cpu_V1;
6417 }
ad69471c 6418 if (q) {
0b36f4cd 6419 if (input_unsigned) {
92cdfaeb 6420 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6421 } else {
92cdfaeb 6422 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6423 }
ad69471c 6424 } else {
0b36f4cd 6425 if (input_unsigned) {
92cdfaeb 6426 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6427 } else {
92cdfaeb 6428 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6429 }
ad69471c 6430 }
7d1b0095 6431 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6432 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6433 neon_store_reg(rd, pass, tmp);
6434 } /* for pass */
6435 tcg_temp_free_i64(tmp64);
6436 } else {
6437 if (size == 1) {
6438 imm = (uint16_t)shift;
6439 imm |= imm << 16;
2c0262af 6440 } else {
92cdfaeb
PM
6441 /* size == 2 */
6442 imm = (uint32_t)shift;
6443 }
6444 tmp2 = tcg_const_i32(imm);
6445 tmp4 = neon_load_reg(rm + 1, 0);
6446 tmp5 = neon_load_reg(rm + 1, 1);
6447 for (pass = 0; pass < 2; pass++) {
6448 if (pass == 0) {
6449 tmp = neon_load_reg(rm, 0);
6450 } else {
6451 tmp = tmp4;
6452 }
0b36f4cd
CL
6453 gen_neon_shift_narrow(size, tmp, tmp2, q,
6454 input_unsigned);
92cdfaeb
PM
6455 if (pass == 0) {
6456 tmp3 = neon_load_reg(rm, 1);
6457 } else {
6458 tmp3 = tmp5;
6459 }
0b36f4cd
CL
6460 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6461 input_unsigned);
36aa55dc 6462 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6463 tcg_temp_free_i32(tmp);
6464 tcg_temp_free_i32(tmp3);
6465 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6466 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6467 neon_store_reg(rd, pass, tmp);
6468 } /* for pass */
c6067f04 6469 tcg_temp_free_i32(tmp2);
b75263d6 6470 }
9ee6e8bb 6471 } else if (op == 10) {
cc13115b
PM
6472 /* VSHLL, VMOVL */
6473 if (q || (rd & 1)) {
9ee6e8bb 6474 return 1;
cc13115b 6475 }
ad69471c
PB
6476 tmp = neon_load_reg(rm, 0);
6477 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6478 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6479 if (pass == 1)
6480 tmp = tmp2;
6481
6482 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6483
9ee6e8bb
PB
6484 if (shift != 0) {
6485 /* The shift is less than the width of the source
ad69471c
PB
6486 type, so we can just shift the whole register. */
6487 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6488 /* Widen the result of shift: we need to clear
6489 * the potential overflow bits resulting from
6490 * left bits of the narrow input appearing as
6491 * right bits of left the neighbour narrow
6492 * input. */
ad69471c
PB
6493 if (size < 2 || !u) {
6494 uint64_t imm64;
6495 if (size == 0) {
6496 imm = (0xffu >> (8 - shift));
6497 imm |= imm << 16;
acdf01ef 6498 } else if (size == 1) {
ad69471c 6499 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6500 } else {
6501 /* size == 2 */
6502 imm = 0xffffffff >> (32 - shift);
6503 }
6504 if (size < 2) {
6505 imm64 = imm | (((uint64_t)imm) << 32);
6506 } else {
6507 imm64 = imm;
9ee6e8bb 6508 }
acdf01ef 6509 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6510 }
6511 }
ad69471c 6512 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6513 }
f73534a5 6514 } else if (op >= 14) {
9ee6e8bb 6515 /* VCVT fixed-point. */
cc13115b
PM
6516 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6517 return 1;
6518 }
f73534a5
PM
6519 /* We have already masked out the must-be-1 top bit of imm6,
6520 * hence this 32-shift where the ARM ARM has 64-imm6.
6521 */
6522 shift = 32 - shift;
9ee6e8bb 6523 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6524 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6525 if (!(op & 1)) {
9ee6e8bb 6526 if (u)
5500b06c 6527 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6528 else
5500b06c 6529 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6530 } else {
6531 if (u)
5500b06c 6532 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6533 else
5500b06c 6534 gen_vfp_tosl(0, shift, 1);
2c0262af 6535 }
4373f3ce 6536 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6537 }
6538 } else {
9ee6e8bb
PB
6539 return 1;
6540 }
6541 } else { /* (insn & 0x00380080) == 0 */
6542 int invert;
7d80fee5
PM
6543 if (q && (rd & 1)) {
6544 return 1;
6545 }
9ee6e8bb
PB
6546
6547 op = (insn >> 8) & 0xf;
6548 /* One register and immediate. */
6549 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6550 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6551 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6552 * We choose to not special-case this and will behave as if a
6553 * valid constant encoding of 0 had been given.
6554 */
9ee6e8bb
PB
6555 switch (op) {
6556 case 0: case 1:
6557 /* no-op */
6558 break;
6559 case 2: case 3:
6560 imm <<= 8;
6561 break;
6562 case 4: case 5:
6563 imm <<= 16;
6564 break;
6565 case 6: case 7:
6566 imm <<= 24;
6567 break;
6568 case 8: case 9:
6569 imm |= imm << 16;
6570 break;
6571 case 10: case 11:
6572 imm = (imm << 8) | (imm << 24);
6573 break;
6574 case 12:
8e31209e 6575 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6576 break;
6577 case 13:
6578 imm = (imm << 16) | 0xffff;
6579 break;
6580 case 14:
6581 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6582 if (invert)
6583 imm = ~imm;
6584 break;
6585 case 15:
7d80fee5
PM
6586 if (invert) {
6587 return 1;
6588 }
9ee6e8bb
PB
6589 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6590 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6591 break;
6592 }
6593 if (invert)
6594 imm = ~imm;
6595
9ee6e8bb
PB
6596 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6597 if (op & 1 && op < 12) {
ad69471c 6598 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6599 if (invert) {
6600 /* The immediate value has already been inverted, so
6601 BIC becomes AND. */
ad69471c 6602 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6603 } else {
ad69471c 6604 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6605 }
9ee6e8bb 6606 } else {
ad69471c 6607 /* VMOV, VMVN. */
7d1b0095 6608 tmp = tcg_temp_new_i32();
9ee6e8bb 6609 if (op == 14 && invert) {
a5a14945 6610 int n;
ad69471c
PB
6611 uint32_t val;
6612 val = 0;
9ee6e8bb
PB
6613 for (n = 0; n < 4; n++) {
6614 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6615 val |= 0xff << (n * 8);
9ee6e8bb 6616 }
ad69471c
PB
6617 tcg_gen_movi_i32(tmp, val);
6618 } else {
6619 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6620 }
9ee6e8bb 6621 }
ad69471c 6622 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6623 }
6624 }
e4b3861d 6625 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6626 if (size != 3) {
6627 op = (insn >> 8) & 0xf;
6628 if ((insn & (1 << 6)) == 0) {
6629 /* Three registers of different lengths. */
6630 int src1_wide;
6631 int src2_wide;
6632 int prewiden;
526d0096
PM
6633 /* undefreq: bit 0 : UNDEF if size == 0
6634 * bit 1 : UNDEF if size == 1
6635 * bit 2 : UNDEF if size == 2
6636 * bit 3 : UNDEF if U == 1
6637 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6638 */
6639 int undefreq;
6640 /* prewiden, src1_wide, src2_wide, undefreq */
6641 static const int neon_3reg_wide[16][4] = {
6642 {1, 0, 0, 0}, /* VADDL */
6643 {1, 1, 0, 0}, /* VADDW */
6644 {1, 0, 0, 0}, /* VSUBL */
6645 {1, 1, 0, 0}, /* VSUBW */
6646 {0, 1, 1, 0}, /* VADDHN */
6647 {0, 0, 0, 0}, /* VABAL */
6648 {0, 1, 1, 0}, /* VSUBHN */
6649 {0, 0, 0, 0}, /* VABDL */
6650 {0, 0, 0, 0}, /* VMLAL */
526d0096 6651 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6652 {0, 0, 0, 0}, /* VMLSL */
526d0096 6653 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6654 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6655 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6656 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6657 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6658 };
6659
6660 prewiden = neon_3reg_wide[op][0];
6661 src1_wide = neon_3reg_wide[op][1];
6662 src2_wide = neon_3reg_wide[op][2];
695272dc 6663 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6664
526d0096
PM
6665 if ((undefreq & (1 << size)) ||
6666 ((undefreq & 8) && u)) {
695272dc
PM
6667 return 1;
6668 }
6669 if ((src1_wide && (rn & 1)) ||
6670 (src2_wide && (rm & 1)) ||
6671 (!src2_wide && (rd & 1))) {
ad69471c 6672 return 1;
695272dc 6673 }
ad69471c 6674
4e624eda
PM
6675 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6676 * outside the loop below as it only performs a single pass.
6677 */
6678 if (op == 14 && size == 2) {
6679 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6680
d614a513 6681 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6682 return 1;
6683 }
6684 tcg_rn = tcg_temp_new_i64();
6685 tcg_rm = tcg_temp_new_i64();
6686 tcg_rd = tcg_temp_new_i64();
6687 neon_load_reg64(tcg_rn, rn);
6688 neon_load_reg64(tcg_rm, rm);
6689 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6690 neon_store_reg64(tcg_rd, rd);
6691 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6692 neon_store_reg64(tcg_rd, rd + 1);
6693 tcg_temp_free_i64(tcg_rn);
6694 tcg_temp_free_i64(tcg_rm);
6695 tcg_temp_free_i64(tcg_rd);
6696 return 0;
6697 }
6698
9ee6e8bb
PB
6699 /* Avoid overlapping operands. Wide source operands are
6700 always aligned so will never overlap with wide
6701 destinations in problematic ways. */
8f8e3aa4 6702 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6703 tmp = neon_load_reg(rm, 1);
6704 neon_store_scratch(2, tmp);
8f8e3aa4 6705 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6706 tmp = neon_load_reg(rn, 1);
6707 neon_store_scratch(2, tmp);
9ee6e8bb 6708 }
f764718d 6709 tmp3 = NULL;
9ee6e8bb 6710 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6711 if (src1_wide) {
6712 neon_load_reg64(cpu_V0, rn + pass);
f764718d 6713 tmp = NULL;
9ee6e8bb 6714 } else {
ad69471c 6715 if (pass == 1 && rd == rn) {
dd8fbd78 6716 tmp = neon_load_scratch(2);
9ee6e8bb 6717 } else {
ad69471c
PB
6718 tmp = neon_load_reg(rn, pass);
6719 }
6720 if (prewiden) {
6721 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6722 }
6723 }
ad69471c
PB
6724 if (src2_wide) {
6725 neon_load_reg64(cpu_V1, rm + pass);
f764718d 6726 tmp2 = NULL;
9ee6e8bb 6727 } else {
ad69471c 6728 if (pass == 1 && rd == rm) {
dd8fbd78 6729 tmp2 = neon_load_scratch(2);
9ee6e8bb 6730 } else {
ad69471c
PB
6731 tmp2 = neon_load_reg(rm, pass);
6732 }
6733 if (prewiden) {
6734 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6735 }
9ee6e8bb
PB
6736 }
6737 switch (op) {
6738 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6739 gen_neon_addl(size);
9ee6e8bb 6740 break;
79b0e534 6741 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6742 gen_neon_subl(size);
9ee6e8bb
PB
6743 break;
6744 case 5: case 7: /* VABAL, VABDL */
6745 switch ((size << 1) | u) {
ad69471c
PB
6746 case 0:
6747 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6748 break;
6749 case 1:
6750 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6751 break;
6752 case 2:
6753 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6754 break;
6755 case 3:
6756 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6757 break;
6758 case 4:
6759 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6760 break;
6761 case 5:
6762 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6763 break;
9ee6e8bb
PB
6764 default: abort();
6765 }
7d1b0095
PM
6766 tcg_temp_free_i32(tmp2);
6767 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6768 break;
6769 case 8: case 9: case 10: case 11: case 12: case 13:
6770 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6771 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6772 break;
6773 case 14: /* Polynomial VMULL */
e5ca24cb 6774 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6775 tcg_temp_free_i32(tmp2);
6776 tcg_temp_free_i32(tmp);
e5ca24cb 6777 break;
695272dc
PM
6778 default: /* 15 is RESERVED: caught earlier */
6779 abort();
9ee6e8bb 6780 }
ebcd88ce
PM
6781 if (op == 13) {
6782 /* VQDMULL */
6783 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6784 neon_store_reg64(cpu_V0, rd + pass);
6785 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6786 /* Accumulate. */
ebcd88ce 6787 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6788 switch (op) {
4dc064e6
PM
6789 case 10: /* VMLSL */
6790 gen_neon_negl(cpu_V0, size);
6791 /* Fall through */
6792 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6793 gen_neon_addl(size);
9ee6e8bb
PB
6794 break;
6795 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6796 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6797 if (op == 11) {
6798 gen_neon_negl(cpu_V0, size);
6799 }
ad69471c
PB
6800 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6801 break;
9ee6e8bb
PB
6802 default:
6803 abort();
6804 }
ad69471c 6805 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6806 } else if (op == 4 || op == 6) {
6807 /* Narrowing operation. */
7d1b0095 6808 tmp = tcg_temp_new_i32();
79b0e534 6809 if (!u) {
9ee6e8bb 6810 switch (size) {
ad69471c
PB
6811 case 0:
6812 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6813 break;
6814 case 1:
6815 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6816 break;
6817 case 2:
6818 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6819 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6820 break;
9ee6e8bb
PB
6821 default: abort();
6822 }
6823 } else {
6824 switch (size) {
ad69471c
PB
6825 case 0:
6826 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6827 break;
6828 case 1:
6829 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6830 break;
6831 case 2:
6832 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6833 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6834 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6835 break;
9ee6e8bb
PB
6836 default: abort();
6837 }
6838 }
ad69471c
PB
6839 if (pass == 0) {
6840 tmp3 = tmp;
6841 } else {
6842 neon_store_reg(rd, 0, tmp3);
6843 neon_store_reg(rd, 1, tmp);
6844 }
9ee6e8bb
PB
6845 } else {
6846 /* Write back the result. */
ad69471c 6847 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6848 }
6849 }
6850 } else {
3e3326df
PM
6851 /* Two registers and a scalar. NB that for ops of this form
6852 * the ARM ARM labels bit 24 as Q, but it is in our variable
6853 * 'u', not 'q'.
6854 */
6855 if (size == 0) {
6856 return 1;
6857 }
9ee6e8bb 6858 switch (op) {
9ee6e8bb 6859 case 1: /* Float VMLA scalar */
9ee6e8bb 6860 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6861 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6862 if (size == 1) {
6863 return 1;
6864 }
6865 /* fall through */
6866 case 0: /* Integer VMLA scalar */
6867 case 4: /* Integer VMLS scalar */
6868 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6869 case 12: /* VQDMULH scalar */
6870 case 13: /* VQRDMULH scalar */
3e3326df
PM
6871 if (u && ((rd | rn) & 1)) {
6872 return 1;
6873 }
dd8fbd78
FN
6874 tmp = neon_get_scalar(size, rm);
6875 neon_store_scratch(0, tmp);
9ee6e8bb 6876 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6877 tmp = neon_load_scratch(0);
6878 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6879 if (op == 12) {
6880 if (size == 1) {
02da0b2d 6881 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6882 } else {
02da0b2d 6883 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6884 }
6885 } else if (op == 13) {
6886 if (size == 1) {
02da0b2d 6887 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6888 } else {
02da0b2d 6889 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6890 }
6891 } else if (op & 1) {
aa47cfdd
PM
6892 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6893 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6894 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6895 } else {
6896 switch (size) {
dd8fbd78
FN
6897 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6898 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6899 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6900 default: abort();
9ee6e8bb
PB
6901 }
6902 }
7d1b0095 6903 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6904 if (op < 8) {
6905 /* Accumulate. */
dd8fbd78 6906 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6907 switch (op) {
6908 case 0:
dd8fbd78 6909 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6910 break;
6911 case 1:
aa47cfdd
PM
6912 {
6913 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6914 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6915 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6916 break;
aa47cfdd 6917 }
9ee6e8bb 6918 case 4:
dd8fbd78 6919 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6920 break;
6921 case 5:
aa47cfdd
PM
6922 {
6923 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6924 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6925 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6926 break;
aa47cfdd 6927 }
9ee6e8bb
PB
6928 default:
6929 abort();
6930 }
7d1b0095 6931 tcg_temp_free_i32(tmp2);
9ee6e8bb 6932 }
dd8fbd78 6933 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6934 }
6935 break;
9ee6e8bb 6936 case 3: /* VQDMLAL scalar */
9ee6e8bb 6937 case 7: /* VQDMLSL scalar */
9ee6e8bb 6938 case 11: /* VQDMULL scalar */
3e3326df 6939 if (u == 1) {
ad69471c 6940 return 1;
3e3326df
PM
6941 }
6942 /* fall through */
6943 case 2: /* VMLAL sclar */
6944 case 6: /* VMLSL scalar */
6945 case 10: /* VMULL scalar */
6946 if (rd & 1) {
6947 return 1;
6948 }
dd8fbd78 6949 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6950 /* We need a copy of tmp2 because gen_neon_mull
6951 * deletes it during pass 0. */
7d1b0095 6952 tmp4 = tcg_temp_new_i32();
c6067f04 6953 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6954 tmp3 = neon_load_reg(rn, 1);
ad69471c 6955
9ee6e8bb 6956 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6957 if (pass == 0) {
6958 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6959 } else {
dd8fbd78 6960 tmp = tmp3;
c6067f04 6961 tmp2 = tmp4;
9ee6e8bb 6962 }
ad69471c 6963 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6964 if (op != 11) {
6965 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6966 }
9ee6e8bb 6967 switch (op) {
4dc064e6
PM
6968 case 6:
6969 gen_neon_negl(cpu_V0, size);
6970 /* Fall through */
6971 case 2:
ad69471c 6972 gen_neon_addl(size);
9ee6e8bb
PB
6973 break;
6974 case 3: case 7:
ad69471c 6975 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6976 if (op == 7) {
6977 gen_neon_negl(cpu_V0, size);
6978 }
ad69471c 6979 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6980 break;
6981 case 10:
6982 /* no-op */
6983 break;
6984 case 11:
ad69471c 6985 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6986 break;
6987 default:
6988 abort();
6989 }
ad69471c 6990 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6991 }
61adacc8
RH
6992 break;
6993 case 14: /* VQRDMLAH scalar */
6994 case 15: /* VQRDMLSH scalar */
6995 {
6996 NeonGenThreeOpEnvFn *fn;
dd8fbd78 6997
61adacc8
RH
6998 if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
6999 return 1;
7000 }
7001 if (u && ((rd | rn) & 1)) {
7002 return 1;
7003 }
7004 if (op == 14) {
7005 if (size == 1) {
7006 fn = gen_helper_neon_qrdmlah_s16;
7007 } else {
7008 fn = gen_helper_neon_qrdmlah_s32;
7009 }
7010 } else {
7011 if (size == 1) {
7012 fn = gen_helper_neon_qrdmlsh_s16;
7013 } else {
7014 fn = gen_helper_neon_qrdmlsh_s32;
7015 }
7016 }
dd8fbd78 7017
61adacc8
RH
7018 tmp2 = neon_get_scalar(size, rm);
7019 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7020 tmp = neon_load_reg(rn, pass);
7021 tmp3 = neon_load_reg(rd, pass);
7022 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7023 tcg_temp_free_i32(tmp3);
7024 neon_store_reg(rd, pass, tmp);
7025 }
7026 tcg_temp_free_i32(tmp2);
7027 }
9ee6e8bb 7028 break;
61adacc8
RH
7029 default:
7030 g_assert_not_reached();
9ee6e8bb
PB
7031 }
7032 }
7033 } else { /* size == 3 */
7034 if (!u) {
7035 /* Extract. */
9ee6e8bb 7036 imm = (insn >> 8) & 0xf;
ad69471c
PB
7037
7038 if (imm > 7 && !q)
7039 return 1;
7040
52579ea1
PM
7041 if (q && ((rd | rn | rm) & 1)) {
7042 return 1;
7043 }
7044
ad69471c
PB
7045 if (imm == 0) {
7046 neon_load_reg64(cpu_V0, rn);
7047 if (q) {
7048 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7049 }
ad69471c
PB
7050 } else if (imm == 8) {
7051 neon_load_reg64(cpu_V0, rn + 1);
7052 if (q) {
7053 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7054 }
ad69471c 7055 } else if (q) {
a7812ae4 7056 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7057 if (imm < 8) {
7058 neon_load_reg64(cpu_V0, rn);
a7812ae4 7059 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7060 } else {
7061 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7062 neon_load_reg64(tmp64, rm);
ad69471c
PB
7063 }
7064 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7065 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7066 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7067 if (imm < 8) {
7068 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7069 } else {
ad69471c
PB
7070 neon_load_reg64(cpu_V1, rm + 1);
7071 imm -= 8;
9ee6e8bb 7072 }
ad69471c 7073 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7074 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7075 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7076 tcg_temp_free_i64(tmp64);
ad69471c 7077 } else {
a7812ae4 7078 /* BUGFIX */
ad69471c 7079 neon_load_reg64(cpu_V0, rn);
a7812ae4 7080 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7081 neon_load_reg64(cpu_V1, rm);
a7812ae4 7082 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7083 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7084 }
7085 neon_store_reg64(cpu_V0, rd);
7086 if (q) {
7087 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7088 }
7089 } else if ((insn & (1 << 11)) == 0) {
7090 /* Two register misc. */
7091 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7092 size = (insn >> 18) & 3;
600b828c
PM
7093 /* UNDEF for unknown op values and bad op-size combinations */
7094 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7095 return 1;
7096 }
fe8fcf3d
PM
7097 if (neon_2rm_is_v8_op(op) &&
7098 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7099 return 1;
7100 }
fc2a9b37
PM
7101 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7102 q && ((rm | rd) & 1)) {
7103 return 1;
7104 }
9ee6e8bb 7105 switch (op) {
600b828c 7106 case NEON_2RM_VREV64:
9ee6e8bb 7107 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7108 tmp = neon_load_reg(rm, pass * 2);
7109 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7110 switch (size) {
dd8fbd78
FN
7111 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7112 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7113 case 2: /* no-op */ break;
7114 default: abort();
7115 }
dd8fbd78 7116 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7117 if (size == 2) {
dd8fbd78 7118 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7119 } else {
9ee6e8bb 7120 switch (size) {
dd8fbd78
FN
7121 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7122 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7123 default: abort();
7124 }
dd8fbd78 7125 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7126 }
7127 }
7128 break;
600b828c
PM
7129 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7130 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7131 for (pass = 0; pass < q + 1; pass++) {
7132 tmp = neon_load_reg(rm, pass * 2);
7133 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7134 tmp = neon_load_reg(rm, pass * 2 + 1);
7135 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7136 switch (size) {
7137 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7138 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7139 case 2: tcg_gen_add_i64(CPU_V001); break;
7140 default: abort();
7141 }
600b828c 7142 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7143 /* Accumulate. */
ad69471c
PB
7144 neon_load_reg64(cpu_V1, rd + pass);
7145 gen_neon_addl(size);
9ee6e8bb 7146 }
ad69471c 7147 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7148 }
7149 break;
600b828c 7150 case NEON_2RM_VTRN:
9ee6e8bb 7151 if (size == 2) {
a5a14945 7152 int n;
9ee6e8bb 7153 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7154 tmp = neon_load_reg(rm, n);
7155 tmp2 = neon_load_reg(rd, n + 1);
7156 neon_store_reg(rm, n, tmp2);
7157 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7158 }
7159 } else {
7160 goto elementwise;
7161 }
7162 break;
600b828c 7163 case NEON_2RM_VUZP:
02acedf9 7164 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7165 return 1;
9ee6e8bb
PB
7166 }
7167 break;
600b828c 7168 case NEON_2RM_VZIP:
d68a6f3a 7169 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7170 return 1;
9ee6e8bb
PB
7171 }
7172 break;
600b828c
PM
7173 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7174 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7175 if (rm & 1) {
7176 return 1;
7177 }
f764718d 7178 tmp2 = NULL;
9ee6e8bb 7179 for (pass = 0; pass < 2; pass++) {
ad69471c 7180 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7181 tmp = tcg_temp_new_i32();
600b828c
PM
7182 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7183 tmp, cpu_V0);
ad69471c
PB
7184 if (pass == 0) {
7185 tmp2 = tmp;
7186 } else {
7187 neon_store_reg(rd, 0, tmp2);
7188 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7189 }
9ee6e8bb
PB
7190 }
7191 break;
600b828c 7192 case NEON_2RM_VSHLL:
fc2a9b37 7193 if (q || (rd & 1)) {
9ee6e8bb 7194 return 1;
600b828c 7195 }
ad69471c
PB
7196 tmp = neon_load_reg(rm, 0);
7197 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7198 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7199 if (pass == 1)
7200 tmp = tmp2;
7201 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7202 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7203 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7204 }
7205 break;
600b828c 7206 case NEON_2RM_VCVT_F16_F32:
d614a513 7207 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7208 q || (rm & 1)) {
7209 return 1;
7210 }
7d1b0095
PM
7211 tmp = tcg_temp_new_i32();
7212 tmp2 = tcg_temp_new_i32();
60011498 7213 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7214 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7215 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7216 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7217 tcg_gen_shli_i32(tmp2, tmp2, 16);
7218 tcg_gen_or_i32(tmp2, tmp2, tmp);
7219 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7220 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7221 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7222 neon_store_reg(rd, 0, tmp2);
7d1b0095 7223 tmp2 = tcg_temp_new_i32();
2d981da7 7224 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7225 tcg_gen_shli_i32(tmp2, tmp2, 16);
7226 tcg_gen_or_i32(tmp2, tmp2, tmp);
7227 neon_store_reg(rd, 1, tmp2);
7d1b0095 7228 tcg_temp_free_i32(tmp);
60011498 7229 break;
600b828c 7230 case NEON_2RM_VCVT_F32_F16:
d614a513 7231 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7232 q || (rd & 1)) {
7233 return 1;
7234 }
7d1b0095 7235 tmp3 = tcg_temp_new_i32();
60011498
PB
7236 tmp = neon_load_reg(rm, 0);
7237 tmp2 = neon_load_reg(rm, 1);
7238 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7239 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7240 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7241 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7242 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7243 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7244 tcg_temp_free_i32(tmp);
60011498 7245 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7246 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7247 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7248 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7249 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7250 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7251 tcg_temp_free_i32(tmp2);
7252 tcg_temp_free_i32(tmp3);
60011498 7253 break;
9d935509 7254 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7255 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7256 || ((rm | rd) & 1)) {
7257 return 1;
7258 }
1a66ac61
RH
7259 ptr1 = vfp_reg_ptr(true, rd);
7260 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7261
7262 /* Bit 6 is the lowest opcode bit; it distinguishes between
7263 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7264 */
7265 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7266
7267 if (op == NEON_2RM_AESE) {
1a66ac61 7268 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7269 } else {
1a66ac61 7270 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7271 }
1a66ac61
RH
7272 tcg_temp_free_ptr(ptr1);
7273 tcg_temp_free_ptr(ptr2);
9d935509
AB
7274 tcg_temp_free_i32(tmp3);
7275 break;
f1ecb913 7276 case NEON_2RM_SHA1H:
d614a513 7277 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7278 || ((rm | rd) & 1)) {
7279 return 1;
7280 }
1a66ac61
RH
7281 ptr1 = vfp_reg_ptr(true, rd);
7282 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7283
1a66ac61 7284 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7285
1a66ac61
RH
7286 tcg_temp_free_ptr(ptr1);
7287 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7288 break;
7289 case NEON_2RM_SHA1SU1:
7290 if ((rm | rd) & 1) {
7291 return 1;
7292 }
7293 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7294 if (q) {
d614a513 7295 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7296 return 1;
7297 }
d614a513 7298 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7299 return 1;
7300 }
1a66ac61
RH
7301 ptr1 = vfp_reg_ptr(true, rd);
7302 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7303 if (q) {
1a66ac61 7304 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7305 } else {
1a66ac61 7306 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7307 }
1a66ac61
RH
7308 tcg_temp_free_ptr(ptr1);
7309 tcg_temp_free_ptr(ptr2);
f1ecb913 7310 break;
9ee6e8bb
PB
7311 default:
7312 elementwise:
7313 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7314 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7315 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7316 neon_reg_offset(rm, pass));
f764718d 7317 tmp = NULL;
9ee6e8bb 7318 } else {
dd8fbd78 7319 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7320 }
7321 switch (op) {
600b828c 7322 case NEON_2RM_VREV32:
9ee6e8bb 7323 switch (size) {
dd8fbd78
FN
7324 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7325 case 1: gen_swap_half(tmp); break;
600b828c 7326 default: abort();
9ee6e8bb
PB
7327 }
7328 break;
600b828c 7329 case NEON_2RM_VREV16:
dd8fbd78 7330 gen_rev16(tmp);
9ee6e8bb 7331 break;
600b828c 7332 case NEON_2RM_VCLS:
9ee6e8bb 7333 switch (size) {
dd8fbd78
FN
7334 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7335 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7336 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7337 default: abort();
9ee6e8bb
PB
7338 }
7339 break;
600b828c 7340 case NEON_2RM_VCLZ:
9ee6e8bb 7341 switch (size) {
dd8fbd78
FN
7342 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7343 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7344 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7345 default: abort();
9ee6e8bb
PB
7346 }
7347 break;
600b828c 7348 case NEON_2RM_VCNT:
dd8fbd78 7349 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7350 break;
600b828c 7351 case NEON_2RM_VMVN:
dd8fbd78 7352 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7353 break;
600b828c 7354 case NEON_2RM_VQABS:
9ee6e8bb 7355 switch (size) {
02da0b2d
PM
7356 case 0:
7357 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7358 break;
7359 case 1:
7360 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7361 break;
7362 case 2:
7363 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7364 break;
600b828c 7365 default: abort();
9ee6e8bb
PB
7366 }
7367 break;
600b828c 7368 case NEON_2RM_VQNEG:
9ee6e8bb 7369 switch (size) {
02da0b2d
PM
7370 case 0:
7371 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7372 break;
7373 case 1:
7374 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7375 break;
7376 case 2:
7377 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7378 break;
600b828c 7379 default: abort();
9ee6e8bb
PB
7380 }
7381 break;
600b828c 7382 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7383 tmp2 = tcg_const_i32(0);
9ee6e8bb 7384 switch(size) {
dd8fbd78
FN
7385 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7386 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7387 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7388 default: abort();
9ee6e8bb 7389 }
39d5492a 7390 tcg_temp_free_i32(tmp2);
600b828c 7391 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7392 tcg_gen_not_i32(tmp, tmp);
600b828c 7393 }
9ee6e8bb 7394 break;
600b828c 7395 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7396 tmp2 = tcg_const_i32(0);
9ee6e8bb 7397 switch(size) {
dd8fbd78
FN
7398 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7399 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7400 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7401 default: abort();
9ee6e8bb 7402 }
39d5492a 7403 tcg_temp_free_i32(tmp2);
600b828c 7404 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7405 tcg_gen_not_i32(tmp, tmp);
600b828c 7406 }
9ee6e8bb 7407 break;
600b828c 7408 case NEON_2RM_VCEQ0:
dd8fbd78 7409 tmp2 = tcg_const_i32(0);
9ee6e8bb 7410 switch(size) {
dd8fbd78
FN
7411 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7412 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7413 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7414 default: abort();
9ee6e8bb 7415 }
39d5492a 7416 tcg_temp_free_i32(tmp2);
9ee6e8bb 7417 break;
600b828c 7418 case NEON_2RM_VABS:
9ee6e8bb 7419 switch(size) {
dd8fbd78
FN
7420 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7421 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7422 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7423 default: abort();
9ee6e8bb
PB
7424 }
7425 break;
600b828c 7426 case NEON_2RM_VNEG:
dd8fbd78
FN
7427 tmp2 = tcg_const_i32(0);
7428 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7429 tcg_temp_free_i32(tmp2);
9ee6e8bb 7430 break;
600b828c 7431 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7432 {
7433 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7434 tmp2 = tcg_const_i32(0);
aa47cfdd 7435 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7436 tcg_temp_free_i32(tmp2);
aa47cfdd 7437 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7438 break;
aa47cfdd 7439 }
600b828c 7440 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7441 {
7442 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7443 tmp2 = tcg_const_i32(0);
aa47cfdd 7444 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7445 tcg_temp_free_i32(tmp2);
aa47cfdd 7446 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7447 break;
aa47cfdd 7448 }
600b828c 7449 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7450 {
7451 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7452 tmp2 = tcg_const_i32(0);
aa47cfdd 7453 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7454 tcg_temp_free_i32(tmp2);
aa47cfdd 7455 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7456 break;
aa47cfdd 7457 }
600b828c 7458 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7459 {
7460 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7461 tmp2 = tcg_const_i32(0);
aa47cfdd 7462 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7463 tcg_temp_free_i32(tmp2);
aa47cfdd 7464 tcg_temp_free_ptr(fpstatus);
0e326109 7465 break;
aa47cfdd 7466 }
600b828c 7467 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7468 {
7469 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7470 tmp2 = tcg_const_i32(0);
aa47cfdd 7471 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7472 tcg_temp_free_i32(tmp2);
aa47cfdd 7473 tcg_temp_free_ptr(fpstatus);
0e326109 7474 break;
aa47cfdd 7475 }
600b828c 7476 case NEON_2RM_VABS_F:
4373f3ce 7477 gen_vfp_abs(0);
9ee6e8bb 7478 break;
600b828c 7479 case NEON_2RM_VNEG_F:
4373f3ce 7480 gen_vfp_neg(0);
9ee6e8bb 7481 break;
600b828c 7482 case NEON_2RM_VSWP:
dd8fbd78
FN
7483 tmp2 = neon_load_reg(rd, pass);
7484 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7485 break;
600b828c 7486 case NEON_2RM_VTRN:
dd8fbd78 7487 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7488 switch (size) {
dd8fbd78
FN
7489 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7490 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7491 default: abort();
9ee6e8bb 7492 }
dd8fbd78 7493 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7494 break;
34f7b0a2
WN
7495 case NEON_2RM_VRINTN:
7496 case NEON_2RM_VRINTA:
7497 case NEON_2RM_VRINTM:
7498 case NEON_2RM_VRINTP:
7499 case NEON_2RM_VRINTZ:
7500 {
7501 TCGv_i32 tcg_rmode;
7502 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7503 int rmode;
7504
7505 if (op == NEON_2RM_VRINTZ) {
7506 rmode = FPROUNDING_ZERO;
7507 } else {
7508 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7509 }
7510
7511 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7512 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7513 cpu_env);
7514 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7515 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7516 cpu_env);
7517 tcg_temp_free_ptr(fpstatus);
7518 tcg_temp_free_i32(tcg_rmode);
7519 break;
7520 }
2ce70625
WN
7521 case NEON_2RM_VRINTX:
7522 {
7523 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7524 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7525 tcg_temp_free_ptr(fpstatus);
7526 break;
7527 }
901ad525
WN
7528 case NEON_2RM_VCVTAU:
7529 case NEON_2RM_VCVTAS:
7530 case NEON_2RM_VCVTNU:
7531 case NEON_2RM_VCVTNS:
7532 case NEON_2RM_VCVTPU:
7533 case NEON_2RM_VCVTPS:
7534 case NEON_2RM_VCVTMU:
7535 case NEON_2RM_VCVTMS:
7536 {
7537 bool is_signed = !extract32(insn, 7, 1);
7538 TCGv_ptr fpst = get_fpstatus_ptr(1);
7539 TCGv_i32 tcg_rmode, tcg_shift;
7540 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7541
7542 tcg_shift = tcg_const_i32(0);
7543 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7544 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7545 cpu_env);
7546
7547 if (is_signed) {
7548 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7549 tcg_shift, fpst);
7550 } else {
7551 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7552 tcg_shift, fpst);
7553 }
7554
7555 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7556 cpu_env);
7557 tcg_temp_free_i32(tcg_rmode);
7558 tcg_temp_free_i32(tcg_shift);
7559 tcg_temp_free_ptr(fpst);
7560 break;
7561 }
600b828c 7562 case NEON_2RM_VRECPE:
b6d4443a
AB
7563 {
7564 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7565 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7566 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7567 break;
b6d4443a 7568 }
600b828c 7569 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7570 {
7571 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7572 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7573 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7574 break;
c2fb418e 7575 }
600b828c 7576 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7577 {
7578 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7579 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7580 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7581 break;
b6d4443a 7582 }
600b828c 7583 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7584 {
7585 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7586 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7587 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7588 break;
c2fb418e 7589 }
600b828c 7590 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7591 gen_vfp_sito(0, 1);
9ee6e8bb 7592 break;
600b828c 7593 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7594 gen_vfp_uito(0, 1);
9ee6e8bb 7595 break;
600b828c 7596 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7597 gen_vfp_tosiz(0, 1);
9ee6e8bb 7598 break;
600b828c 7599 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7600 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7601 break;
7602 default:
600b828c
PM
7603 /* Reserved op values were caught by the
7604 * neon_2rm_sizes[] check earlier.
7605 */
7606 abort();
9ee6e8bb 7607 }
600b828c 7608 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7609 tcg_gen_st_f32(cpu_F0s, cpu_env,
7610 neon_reg_offset(rd, pass));
9ee6e8bb 7611 } else {
dd8fbd78 7612 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7613 }
7614 }
7615 break;
7616 }
7617 } else if ((insn & (1 << 10)) == 0) {
7618 /* VTBL, VTBX. */
56907d77
PM
7619 int n = ((insn >> 8) & 3) + 1;
7620 if ((rn + n) > 32) {
7621 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7622 * helper function running off the end of the register file.
7623 */
7624 return 1;
7625 }
7626 n <<= 3;
9ee6e8bb 7627 if (insn & (1 << 6)) {
8f8e3aa4 7628 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7629 } else {
7d1b0095 7630 tmp = tcg_temp_new_i32();
8f8e3aa4 7631 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7632 }
8f8e3aa4 7633 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 7634 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 7635 tmp5 = tcg_const_i32(n);
e7c06c4e 7636 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 7637 tcg_temp_free_i32(tmp);
9ee6e8bb 7638 if (insn & (1 << 6)) {
8f8e3aa4 7639 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7640 } else {
7d1b0095 7641 tmp = tcg_temp_new_i32();
8f8e3aa4 7642 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7643 }
8f8e3aa4 7644 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 7645 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 7646 tcg_temp_free_i32(tmp5);
e7c06c4e 7647 tcg_temp_free_ptr(ptr1);
8f8e3aa4 7648 neon_store_reg(rd, 0, tmp2);
3018f259 7649 neon_store_reg(rd, 1, tmp3);
7d1b0095 7650 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7651 } else if ((insn & 0x380) == 0) {
7652 /* VDUP */
133da6aa
JR
7653 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7654 return 1;
7655 }
9ee6e8bb 7656 if (insn & (1 << 19)) {
dd8fbd78 7657 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7658 } else {
dd8fbd78 7659 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7660 }
7661 if (insn & (1 << 16)) {
dd8fbd78 7662 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7663 } else if (insn & (1 << 17)) {
7664 if ((insn >> 18) & 1)
dd8fbd78 7665 gen_neon_dup_high16(tmp);
9ee6e8bb 7666 else
dd8fbd78 7667 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7668 }
7669 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7670 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7671 tcg_gen_mov_i32(tmp2, tmp);
7672 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7673 }
7d1b0095 7674 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7675 } else {
7676 return 1;
7677 }
7678 }
7679 }
7680 return 0;
7681}
7682
8b7209fa
RH
7683/* Advanced SIMD three registers of the same length extension.
7684 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7685 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7686 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7687 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7688 */
7689static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7690{
7691 gen_helper_gvec_3_ptr *fn_gvec_ptr;
7692 int rd, rn, rm, rot, size, opr_sz;
7693 TCGv_ptr fpst;
7694 bool q;
7695
7696 q = extract32(insn, 6, 1);
7697 VFP_DREG_D(rd, insn);
7698 VFP_DREG_N(rn, insn);
7699 VFP_DREG_M(rm, insn);
7700 if ((rd | rn | rm) & q) {
7701 return 1;
7702 }
7703
7704 if ((insn & 0xfe200f10) == 0xfc200800) {
7705 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
7706 size = extract32(insn, 20, 1);
7707 rot = extract32(insn, 23, 2);
7708 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7709 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7710 return 1;
7711 }
7712 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7713 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7714 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
7715 size = extract32(insn, 20, 1);
7716 rot = extract32(insn, 24, 1);
7717 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7718 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7719 return 1;
7720 }
7721 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
7722 } else {
7723 return 1;
7724 }
7725
7726 if (s->fp_excp_el) {
7727 gen_exception_insn(s, 4, EXCP_UDEF,
7728 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7729 return 0;
7730 }
7731 if (!s->vfp_enabled) {
7732 return 1;
7733 }
7734
7735 opr_sz = (1 + q) * 8;
7736 fpst = get_fpstatus_ptr(1);
7737 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7738 vfp_reg_offset(1, rn),
7739 vfp_reg_offset(1, rm), fpst,
7740 opr_sz, opr_sz, rot, fn_gvec_ptr);
7741 tcg_temp_free_ptr(fpst);
7742 return 0;
7743}
7744
638808ff
RH
7745/* Advanced SIMD two registers and a scalar extension.
7746 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7747 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7748 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7749 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7750 *
7751 */
7752
7753static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7754{
7755 int rd, rn, rm, rot, size, opr_sz;
7756 TCGv_ptr fpst;
7757 bool q;
7758
7759 q = extract32(insn, 6, 1);
7760 VFP_DREG_D(rd, insn);
7761 VFP_DREG_N(rn, insn);
7762 VFP_DREG_M(rm, insn);
7763 if ((rd | rn) & q) {
7764 return 1;
7765 }
7766
7767 if ((insn & 0xff000f10) == 0xfe000800) {
7768 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
7769 rot = extract32(insn, 20, 2);
7770 size = extract32(insn, 23, 1);
7771 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7772 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7773 return 1;
7774 }
7775 } else {
7776 return 1;
7777 }
7778
7779 if (s->fp_excp_el) {
7780 gen_exception_insn(s, 4, EXCP_UDEF,
7781 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7782 return 0;
7783 }
7784 if (!s->vfp_enabled) {
7785 return 1;
7786 }
7787
7788 opr_sz = (1 + q) * 8;
7789 fpst = get_fpstatus_ptr(1);
7790 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7791 vfp_reg_offset(1, rn),
7792 vfp_reg_offset(1, rm), fpst,
7793 opr_sz, opr_sz, rot,
7794 size ? gen_helper_gvec_fcmlas_idx
7795 : gen_helper_gvec_fcmlah_idx);
7796 tcg_temp_free_ptr(fpst);
7797 return 0;
7798}
7799
7dcc1f89 7800static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7801{
4b6a83fb
PM
7802 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7803 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7804
7805 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7806
7807 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7808 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7809 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7810 return 1;
7811 }
d614a513 7812 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7813 return disas_iwmmxt_insn(s, insn);
d614a513 7814 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7815 return disas_dsp_insn(s, insn);
c0f4af17
PM
7816 }
7817 return 1;
4b6a83fb
PM
7818 }
7819
7820 /* Otherwise treat as a generic register access */
7821 is64 = (insn & (1 << 25)) == 0;
7822 if (!is64 && ((insn & (1 << 4)) == 0)) {
7823 /* cdp */
7824 return 1;
7825 }
7826
7827 crm = insn & 0xf;
7828 if (is64) {
7829 crn = 0;
7830 opc1 = (insn >> 4) & 0xf;
7831 opc2 = 0;
7832 rt2 = (insn >> 16) & 0xf;
7833 } else {
7834 crn = (insn >> 16) & 0xf;
7835 opc1 = (insn >> 21) & 7;
7836 opc2 = (insn >> 5) & 7;
7837 rt2 = 0;
7838 }
7839 isread = (insn >> 20) & 1;
7840 rt = (insn >> 12) & 0xf;
7841
60322b39 7842 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7843 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7844 if (ri) {
7845 /* Check access permissions */
dcbff19b 7846 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7847 return 1;
7848 }
7849
c0f4af17 7850 if (ri->accessfn ||
d614a513 7851 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7852 /* Emit code to perform further access permissions checks at
7853 * runtime; this may result in an exception.
c0f4af17
PM
7854 * Note that on XScale all cp0..c13 registers do an access check
7855 * call in order to handle c15_cpar.
f59df3f2
PM
7856 */
7857 TCGv_ptr tmpptr;
3f208fd7 7858 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7859 uint32_t syndrome;
7860
7861 /* Note that since we are an implementation which takes an
7862 * exception on a trapped conditional instruction only if the
7863 * instruction passes its condition code check, we can take
7864 * advantage of the clause in the ARM ARM that allows us to set
7865 * the COND field in the instruction to 0xE in all cases.
7866 * We could fish the actual condition out of the insn (ARM)
7867 * or the condexec bits (Thumb) but it isn't necessary.
7868 */
7869 switch (cpnum) {
7870 case 14:
7871 if (is64) {
7872 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7873 isread, false);
8bcbf37c
PM
7874 } else {
7875 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7876 rt, isread, false);
8bcbf37c
PM
7877 }
7878 break;
7879 case 15:
7880 if (is64) {
7881 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7882 isread, false);
8bcbf37c
PM
7883 } else {
7884 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7885 rt, isread, false);
8bcbf37c
PM
7886 }
7887 break;
7888 default:
7889 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7890 * so this can only happen if this is an ARMv7 or earlier CPU,
7891 * in which case the syndrome information won't actually be
7892 * guest visible.
7893 */
d614a513 7894 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7895 syndrome = syn_uncategorized();
7896 break;
7897 }
7898
43bfa4a1 7899 gen_set_condexec(s);
3977ee5d 7900 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7901 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7902 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7903 tcg_isread = tcg_const_i32(isread);
7904 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7905 tcg_isread);
f59df3f2 7906 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7907 tcg_temp_free_i32(tcg_syn);
3f208fd7 7908 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7909 }
7910
4b6a83fb
PM
7911 /* Handle special cases first */
7912 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7913 case ARM_CP_NOP:
7914 return 0;
7915 case ARM_CP_WFI:
7916 if (isread) {
7917 return 1;
7918 }
eaed129d 7919 gen_set_pc_im(s, s->pc);
dcba3a8d 7920 s->base.is_jmp = DISAS_WFI;
2bee5105 7921 return 0;
4b6a83fb
PM
7922 default:
7923 break;
7924 }
7925
c5a49c63 7926 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7927 gen_io_start();
7928 }
7929
4b6a83fb
PM
7930 if (isread) {
7931 /* Read */
7932 if (is64) {
7933 TCGv_i64 tmp64;
7934 TCGv_i32 tmp;
7935 if (ri->type & ARM_CP_CONST) {
7936 tmp64 = tcg_const_i64(ri->resetvalue);
7937 } else if (ri->readfn) {
7938 TCGv_ptr tmpptr;
4b6a83fb
PM
7939 tmp64 = tcg_temp_new_i64();
7940 tmpptr = tcg_const_ptr(ri);
7941 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7942 tcg_temp_free_ptr(tmpptr);
7943 } else {
7944 tmp64 = tcg_temp_new_i64();
7945 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7946 }
7947 tmp = tcg_temp_new_i32();
ecc7b3aa 7948 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7949 store_reg(s, rt, tmp);
7950 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7951 tmp = tcg_temp_new_i32();
ecc7b3aa 7952 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7953 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7954 store_reg(s, rt2, tmp);
7955 } else {
39d5492a 7956 TCGv_i32 tmp;
4b6a83fb
PM
7957 if (ri->type & ARM_CP_CONST) {
7958 tmp = tcg_const_i32(ri->resetvalue);
7959 } else if (ri->readfn) {
7960 TCGv_ptr tmpptr;
4b6a83fb
PM
7961 tmp = tcg_temp_new_i32();
7962 tmpptr = tcg_const_ptr(ri);
7963 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7964 tcg_temp_free_ptr(tmpptr);
7965 } else {
7966 tmp = load_cpu_offset(ri->fieldoffset);
7967 }
7968 if (rt == 15) {
7969 /* Destination register of r15 for 32 bit loads sets
7970 * the condition codes from the high 4 bits of the value
7971 */
7972 gen_set_nzcv(tmp);
7973 tcg_temp_free_i32(tmp);
7974 } else {
7975 store_reg(s, rt, tmp);
7976 }
7977 }
7978 } else {
7979 /* Write */
7980 if (ri->type & ARM_CP_CONST) {
7981 /* If not forbidden by access permissions, treat as WI */
7982 return 0;
7983 }
7984
7985 if (is64) {
39d5492a 7986 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7987 TCGv_i64 tmp64 = tcg_temp_new_i64();
7988 tmplo = load_reg(s, rt);
7989 tmphi = load_reg(s, rt2);
7990 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7991 tcg_temp_free_i32(tmplo);
7992 tcg_temp_free_i32(tmphi);
7993 if (ri->writefn) {
7994 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7995 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7996 tcg_temp_free_ptr(tmpptr);
7997 } else {
7998 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7999 }
8000 tcg_temp_free_i64(tmp64);
8001 } else {
8002 if (ri->writefn) {
39d5492a 8003 TCGv_i32 tmp;
4b6a83fb 8004 TCGv_ptr tmpptr;
4b6a83fb
PM
8005 tmp = load_reg(s, rt);
8006 tmpptr = tcg_const_ptr(ri);
8007 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8008 tcg_temp_free_ptr(tmpptr);
8009 tcg_temp_free_i32(tmp);
8010 } else {
39d5492a 8011 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8012 store_cpu_offset(tmp, ri->fieldoffset);
8013 }
8014 }
2452731c
PM
8015 }
8016
c5a49c63 8017 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8018 /* I/O operations must end the TB here (whether read or write) */
8019 gen_io_end();
8020 gen_lookup_tb(s);
8021 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8022 /* We default to ending the TB on a coprocessor register write,
8023 * but allow this to be suppressed by the register definition
8024 * (usually only necessary to work around guest bugs).
8025 */
2452731c 8026 gen_lookup_tb(s);
4b6a83fb 8027 }
2452731c 8028
4b6a83fb
PM
8029 return 0;
8030 }
8031
626187d8
PM
8032 /* Unknown register; this might be a guest error or a QEMU
8033 * unimplemented feature.
8034 */
8035 if (is64) {
8036 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8037 "64 bit system register cp:%d opc1: %d crm:%d "
8038 "(%s)\n",
8039 isread ? "read" : "write", cpnum, opc1, crm,
8040 s->ns ? "non-secure" : "secure");
626187d8
PM
8041 } else {
8042 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8043 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8044 "(%s)\n",
8045 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8046 s->ns ? "non-secure" : "secure");
626187d8
PM
8047 }
8048
4a9a539f 8049 return 1;
9ee6e8bb
PB
8050}
8051
5e3f878a
PB
8052
8053/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8054static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8055{
39d5492a 8056 TCGv_i32 tmp;
7d1b0095 8057 tmp = tcg_temp_new_i32();
ecc7b3aa 8058 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8059 store_reg(s, rlow, tmp);
7d1b0095 8060 tmp = tcg_temp_new_i32();
5e3f878a 8061 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8062 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8063 store_reg(s, rhigh, tmp);
8064}
8065
8066/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8067static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8068{
a7812ae4 8069 TCGv_i64 tmp;
39d5492a 8070 TCGv_i32 tmp2;
5e3f878a 8071
36aa55dc 8072 /* Load value and extend to 64 bits. */
a7812ae4 8073 tmp = tcg_temp_new_i64();
5e3f878a
PB
8074 tmp2 = load_reg(s, rlow);
8075 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8076 tcg_temp_free_i32(tmp2);
5e3f878a 8077 tcg_gen_add_i64(val, val, tmp);
b75263d6 8078 tcg_temp_free_i64(tmp);
5e3f878a
PB
8079}
8080
8081/* load and add a 64-bit value from a register pair. */
a7812ae4 8082static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8083{
a7812ae4 8084 TCGv_i64 tmp;
39d5492a
PM
8085 TCGv_i32 tmpl;
8086 TCGv_i32 tmph;
5e3f878a
PB
8087
8088 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8089 tmpl = load_reg(s, rlow);
8090 tmph = load_reg(s, rhigh);
a7812ae4 8091 tmp = tcg_temp_new_i64();
36aa55dc 8092 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8093 tcg_temp_free_i32(tmpl);
8094 tcg_temp_free_i32(tmph);
5e3f878a 8095 tcg_gen_add_i64(val, val, tmp);
b75263d6 8096 tcg_temp_free_i64(tmp);
5e3f878a
PB
8097}
8098
c9f10124 8099/* Set N and Z flags from hi|lo. */
39d5492a 8100static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8101{
c9f10124
RH
8102 tcg_gen_mov_i32(cpu_NF, hi);
8103 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8104}
8105
426f5abc
PB
8106/* Load/Store exclusive instructions are implemented by remembering
8107 the value/address loaded, and seeing if these are the same
354161b3 8108 when the store is performed. This should be sufficient to implement
426f5abc 8109 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8110 regular stores. The compare vs the remembered value is done during
8111 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 8112static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8113 TCGv_i32 addr, int size)
426f5abc 8114{
94ee24e7 8115 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8116 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8117
50225ad0
PM
8118 s->is_ldex = true;
8119
426f5abc 8120 if (size == 3) {
39d5492a 8121 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8122 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8123
3448d47b
PM
8124 /* For AArch32, architecturally the 32-bit word at the lowest
8125 * address is always Rt and the one at addr+4 is Rt2, even if
8126 * the CPU is big-endian. That means we don't want to do a
8127 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8128 * for an architecturally 64-bit access, but instead do a
8129 * 64-bit access using MO_BE if appropriate and then split
8130 * the two halves.
8131 * This only makes a difference for BE32 user-mode, where
8132 * frob64() must not flip the two halves of the 64-bit data
8133 * but this code must treat BE32 user-mode like BE32 system.
8134 */
8135 TCGv taddr = gen_aa32_addr(s, addr, opc);
8136
8137 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8138 tcg_temp_free(taddr);
354161b3 8139 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8140 if (s->be_data == MO_BE) {
8141 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8142 } else {
8143 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8144 }
354161b3
EC
8145 tcg_temp_free_i64(t64);
8146
8147 store_reg(s, rt2, tmp2);
03d05e2d 8148 } else {
354161b3 8149 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8150 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8151 }
03d05e2d
PM
8152
8153 store_reg(s, rt, tmp);
8154 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8155}
8156
8157static void gen_clrex(DisasContext *s)
8158{
03d05e2d 8159 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
8160}
8161
426f5abc 8162static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8163 TCGv_i32 addr, int size)
426f5abc 8164{
354161b3
EC
8165 TCGv_i32 t0, t1, t2;
8166 TCGv_i64 extaddr;
8167 TCGv taddr;
42a268c2
RH
8168 TCGLabel *done_label;
8169 TCGLabel *fail_label;
354161b3 8170 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
8171
8172 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8173 [addr] = {Rt};
8174 {Rd} = 0;
8175 } else {
8176 {Rd} = 1;
8177 } */
8178 fail_label = gen_new_label();
8179 done_label = gen_new_label();
03d05e2d
PM
8180 extaddr = tcg_temp_new_i64();
8181 tcg_gen_extu_i32_i64(extaddr, addr);
8182 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8183 tcg_temp_free_i64(extaddr);
8184
354161b3
EC
8185 taddr = gen_aa32_addr(s, addr, opc);
8186 t0 = tcg_temp_new_i32();
8187 t1 = load_reg(s, rt);
426f5abc 8188 if (size == 3) {
354161b3
EC
8189 TCGv_i64 o64 = tcg_temp_new_i64();
8190 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 8191
354161b3 8192 t2 = load_reg(s, rt2);
3448d47b
PM
8193 /* For AArch32, architecturally the 32-bit word at the lowest
8194 * address is always Rt and the one at addr+4 is Rt2, even if
8195 * the CPU is big-endian. Since we're going to treat this as a
8196 * single 64-bit BE store, we need to put the two halves in the
8197 * opposite order for BE to LE, so that they end up in the right
8198 * places.
8199 * We don't want gen_aa32_frob64() because that does the wrong
8200 * thing for BE32 usermode.
8201 */
8202 if (s->be_data == MO_BE) {
8203 tcg_gen_concat_i32_i64(n64, t2, t1);
8204 } else {
8205 tcg_gen_concat_i32_i64(n64, t1, t2);
8206 }
354161b3 8207 tcg_temp_free_i32(t2);
03d05e2d 8208
354161b3
EC
8209 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8210 get_mem_index(s), opc);
8211 tcg_temp_free_i64(n64);
8212
354161b3
EC
8213 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8214 tcg_gen_extrl_i64_i32(t0, o64);
8215
8216 tcg_temp_free_i64(o64);
8217 } else {
8218 t2 = tcg_temp_new_i32();
8219 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8220 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8221 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8222 tcg_temp_free_i32(t2);
426f5abc 8223 }
354161b3
EC
8224 tcg_temp_free_i32(t1);
8225 tcg_temp_free(taddr);
8226 tcg_gen_mov_i32(cpu_R[rd], t0);
8227 tcg_temp_free_i32(t0);
426f5abc 8228 tcg_gen_br(done_label);
354161b3 8229
426f5abc
PB
8230 gen_set_label(fail_label);
8231 tcg_gen_movi_i32(cpu_R[rd], 1);
8232 gen_set_label(done_label);
03d05e2d 8233 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 8234}
426f5abc 8235
81465888
PM
8236/* gen_srs:
8237 * @env: CPUARMState
8238 * @s: DisasContext
8239 * @mode: mode field from insn (which stack to store to)
8240 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8241 * @writeback: true if writeback bit set
8242 *
8243 * Generate code for the SRS (Store Return State) insn.
8244 */
8245static void gen_srs(DisasContext *s,
8246 uint32_t mode, uint32_t amode, bool writeback)
8247{
8248 int32_t offset;
cbc0326b
PM
8249 TCGv_i32 addr, tmp;
8250 bool undef = false;
8251
8252 /* SRS is:
8253 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 8254 * and specified mode is monitor mode
cbc0326b
PM
8255 * - UNDEFINED in Hyp mode
8256 * - UNPREDICTABLE in User or System mode
8257 * - UNPREDICTABLE if the specified mode is:
8258 * -- not implemented
8259 * -- not a valid mode number
8260 * -- a mode that's at a higher exception level
8261 * -- Monitor, if we are Non-secure
f01377f5 8262 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 8263 */
ba63cf47 8264 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
8265 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8266 return;
8267 }
8268
8269 if (s->current_el == 0 || s->current_el == 2) {
8270 undef = true;
8271 }
8272
8273 switch (mode) {
8274 case ARM_CPU_MODE_USR:
8275 case ARM_CPU_MODE_FIQ:
8276 case ARM_CPU_MODE_IRQ:
8277 case ARM_CPU_MODE_SVC:
8278 case ARM_CPU_MODE_ABT:
8279 case ARM_CPU_MODE_UND:
8280 case ARM_CPU_MODE_SYS:
8281 break;
8282 case ARM_CPU_MODE_HYP:
8283 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8284 undef = true;
8285 }
8286 break;
8287 case ARM_CPU_MODE_MON:
8288 /* No need to check specifically for "are we non-secure" because
8289 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8290 * so if this isn't EL3 then we must be non-secure.
8291 */
8292 if (s->current_el != 3) {
8293 undef = true;
8294 }
8295 break;
8296 default:
8297 undef = true;
8298 }
8299
8300 if (undef) {
8301 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8302 default_exception_el(s));
8303 return;
8304 }
8305
8306 addr = tcg_temp_new_i32();
8307 tmp = tcg_const_i32(mode);
f01377f5
PM
8308 /* get_r13_banked() will raise an exception if called from System mode */
8309 gen_set_condexec(s);
8310 gen_set_pc_im(s, s->pc - 4);
81465888
PM
8311 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8312 tcg_temp_free_i32(tmp);
8313 switch (amode) {
8314 case 0: /* DA */
8315 offset = -4;
8316 break;
8317 case 1: /* IA */
8318 offset = 0;
8319 break;
8320 case 2: /* DB */
8321 offset = -8;
8322 break;
8323 case 3: /* IB */
8324 offset = 4;
8325 break;
8326 default:
8327 abort();
8328 }
8329 tcg_gen_addi_i32(addr, addr, offset);
8330 tmp = load_reg(s, 14);
12dcc321 8331 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8332 tcg_temp_free_i32(tmp);
81465888
PM
8333 tmp = load_cpu_field(spsr);
8334 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 8335 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8336 tcg_temp_free_i32(tmp);
81465888
PM
8337 if (writeback) {
8338 switch (amode) {
8339 case 0:
8340 offset = -8;
8341 break;
8342 case 1:
8343 offset = 4;
8344 break;
8345 case 2:
8346 offset = -4;
8347 break;
8348 case 3:
8349 offset = 0;
8350 break;
8351 default:
8352 abort();
8353 }
8354 tcg_gen_addi_i32(addr, addr, offset);
8355 tmp = tcg_const_i32(mode);
8356 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8357 tcg_temp_free_i32(tmp);
8358 }
8359 tcg_temp_free_i32(addr);
dcba3a8d 8360 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
8361}
8362
f4df2210 8363static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8364{
f4df2210 8365 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8366 TCGv_i32 tmp;
8367 TCGv_i32 tmp2;
8368 TCGv_i32 tmp3;
8369 TCGv_i32 addr;
a7812ae4 8370 TCGv_i64 tmp64;
9ee6e8bb 8371
e13886e3
PM
8372 /* M variants do not implement ARM mode; this must raise the INVSTATE
8373 * UsageFault exception.
8374 */
b53d8923 8375 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8376 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8377 default_exception_el(s));
8378 return;
b53d8923 8379 }
9ee6e8bb
PB
8380 cond = insn >> 28;
8381 if (cond == 0xf){
be5e7a76
DES
8382 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8383 * choose to UNDEF. In ARMv5 and above the space is used
8384 * for miscellaneous unconditional instructions.
8385 */
8386 ARCH(5);
8387
9ee6e8bb
PB
8388 /* Unconditional instructions. */
8389 if (((insn >> 25) & 7) == 1) {
8390 /* NEON Data processing. */
d614a513 8391 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8392 goto illegal_op;
d614a513 8393 }
9ee6e8bb 8394
7dcc1f89 8395 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8396 goto illegal_op;
7dcc1f89 8397 }
9ee6e8bb
PB
8398 return;
8399 }
8400 if ((insn & 0x0f100000) == 0x04000000) {
8401 /* NEON load/store. */
d614a513 8402 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8403 goto illegal_op;
d614a513 8404 }
9ee6e8bb 8405
7dcc1f89 8406 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8407 goto illegal_op;
7dcc1f89 8408 }
9ee6e8bb
PB
8409 return;
8410 }
6a57f3eb
WN
8411 if ((insn & 0x0f000e10) == 0x0e000a00) {
8412 /* VFP. */
7dcc1f89 8413 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8414 goto illegal_op;
8415 }
8416 return;
8417 }
3d185e5d
PM
8418 if (((insn & 0x0f30f000) == 0x0510f000) ||
8419 ((insn & 0x0f30f010) == 0x0710f000)) {
8420 if ((insn & (1 << 22)) == 0) {
8421 /* PLDW; v7MP */
d614a513 8422 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8423 goto illegal_op;
8424 }
8425 }
8426 /* Otherwise PLD; v5TE+ */
be5e7a76 8427 ARCH(5TE);
3d185e5d
PM
8428 return;
8429 }
8430 if (((insn & 0x0f70f000) == 0x0450f000) ||
8431 ((insn & 0x0f70f010) == 0x0650f000)) {
8432 ARCH(7);
8433 return; /* PLI; V7 */
8434 }
8435 if (((insn & 0x0f700000) == 0x04100000) ||
8436 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8437 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8438 goto illegal_op;
8439 }
8440 return; /* v7MP: Unallocated memory hint: must NOP */
8441 }
8442
8443 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8444 ARCH(6);
8445 /* setend */
9886ecdf
PB
8446 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8447 gen_helper_setend(cpu_env);
dcba3a8d 8448 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8449 }
8450 return;
8451 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8452 switch ((insn >> 4) & 0xf) {
8453 case 1: /* clrex */
8454 ARCH(6K);
426f5abc 8455 gen_clrex(s);
9ee6e8bb
PB
8456 return;
8457 case 4: /* dsb */
8458 case 5: /* dmb */
9ee6e8bb 8459 ARCH(7);
61e4c432 8460 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8461 return;
6df99dec
SS
8462 case 6: /* isb */
8463 /* We need to break the TB after this insn to execute
8464 * self-modifying code correctly and also to take
8465 * any pending interrupts immediately.
8466 */
0b609cc1 8467 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8468 return;
9ee6e8bb
PB
8469 default:
8470 goto illegal_op;
8471 }
8472 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8473 /* srs */
81465888
PM
8474 ARCH(6);
8475 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8476 return;
ea825eee 8477 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8478 /* rfe */
c67b6b71 8479 int32_t offset;
9ee6e8bb
PB
8480 if (IS_USER(s))
8481 goto illegal_op;
8482 ARCH(6);
8483 rn = (insn >> 16) & 0xf;
b0109805 8484 addr = load_reg(s, rn);
9ee6e8bb
PB
8485 i = (insn >> 23) & 3;
8486 switch (i) {
b0109805 8487 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8488 case 1: offset = 0; break; /* IA */
8489 case 2: offset = -8; break; /* DB */
b0109805 8490 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8491 default: abort();
8492 }
8493 if (offset)
b0109805
PB
8494 tcg_gen_addi_i32(addr, addr, offset);
8495 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8496 tmp = tcg_temp_new_i32();
12dcc321 8497 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8498 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8499 tmp2 = tcg_temp_new_i32();
12dcc321 8500 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8501 if (insn & (1 << 21)) {
8502 /* Base writeback. */
8503 switch (i) {
b0109805 8504 case 0: offset = -8; break;
c67b6b71
FN
8505 case 1: offset = 4; break;
8506 case 2: offset = -4; break;
b0109805 8507 case 3: offset = 0; break;
9ee6e8bb
PB
8508 default: abort();
8509 }
8510 if (offset)
b0109805
PB
8511 tcg_gen_addi_i32(addr, addr, offset);
8512 store_reg(s, rn, addr);
8513 } else {
7d1b0095 8514 tcg_temp_free_i32(addr);
9ee6e8bb 8515 }
b0109805 8516 gen_rfe(s, tmp, tmp2);
c67b6b71 8517 return;
9ee6e8bb
PB
8518 } else if ((insn & 0x0e000000) == 0x0a000000) {
8519 /* branch link and change to thumb (blx <offset>) */
8520 int32_t offset;
8521
8522 val = (uint32_t)s->pc;
7d1b0095 8523 tmp = tcg_temp_new_i32();
d9ba4830
PB
8524 tcg_gen_movi_i32(tmp, val);
8525 store_reg(s, 14, tmp);
9ee6e8bb
PB
8526 /* Sign-extend the 24-bit offset */
8527 offset = (((int32_t)insn) << 8) >> 8;
8528 /* offset * 4 + bit24 * 2 + (thumb bit) */
8529 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8530 /* pipeline offset */
8531 val += 4;
be5e7a76 8532 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8533 gen_bx_im(s, val);
9ee6e8bb
PB
8534 return;
8535 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8536 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8537 /* iWMMXt register transfer. */
c0f4af17 8538 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8539 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8540 return;
c0f4af17
PM
8541 }
8542 }
9ee6e8bb 8543 }
8b7209fa
RH
8544 } else if ((insn & 0x0e000a00) == 0x0c000800
8545 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8546 if (disas_neon_insn_3same_ext(s, insn)) {
8547 goto illegal_op;
8548 }
8549 return;
638808ff
RH
8550 } else if ((insn & 0x0f000a00) == 0x0e000800
8551 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8552 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8553 goto illegal_op;
8554 }
8555 return;
9ee6e8bb
PB
8556 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8557 /* Coprocessor double register transfer. */
be5e7a76 8558 ARCH(5TE);
9ee6e8bb
PB
8559 } else if ((insn & 0x0f000010) == 0x0e000010) {
8560 /* Additional coprocessor register transfer. */
7997d92f 8561 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8562 uint32_t mask;
8563 uint32_t val;
8564 /* cps (privileged) */
8565 if (IS_USER(s))
8566 return;
8567 mask = val = 0;
8568 if (insn & (1 << 19)) {
8569 if (insn & (1 << 8))
8570 mask |= CPSR_A;
8571 if (insn & (1 << 7))
8572 mask |= CPSR_I;
8573 if (insn & (1 << 6))
8574 mask |= CPSR_F;
8575 if (insn & (1 << 18))
8576 val |= mask;
8577 }
7997d92f 8578 if (insn & (1 << 17)) {
9ee6e8bb
PB
8579 mask |= CPSR_M;
8580 val |= (insn & 0x1f);
8581 }
8582 if (mask) {
2fbac54b 8583 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8584 }
8585 return;
8586 }
8587 goto illegal_op;
8588 }
8589 if (cond != 0xe) {
8590 /* if not always execute, we generate a conditional jump to
8591 next instruction */
8592 s->condlabel = gen_new_label();
39fb730a 8593 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8594 s->condjmp = 1;
8595 }
8596 if ((insn & 0x0f900000) == 0x03000000) {
8597 if ((insn & (1 << 21)) == 0) {
8598 ARCH(6T2);
8599 rd = (insn >> 12) & 0xf;
8600 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8601 if ((insn & (1 << 22)) == 0) {
8602 /* MOVW */
7d1b0095 8603 tmp = tcg_temp_new_i32();
5e3f878a 8604 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8605 } else {
8606 /* MOVT */
5e3f878a 8607 tmp = load_reg(s, rd);
86831435 8608 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8609 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8610 }
5e3f878a 8611 store_reg(s, rd, tmp);
9ee6e8bb
PB
8612 } else {
8613 if (((insn >> 12) & 0xf) != 0xf)
8614 goto illegal_op;
8615 if (((insn >> 16) & 0xf) == 0) {
8616 gen_nop_hint(s, insn & 0xff);
8617 } else {
8618 /* CPSR = immediate */
8619 val = insn & 0xff;
8620 shift = ((insn >> 8) & 0xf) * 2;
8621 if (shift)
8622 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8623 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8624 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8625 i, val)) {
9ee6e8bb 8626 goto illegal_op;
7dcc1f89 8627 }
9ee6e8bb
PB
8628 }
8629 }
8630 } else if ((insn & 0x0f900000) == 0x01000000
8631 && (insn & 0x00000090) != 0x00000090) {
8632 /* miscellaneous instructions */
8633 op1 = (insn >> 21) & 3;
8634 sh = (insn >> 4) & 0xf;
8635 rm = insn & 0xf;
8636 switch (sh) {
8bfd0550
PM
8637 case 0x0: /* MSR, MRS */
8638 if (insn & (1 << 9)) {
8639 /* MSR (banked) and MRS (banked) */
8640 int sysm = extract32(insn, 16, 4) |
8641 (extract32(insn, 8, 1) << 4);
8642 int r = extract32(insn, 22, 1);
8643
8644 if (op1 & 1) {
8645 /* MSR (banked) */
8646 gen_msr_banked(s, r, sysm, rm);
8647 } else {
8648 /* MRS (banked) */
8649 int rd = extract32(insn, 12, 4);
8650
8651 gen_mrs_banked(s, r, sysm, rd);
8652 }
8653 break;
8654 }
8655
8656 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8657 if (op1 & 1) {
8658 /* PSR = reg */
2fbac54b 8659 tmp = load_reg(s, rm);
9ee6e8bb 8660 i = ((op1 & 2) != 0);
7dcc1f89 8661 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8662 goto illegal_op;
8663 } else {
8664 /* reg = PSR */
8665 rd = (insn >> 12) & 0xf;
8666 if (op1 & 2) {
8667 if (IS_USER(s))
8668 goto illegal_op;
d9ba4830 8669 tmp = load_cpu_field(spsr);
9ee6e8bb 8670 } else {
7d1b0095 8671 tmp = tcg_temp_new_i32();
9ef39277 8672 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8673 }
d9ba4830 8674 store_reg(s, rd, tmp);
9ee6e8bb
PB
8675 }
8676 break;
8677 case 0x1:
8678 if (op1 == 1) {
8679 /* branch/exchange thumb (bx). */
be5e7a76 8680 ARCH(4T);
d9ba4830
PB
8681 tmp = load_reg(s, rm);
8682 gen_bx(s, tmp);
9ee6e8bb
PB
8683 } else if (op1 == 3) {
8684 /* clz */
be5e7a76 8685 ARCH(5);
9ee6e8bb 8686 rd = (insn >> 12) & 0xf;
1497c961 8687 tmp = load_reg(s, rm);
7539a012 8688 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8689 store_reg(s, rd, tmp);
9ee6e8bb
PB
8690 } else {
8691 goto illegal_op;
8692 }
8693 break;
8694 case 0x2:
8695 if (op1 == 1) {
8696 ARCH(5J); /* bxj */
8697 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8698 tmp = load_reg(s, rm);
8699 gen_bx(s, tmp);
9ee6e8bb
PB
8700 } else {
8701 goto illegal_op;
8702 }
8703 break;
8704 case 0x3:
8705 if (op1 != 1)
8706 goto illegal_op;
8707
be5e7a76 8708 ARCH(5);
9ee6e8bb 8709 /* branch link/exchange thumb (blx) */
d9ba4830 8710 tmp = load_reg(s, rm);
7d1b0095 8711 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8712 tcg_gen_movi_i32(tmp2, s->pc);
8713 store_reg(s, 14, tmp2);
8714 gen_bx(s, tmp);
9ee6e8bb 8715 break;
eb0ecd5a
WN
8716 case 0x4:
8717 {
8718 /* crc32/crc32c */
8719 uint32_t c = extract32(insn, 8, 4);
8720
8721 /* Check this CPU supports ARMv8 CRC instructions.
8722 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8723 * Bits 8, 10 and 11 should be zero.
8724 */
d614a513 8725 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8726 (c & 0xd) != 0) {
8727 goto illegal_op;
8728 }
8729
8730 rn = extract32(insn, 16, 4);
8731 rd = extract32(insn, 12, 4);
8732
8733 tmp = load_reg(s, rn);
8734 tmp2 = load_reg(s, rm);
aa633469
PM
8735 if (op1 == 0) {
8736 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8737 } else if (op1 == 1) {
8738 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8739 }
eb0ecd5a
WN
8740 tmp3 = tcg_const_i32(1 << op1);
8741 if (c & 0x2) {
8742 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8743 } else {
8744 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8745 }
8746 tcg_temp_free_i32(tmp2);
8747 tcg_temp_free_i32(tmp3);
8748 store_reg(s, rd, tmp);
8749 break;
8750 }
9ee6e8bb 8751 case 0x5: /* saturating add/subtract */
be5e7a76 8752 ARCH(5TE);
9ee6e8bb
PB
8753 rd = (insn >> 12) & 0xf;
8754 rn = (insn >> 16) & 0xf;
b40d0353 8755 tmp = load_reg(s, rm);
5e3f878a 8756 tmp2 = load_reg(s, rn);
9ee6e8bb 8757 if (op1 & 2)
9ef39277 8758 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8759 if (op1 & 1)
9ef39277 8760 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8761 else
9ef39277 8762 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8763 tcg_temp_free_i32(tmp2);
5e3f878a 8764 store_reg(s, rd, tmp);
9ee6e8bb 8765 break;
49e14940 8766 case 7:
d4a2dc67
PM
8767 {
8768 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8769 switch (op1) {
19a6e31c
PM
8770 case 0:
8771 /* HLT */
8772 gen_hlt(s, imm16);
8773 break;
37e6456e
PM
8774 case 1:
8775 /* bkpt */
8776 ARCH(5);
8777 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8778 syn_aa32_bkpt(imm16, false),
8779 default_exception_el(s));
37e6456e
PM
8780 break;
8781 case 2:
8782 /* Hypervisor call (v7) */
8783 ARCH(7);
8784 if (IS_USER(s)) {
8785 goto illegal_op;
8786 }
8787 gen_hvc(s, imm16);
8788 break;
8789 case 3:
8790 /* Secure monitor call (v6+) */
8791 ARCH(6K);
8792 if (IS_USER(s)) {
8793 goto illegal_op;
8794 }
8795 gen_smc(s);
8796 break;
8797 default:
19a6e31c 8798 g_assert_not_reached();
49e14940 8799 }
9ee6e8bb 8800 break;
d4a2dc67 8801 }
9ee6e8bb
PB
8802 case 0x8: /* signed multiply */
8803 case 0xa:
8804 case 0xc:
8805 case 0xe:
be5e7a76 8806 ARCH(5TE);
9ee6e8bb
PB
8807 rs = (insn >> 8) & 0xf;
8808 rn = (insn >> 12) & 0xf;
8809 rd = (insn >> 16) & 0xf;
8810 if (op1 == 1) {
8811 /* (32 * 16) >> 16 */
5e3f878a
PB
8812 tmp = load_reg(s, rm);
8813 tmp2 = load_reg(s, rs);
9ee6e8bb 8814 if (sh & 4)
5e3f878a 8815 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8816 else
5e3f878a 8817 gen_sxth(tmp2);
a7812ae4
PB
8818 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8819 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8820 tmp = tcg_temp_new_i32();
ecc7b3aa 8821 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8822 tcg_temp_free_i64(tmp64);
9ee6e8bb 8823 if ((sh & 2) == 0) {
5e3f878a 8824 tmp2 = load_reg(s, rn);
9ef39277 8825 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8826 tcg_temp_free_i32(tmp2);
9ee6e8bb 8827 }
5e3f878a 8828 store_reg(s, rd, tmp);
9ee6e8bb
PB
8829 } else {
8830 /* 16 * 16 */
5e3f878a
PB
8831 tmp = load_reg(s, rm);
8832 tmp2 = load_reg(s, rs);
8833 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8834 tcg_temp_free_i32(tmp2);
9ee6e8bb 8835 if (op1 == 2) {
a7812ae4
PB
8836 tmp64 = tcg_temp_new_i64();
8837 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8838 tcg_temp_free_i32(tmp);
a7812ae4
PB
8839 gen_addq(s, tmp64, rn, rd);
8840 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8841 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8842 } else {
8843 if (op1 == 0) {
5e3f878a 8844 tmp2 = load_reg(s, rn);
9ef39277 8845 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8846 tcg_temp_free_i32(tmp2);
9ee6e8bb 8847 }
5e3f878a 8848 store_reg(s, rd, tmp);
9ee6e8bb
PB
8849 }
8850 }
8851 break;
8852 default:
8853 goto illegal_op;
8854 }
8855 } else if (((insn & 0x0e000000) == 0 &&
8856 (insn & 0x00000090) != 0x90) ||
8857 ((insn & 0x0e000000) == (1 << 25))) {
8858 int set_cc, logic_cc, shiftop;
8859
8860 op1 = (insn >> 21) & 0xf;
8861 set_cc = (insn >> 20) & 1;
8862 logic_cc = table_logic_cc[op1] & set_cc;
8863
8864 /* data processing instruction */
8865 if (insn & (1 << 25)) {
8866 /* immediate operand */
8867 val = insn & 0xff;
8868 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8869 if (shift) {
9ee6e8bb 8870 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8871 }
7d1b0095 8872 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8873 tcg_gen_movi_i32(tmp2, val);
8874 if (logic_cc && shift) {
8875 gen_set_CF_bit31(tmp2);
8876 }
9ee6e8bb
PB
8877 } else {
8878 /* register */
8879 rm = (insn) & 0xf;
e9bb4aa9 8880 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8881 shiftop = (insn >> 5) & 3;
8882 if (!(insn & (1 << 4))) {
8883 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8884 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8885 } else {
8886 rs = (insn >> 8) & 0xf;
8984bd2e 8887 tmp = load_reg(s, rs);
e9bb4aa9 8888 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8889 }
8890 }
8891 if (op1 != 0x0f && op1 != 0x0d) {
8892 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8893 tmp = load_reg(s, rn);
8894 } else {
f764718d 8895 tmp = NULL;
9ee6e8bb
PB
8896 }
8897 rd = (insn >> 12) & 0xf;
8898 switch(op1) {
8899 case 0x00:
e9bb4aa9
JR
8900 tcg_gen_and_i32(tmp, tmp, tmp2);
8901 if (logic_cc) {
8902 gen_logic_CC(tmp);
8903 }
7dcc1f89 8904 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8905 break;
8906 case 0x01:
e9bb4aa9
JR
8907 tcg_gen_xor_i32(tmp, tmp, tmp2);
8908 if (logic_cc) {
8909 gen_logic_CC(tmp);
8910 }
7dcc1f89 8911 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8912 break;
8913 case 0x02:
8914 if (set_cc && rd == 15) {
8915 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8916 if (IS_USER(s)) {
9ee6e8bb 8917 goto illegal_op;
e9bb4aa9 8918 }
72485ec4 8919 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8920 gen_exception_return(s, tmp);
9ee6e8bb 8921 } else {
e9bb4aa9 8922 if (set_cc) {
72485ec4 8923 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8924 } else {
8925 tcg_gen_sub_i32(tmp, tmp, tmp2);
8926 }
7dcc1f89 8927 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8928 }
8929 break;
8930 case 0x03:
e9bb4aa9 8931 if (set_cc) {
72485ec4 8932 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8933 } else {
8934 tcg_gen_sub_i32(tmp, tmp2, tmp);
8935 }
7dcc1f89 8936 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8937 break;
8938 case 0x04:
e9bb4aa9 8939 if (set_cc) {
72485ec4 8940 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8941 } else {
8942 tcg_gen_add_i32(tmp, tmp, tmp2);
8943 }
7dcc1f89 8944 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8945 break;
8946 case 0x05:
e9bb4aa9 8947 if (set_cc) {
49b4c31e 8948 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8949 } else {
8950 gen_add_carry(tmp, tmp, tmp2);
8951 }
7dcc1f89 8952 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8953 break;
8954 case 0x06:
e9bb4aa9 8955 if (set_cc) {
2de68a49 8956 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8957 } else {
8958 gen_sub_carry(tmp, tmp, tmp2);
8959 }
7dcc1f89 8960 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8961 break;
8962 case 0x07:
e9bb4aa9 8963 if (set_cc) {
2de68a49 8964 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8965 } else {
8966 gen_sub_carry(tmp, tmp2, tmp);
8967 }
7dcc1f89 8968 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8969 break;
8970 case 0x08:
8971 if (set_cc) {
e9bb4aa9
JR
8972 tcg_gen_and_i32(tmp, tmp, tmp2);
8973 gen_logic_CC(tmp);
9ee6e8bb 8974 }
7d1b0095 8975 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8976 break;
8977 case 0x09:
8978 if (set_cc) {
e9bb4aa9
JR
8979 tcg_gen_xor_i32(tmp, tmp, tmp2);
8980 gen_logic_CC(tmp);
9ee6e8bb 8981 }
7d1b0095 8982 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8983 break;
8984 case 0x0a:
8985 if (set_cc) {
72485ec4 8986 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8987 }
7d1b0095 8988 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8989 break;
8990 case 0x0b:
8991 if (set_cc) {
72485ec4 8992 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8993 }
7d1b0095 8994 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8995 break;
8996 case 0x0c:
e9bb4aa9
JR
8997 tcg_gen_or_i32(tmp, tmp, tmp2);
8998 if (logic_cc) {
8999 gen_logic_CC(tmp);
9000 }
7dcc1f89 9001 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9002 break;
9003 case 0x0d:
9004 if (logic_cc && rd == 15) {
9005 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9006 if (IS_USER(s)) {
9ee6e8bb 9007 goto illegal_op;
e9bb4aa9
JR
9008 }
9009 gen_exception_return(s, tmp2);
9ee6e8bb 9010 } else {
e9bb4aa9
JR
9011 if (logic_cc) {
9012 gen_logic_CC(tmp2);
9013 }
7dcc1f89 9014 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9015 }
9016 break;
9017 case 0x0e:
f669df27 9018 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9019 if (logic_cc) {
9020 gen_logic_CC(tmp);
9021 }
7dcc1f89 9022 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9023 break;
9024 default:
9025 case 0x0f:
e9bb4aa9
JR
9026 tcg_gen_not_i32(tmp2, tmp2);
9027 if (logic_cc) {
9028 gen_logic_CC(tmp2);
9029 }
7dcc1f89 9030 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9031 break;
9032 }
e9bb4aa9 9033 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9034 tcg_temp_free_i32(tmp2);
e9bb4aa9 9035 }
9ee6e8bb
PB
9036 } else {
9037 /* other instructions */
9038 op1 = (insn >> 24) & 0xf;
9039 switch(op1) {
9040 case 0x0:
9041 case 0x1:
9042 /* multiplies, extra load/stores */
9043 sh = (insn >> 5) & 3;
9044 if (sh == 0) {
9045 if (op1 == 0x0) {
9046 rd = (insn >> 16) & 0xf;
9047 rn = (insn >> 12) & 0xf;
9048 rs = (insn >> 8) & 0xf;
9049 rm = (insn) & 0xf;
9050 op1 = (insn >> 20) & 0xf;
9051 switch (op1) {
9052 case 0: case 1: case 2: case 3: case 6:
9053 /* 32 bit mul */
5e3f878a
PB
9054 tmp = load_reg(s, rs);
9055 tmp2 = load_reg(s, rm);
9056 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9057 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9058 if (insn & (1 << 22)) {
9059 /* Subtract (mls) */
9060 ARCH(6T2);
5e3f878a
PB
9061 tmp2 = load_reg(s, rn);
9062 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9063 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9064 } else if (insn & (1 << 21)) {
9065 /* Add */
5e3f878a
PB
9066 tmp2 = load_reg(s, rn);
9067 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9068 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9069 }
9070 if (insn & (1 << 20))
5e3f878a
PB
9071 gen_logic_CC(tmp);
9072 store_reg(s, rd, tmp);
9ee6e8bb 9073 break;
8aac08b1
AJ
9074 case 4:
9075 /* 64 bit mul double accumulate (UMAAL) */
9076 ARCH(6);
9077 tmp = load_reg(s, rs);
9078 tmp2 = load_reg(s, rm);
9079 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9080 gen_addq_lo(s, tmp64, rn);
9081 gen_addq_lo(s, tmp64, rd);
9082 gen_storeq_reg(s, rn, rd, tmp64);
9083 tcg_temp_free_i64(tmp64);
9084 break;
9085 case 8: case 9: case 10: case 11:
9086 case 12: case 13: case 14: case 15:
9087 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9088 tmp = load_reg(s, rs);
9089 tmp2 = load_reg(s, rm);
8aac08b1 9090 if (insn & (1 << 22)) {
c9f10124 9091 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9092 } else {
c9f10124 9093 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9094 }
9095 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9096 TCGv_i32 al = load_reg(s, rn);
9097 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9098 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9099 tcg_temp_free_i32(al);
9100 tcg_temp_free_i32(ah);
9ee6e8bb 9101 }
8aac08b1 9102 if (insn & (1 << 20)) {
c9f10124 9103 gen_logicq_cc(tmp, tmp2);
8aac08b1 9104 }
c9f10124
RH
9105 store_reg(s, rn, tmp);
9106 store_reg(s, rd, tmp2);
9ee6e8bb 9107 break;
8aac08b1
AJ
9108 default:
9109 goto illegal_op;
9ee6e8bb
PB
9110 }
9111 } else {
9112 rn = (insn >> 16) & 0xf;
9113 rd = (insn >> 12) & 0xf;
9114 if (insn & (1 << 23)) {
9115 /* load/store exclusive */
2359bf80 9116 int op2 = (insn >> 8) & 3;
86753403 9117 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9118
9119 switch (op2) {
9120 case 0: /* lda/stl */
9121 if (op1 == 1) {
9122 goto illegal_op;
9123 }
9124 ARCH(8);
9125 break;
9126 case 1: /* reserved */
9127 goto illegal_op;
9128 case 2: /* ldaex/stlex */
9129 ARCH(8);
9130 break;
9131 case 3: /* ldrex/strex */
9132 if (op1) {
9133 ARCH(6K);
9134 } else {
9135 ARCH(6);
9136 }
9137 break;
9138 }
9139
3174f8e9 9140 addr = tcg_temp_local_new_i32();
98a46317 9141 load_reg_var(s, addr, rn);
2359bf80
MR
9142
9143 /* Since the emulation does not have barriers,
9144 the acquire/release semantics need no special
9145 handling */
9146 if (op2 == 0) {
9147 if (insn & (1 << 20)) {
9148 tmp = tcg_temp_new_i32();
9149 switch (op1) {
9150 case 0: /* lda */
9bb6558a
PM
9151 gen_aa32_ld32u_iss(s, tmp, addr,
9152 get_mem_index(s),
9153 rd | ISSIsAcqRel);
2359bf80
MR
9154 break;
9155 case 2: /* ldab */
9bb6558a
PM
9156 gen_aa32_ld8u_iss(s, tmp, addr,
9157 get_mem_index(s),
9158 rd | ISSIsAcqRel);
2359bf80
MR
9159 break;
9160 case 3: /* ldah */
9bb6558a
PM
9161 gen_aa32_ld16u_iss(s, tmp, addr,
9162 get_mem_index(s),
9163 rd | ISSIsAcqRel);
2359bf80
MR
9164 break;
9165 default:
9166 abort();
9167 }
9168 store_reg(s, rd, tmp);
9169 } else {
9170 rm = insn & 0xf;
9171 tmp = load_reg(s, rm);
9172 switch (op1) {
9173 case 0: /* stl */
9bb6558a
PM
9174 gen_aa32_st32_iss(s, tmp, addr,
9175 get_mem_index(s),
9176 rm | ISSIsAcqRel);
2359bf80
MR
9177 break;
9178 case 2: /* stlb */
9bb6558a
PM
9179 gen_aa32_st8_iss(s, tmp, addr,
9180 get_mem_index(s),
9181 rm | ISSIsAcqRel);
2359bf80
MR
9182 break;
9183 case 3: /* stlh */
9bb6558a
PM
9184 gen_aa32_st16_iss(s, tmp, addr,
9185 get_mem_index(s),
9186 rm | ISSIsAcqRel);
2359bf80
MR
9187 break;
9188 default:
9189 abort();
9190 }
9191 tcg_temp_free_i32(tmp);
9192 }
9193 } else if (insn & (1 << 20)) {
86753403
PB
9194 switch (op1) {
9195 case 0: /* ldrex */
426f5abc 9196 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
9197 break;
9198 case 1: /* ldrexd */
426f5abc 9199 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
9200 break;
9201 case 2: /* ldrexb */
426f5abc 9202 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
9203 break;
9204 case 3: /* ldrexh */
426f5abc 9205 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
9206 break;
9207 default:
9208 abort();
9209 }
9ee6e8bb
PB
9210 } else {
9211 rm = insn & 0xf;
86753403
PB
9212 switch (op1) {
9213 case 0: /* strex */
426f5abc 9214 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
9215 break;
9216 case 1: /* strexd */
502e64fe 9217 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9218 break;
9219 case 2: /* strexb */
426f5abc 9220 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9221 break;
9222 case 3: /* strexh */
426f5abc 9223 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9224 break;
9225 default:
9226 abort();
9227 }
9ee6e8bb 9228 }
39d5492a 9229 tcg_temp_free_i32(addr);
9ee6e8bb 9230 } else {
cf12bce0
EC
9231 TCGv taddr;
9232 TCGMemOp opc = s->be_data;
9233
9ee6e8bb
PB
9234 /* SWP instruction */
9235 rm = (insn) & 0xf;
9236
9ee6e8bb 9237 if (insn & (1 << 22)) {
cf12bce0 9238 opc |= MO_UB;
9ee6e8bb 9239 } else {
cf12bce0 9240 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9241 }
cf12bce0
EC
9242
9243 addr = load_reg(s, rn);
9244 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9245 tcg_temp_free_i32(addr);
cf12bce0
EC
9246
9247 tmp = load_reg(s, rm);
9248 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9249 get_mem_index(s), opc);
9250 tcg_temp_free(taddr);
9251 store_reg(s, rd, tmp);
9ee6e8bb
PB
9252 }
9253 }
9254 } else {
9255 int address_offset;
3960c336 9256 bool load = insn & (1 << 20);
63f26fcf
PM
9257 bool wbit = insn & (1 << 21);
9258 bool pbit = insn & (1 << 24);
3960c336 9259 bool doubleword = false;
9bb6558a
PM
9260 ISSInfo issinfo;
9261
9ee6e8bb
PB
9262 /* Misc load/store */
9263 rn = (insn >> 16) & 0xf;
9264 rd = (insn >> 12) & 0xf;
3960c336 9265
9bb6558a
PM
9266 /* ISS not valid if writeback */
9267 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9268
3960c336
PM
9269 if (!load && (sh & 2)) {
9270 /* doubleword */
9271 ARCH(5TE);
9272 if (rd & 1) {
9273 /* UNPREDICTABLE; we choose to UNDEF */
9274 goto illegal_op;
9275 }
9276 load = (sh & 1) == 0;
9277 doubleword = true;
9278 }
9279
b0109805 9280 addr = load_reg(s, rn);
63f26fcf 9281 if (pbit) {
b0109805 9282 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9283 }
9ee6e8bb 9284 address_offset = 0;
3960c336
PM
9285
9286 if (doubleword) {
9287 if (!load) {
9ee6e8bb 9288 /* store */
b0109805 9289 tmp = load_reg(s, rd);
12dcc321 9290 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9291 tcg_temp_free_i32(tmp);
b0109805
PB
9292 tcg_gen_addi_i32(addr, addr, 4);
9293 tmp = load_reg(s, rd + 1);
12dcc321 9294 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9295 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9296 } else {
9297 /* load */
5a839c0d 9298 tmp = tcg_temp_new_i32();
12dcc321 9299 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9300 store_reg(s, rd, tmp);
9301 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9302 tmp = tcg_temp_new_i32();
12dcc321 9303 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9304 rd++;
9ee6e8bb
PB
9305 }
9306 address_offset = -4;
3960c336
PM
9307 } else if (load) {
9308 /* load */
9309 tmp = tcg_temp_new_i32();
9310 switch (sh) {
9311 case 1:
9bb6558a
PM
9312 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9313 issinfo);
3960c336
PM
9314 break;
9315 case 2:
9bb6558a
PM
9316 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9317 issinfo);
3960c336
PM
9318 break;
9319 default:
9320 case 3:
9bb6558a
PM
9321 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9322 issinfo);
3960c336
PM
9323 break;
9324 }
9ee6e8bb
PB
9325 } else {
9326 /* store */
b0109805 9327 tmp = load_reg(s, rd);
9bb6558a 9328 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9329 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9330 }
9331 /* Perform base writeback before the loaded value to
9332 ensure correct behavior with overlapping index registers.
b6af0975 9333 ldrd with base writeback is undefined if the
9ee6e8bb 9334 destination and index registers overlap. */
63f26fcf 9335 if (!pbit) {
b0109805
PB
9336 gen_add_datah_offset(s, insn, address_offset, addr);
9337 store_reg(s, rn, addr);
63f26fcf 9338 } else if (wbit) {
9ee6e8bb 9339 if (address_offset)
b0109805
PB
9340 tcg_gen_addi_i32(addr, addr, address_offset);
9341 store_reg(s, rn, addr);
9342 } else {
7d1b0095 9343 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9344 }
9345 if (load) {
9346 /* Complete the load. */
b0109805 9347 store_reg(s, rd, tmp);
9ee6e8bb
PB
9348 }
9349 }
9350 break;
9351 case 0x4:
9352 case 0x5:
9353 goto do_ldst;
9354 case 0x6:
9355 case 0x7:
9356 if (insn & (1 << 4)) {
9357 ARCH(6);
9358 /* Armv6 Media instructions. */
9359 rm = insn & 0xf;
9360 rn = (insn >> 16) & 0xf;
2c0262af 9361 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9362 rs = (insn >> 8) & 0xf;
9363 switch ((insn >> 23) & 3) {
9364 case 0: /* Parallel add/subtract. */
9365 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9366 tmp = load_reg(s, rn);
9367 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9368 sh = (insn >> 5) & 7;
9369 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9370 goto illegal_op;
6ddbc6e4 9371 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9372 tcg_temp_free_i32(tmp2);
6ddbc6e4 9373 store_reg(s, rd, tmp);
9ee6e8bb
PB
9374 break;
9375 case 1:
9376 if ((insn & 0x00700020) == 0) {
6c95676b 9377 /* Halfword pack. */
3670669c
PB
9378 tmp = load_reg(s, rn);
9379 tmp2 = load_reg(s, rm);
9ee6e8bb 9380 shift = (insn >> 7) & 0x1f;
3670669c
PB
9381 if (insn & (1 << 6)) {
9382 /* pkhtb */
22478e79
AZ
9383 if (shift == 0)
9384 shift = 31;
9385 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9386 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9387 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9388 } else {
9389 /* pkhbt */
22478e79
AZ
9390 if (shift)
9391 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9392 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9393 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9394 }
9395 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9396 tcg_temp_free_i32(tmp2);
3670669c 9397 store_reg(s, rd, tmp);
9ee6e8bb
PB
9398 } else if ((insn & 0x00200020) == 0x00200000) {
9399 /* [us]sat */
6ddbc6e4 9400 tmp = load_reg(s, rm);
9ee6e8bb
PB
9401 shift = (insn >> 7) & 0x1f;
9402 if (insn & (1 << 6)) {
9403 if (shift == 0)
9404 shift = 31;
6ddbc6e4 9405 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9406 } else {
6ddbc6e4 9407 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9408 }
9409 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9410 tmp2 = tcg_const_i32(sh);
9411 if (insn & (1 << 22))
9ef39277 9412 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9413 else
9ef39277 9414 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9415 tcg_temp_free_i32(tmp2);
6ddbc6e4 9416 store_reg(s, rd, tmp);
9ee6e8bb
PB
9417 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9418 /* [us]sat16 */
6ddbc6e4 9419 tmp = load_reg(s, rm);
9ee6e8bb 9420 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9421 tmp2 = tcg_const_i32(sh);
9422 if (insn & (1 << 22))
9ef39277 9423 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9424 else
9ef39277 9425 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9426 tcg_temp_free_i32(tmp2);
6ddbc6e4 9427 store_reg(s, rd, tmp);
9ee6e8bb
PB
9428 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9429 /* Select bytes. */
6ddbc6e4
PB
9430 tmp = load_reg(s, rn);
9431 tmp2 = load_reg(s, rm);
7d1b0095 9432 tmp3 = tcg_temp_new_i32();
0ecb72a5 9433 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9434 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9435 tcg_temp_free_i32(tmp3);
9436 tcg_temp_free_i32(tmp2);
6ddbc6e4 9437 store_reg(s, rd, tmp);
9ee6e8bb 9438 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9439 tmp = load_reg(s, rm);
9ee6e8bb 9440 shift = (insn >> 10) & 3;
1301f322 9441 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9442 rotate, a shift is sufficient. */
9443 if (shift != 0)
f669df27 9444 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9445 op1 = (insn >> 20) & 7;
9446 switch (op1) {
5e3f878a
PB
9447 case 0: gen_sxtb16(tmp); break;
9448 case 2: gen_sxtb(tmp); break;
9449 case 3: gen_sxth(tmp); break;
9450 case 4: gen_uxtb16(tmp); break;
9451 case 6: gen_uxtb(tmp); break;
9452 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9453 default: goto illegal_op;
9454 }
9455 if (rn != 15) {
5e3f878a 9456 tmp2 = load_reg(s, rn);
9ee6e8bb 9457 if ((op1 & 3) == 0) {
5e3f878a 9458 gen_add16(tmp, tmp2);
9ee6e8bb 9459 } else {
5e3f878a 9460 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9461 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9462 }
9463 }
6c95676b 9464 store_reg(s, rd, tmp);
9ee6e8bb
PB
9465 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9466 /* rev */
b0109805 9467 tmp = load_reg(s, rm);
9ee6e8bb
PB
9468 if (insn & (1 << 22)) {
9469 if (insn & (1 << 7)) {
b0109805 9470 gen_revsh(tmp);
9ee6e8bb
PB
9471 } else {
9472 ARCH(6T2);
b0109805 9473 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9474 }
9475 } else {
9476 if (insn & (1 << 7))
b0109805 9477 gen_rev16(tmp);
9ee6e8bb 9478 else
66896cb8 9479 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9480 }
b0109805 9481 store_reg(s, rd, tmp);
9ee6e8bb
PB
9482 } else {
9483 goto illegal_op;
9484 }
9485 break;
9486 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9487 switch ((insn >> 20) & 0x7) {
9488 case 5:
9489 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9490 /* op2 not 00x or 11x : UNDEF */
9491 goto illegal_op;
9492 }
838fa72d
AJ
9493 /* Signed multiply most significant [accumulate].
9494 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9495 tmp = load_reg(s, rm);
9496 tmp2 = load_reg(s, rs);
a7812ae4 9497 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9498
955a7dd5 9499 if (rd != 15) {
838fa72d 9500 tmp = load_reg(s, rd);
9ee6e8bb 9501 if (insn & (1 << 6)) {
838fa72d 9502 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9503 } else {
838fa72d 9504 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9505 }
9506 }
838fa72d
AJ
9507 if (insn & (1 << 5)) {
9508 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9509 }
9510 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9511 tmp = tcg_temp_new_i32();
ecc7b3aa 9512 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9513 tcg_temp_free_i64(tmp64);
955a7dd5 9514 store_reg(s, rn, tmp);
41e9564d
PM
9515 break;
9516 case 0:
9517 case 4:
9518 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9519 if (insn & (1 << 7)) {
9520 goto illegal_op;
9521 }
9522 tmp = load_reg(s, rm);
9523 tmp2 = load_reg(s, rs);
9ee6e8bb 9524 if (insn & (1 << 5))
5e3f878a
PB
9525 gen_swap_half(tmp2);
9526 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9527 if (insn & (1 << 22)) {
5e3f878a 9528 /* smlald, smlsld */
33bbd75a
PC
9529 TCGv_i64 tmp64_2;
9530
a7812ae4 9531 tmp64 = tcg_temp_new_i64();
33bbd75a 9532 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9533 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9534 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9535 tcg_temp_free_i32(tmp);
33bbd75a
PC
9536 tcg_temp_free_i32(tmp2);
9537 if (insn & (1 << 6)) {
9538 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9539 } else {
9540 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9541 }
9542 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9543 gen_addq(s, tmp64, rd, rn);
9544 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9545 tcg_temp_free_i64(tmp64);
9ee6e8bb 9546 } else {
5e3f878a 9547 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9548 if (insn & (1 << 6)) {
9549 /* This subtraction cannot overflow. */
9550 tcg_gen_sub_i32(tmp, tmp, tmp2);
9551 } else {
9552 /* This addition cannot overflow 32 bits;
9553 * however it may overflow considered as a
9554 * signed operation, in which case we must set
9555 * the Q flag.
9556 */
9557 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9558 }
9559 tcg_temp_free_i32(tmp2);
22478e79 9560 if (rd != 15)
9ee6e8bb 9561 {
22478e79 9562 tmp2 = load_reg(s, rd);
9ef39277 9563 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9564 tcg_temp_free_i32(tmp2);
9ee6e8bb 9565 }
22478e79 9566 store_reg(s, rn, tmp);
9ee6e8bb 9567 }
41e9564d 9568 break;
b8b8ea05
PM
9569 case 1:
9570 case 3:
9571 /* SDIV, UDIV */
d614a513 9572 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9573 goto illegal_op;
9574 }
9575 if (((insn >> 5) & 7) || (rd != 15)) {
9576 goto illegal_op;
9577 }
9578 tmp = load_reg(s, rm);
9579 tmp2 = load_reg(s, rs);
9580 if (insn & (1 << 21)) {
9581 gen_helper_udiv(tmp, tmp, tmp2);
9582 } else {
9583 gen_helper_sdiv(tmp, tmp, tmp2);
9584 }
9585 tcg_temp_free_i32(tmp2);
9586 store_reg(s, rn, tmp);
9587 break;
41e9564d
PM
9588 default:
9589 goto illegal_op;
9ee6e8bb
PB
9590 }
9591 break;
9592 case 3:
9593 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9594 switch (op1) {
9595 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9596 ARCH(6);
9597 tmp = load_reg(s, rm);
9598 tmp2 = load_reg(s, rs);
9599 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9600 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9601 if (rd != 15) {
9602 tmp2 = load_reg(s, rd);
6ddbc6e4 9603 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9604 tcg_temp_free_i32(tmp2);
9ee6e8bb 9605 }
ded9d295 9606 store_reg(s, rn, tmp);
9ee6e8bb
PB
9607 break;
9608 case 0x20: case 0x24: case 0x28: case 0x2c:
9609 /* Bitfield insert/clear. */
9610 ARCH(6T2);
9611 shift = (insn >> 7) & 0x1f;
9612 i = (insn >> 16) & 0x1f;
45140a57
KB
9613 if (i < shift) {
9614 /* UNPREDICTABLE; we choose to UNDEF */
9615 goto illegal_op;
9616 }
9ee6e8bb
PB
9617 i = i + 1 - shift;
9618 if (rm == 15) {
7d1b0095 9619 tmp = tcg_temp_new_i32();
5e3f878a 9620 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9621 } else {
5e3f878a 9622 tmp = load_reg(s, rm);
9ee6e8bb
PB
9623 }
9624 if (i != 32) {
5e3f878a 9625 tmp2 = load_reg(s, rd);
d593c48e 9626 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9627 tcg_temp_free_i32(tmp2);
9ee6e8bb 9628 }
5e3f878a 9629 store_reg(s, rd, tmp);
9ee6e8bb
PB
9630 break;
9631 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9632 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9633 ARCH(6T2);
5e3f878a 9634 tmp = load_reg(s, rm);
9ee6e8bb
PB
9635 shift = (insn >> 7) & 0x1f;
9636 i = ((insn >> 16) & 0x1f) + 1;
9637 if (shift + i > 32)
9638 goto illegal_op;
9639 if (i < 32) {
9640 if (op1 & 0x20) {
59a71b4c 9641 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9642 } else {
59a71b4c 9643 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9644 }
9645 }
5e3f878a 9646 store_reg(s, rd, tmp);
9ee6e8bb
PB
9647 break;
9648 default:
9649 goto illegal_op;
9650 }
9651 break;
9652 }
9653 break;
9654 }
9655 do_ldst:
9656 /* Check for undefined extension instructions
9657 * per the ARM Bible IE:
9658 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9659 */
9660 sh = (0xf << 20) | (0xf << 4);
9661 if (op1 == 0x7 && ((insn & sh) == sh))
9662 {
9663 goto illegal_op;
9664 }
9665 /* load/store byte/word */
9666 rn = (insn >> 16) & 0xf;
9667 rd = (insn >> 12) & 0xf;
b0109805 9668 tmp2 = load_reg(s, rn);
a99caa48
PM
9669 if ((insn & 0x01200000) == 0x00200000) {
9670 /* ldrt/strt */
579d21cc 9671 i = get_a32_user_mem_index(s);
a99caa48
PM
9672 } else {
9673 i = get_mem_index(s);
9674 }
9ee6e8bb 9675 if (insn & (1 << 24))
b0109805 9676 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9677 if (insn & (1 << 20)) {
9678 /* load */
5a839c0d 9679 tmp = tcg_temp_new_i32();
9ee6e8bb 9680 if (insn & (1 << 22)) {
9bb6558a 9681 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9682 } else {
9bb6558a 9683 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9684 }
9ee6e8bb
PB
9685 } else {
9686 /* store */
b0109805 9687 tmp = load_reg(s, rd);
5a839c0d 9688 if (insn & (1 << 22)) {
9bb6558a 9689 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9690 } else {
9bb6558a 9691 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9692 }
9693 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9694 }
9695 if (!(insn & (1 << 24))) {
b0109805
PB
9696 gen_add_data_offset(s, insn, tmp2);
9697 store_reg(s, rn, tmp2);
9698 } else if (insn & (1 << 21)) {
9699 store_reg(s, rn, tmp2);
9700 } else {
7d1b0095 9701 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9702 }
9703 if (insn & (1 << 20)) {
9704 /* Complete the load. */
7dcc1f89 9705 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9706 }
9707 break;
9708 case 0x08:
9709 case 0x09:
9710 {
da3e53dd
PM
9711 int j, n, loaded_base;
9712 bool exc_return = false;
9713 bool is_load = extract32(insn, 20, 1);
9714 bool user = false;
39d5492a 9715 TCGv_i32 loaded_var;
9ee6e8bb
PB
9716 /* load/store multiple words */
9717 /* XXX: store correct base if write back */
9ee6e8bb 9718 if (insn & (1 << 22)) {
da3e53dd 9719 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9720 if (IS_USER(s))
9721 goto illegal_op; /* only usable in supervisor mode */
9722
da3e53dd
PM
9723 if (is_load && extract32(insn, 15, 1)) {
9724 exc_return = true;
9725 } else {
9726 user = true;
9727 }
9ee6e8bb
PB
9728 }
9729 rn = (insn >> 16) & 0xf;
b0109805 9730 addr = load_reg(s, rn);
9ee6e8bb
PB
9731
9732 /* compute total size */
9733 loaded_base = 0;
f764718d 9734 loaded_var = NULL;
9ee6e8bb
PB
9735 n = 0;
9736 for(i=0;i<16;i++) {
9737 if (insn & (1 << i))
9738 n++;
9739 }
9740 /* XXX: test invalid n == 0 case ? */
9741 if (insn & (1 << 23)) {
9742 if (insn & (1 << 24)) {
9743 /* pre increment */
b0109805 9744 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9745 } else {
9746 /* post increment */
9747 }
9748 } else {
9749 if (insn & (1 << 24)) {
9750 /* pre decrement */
b0109805 9751 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9752 } else {
9753 /* post decrement */
9754 if (n != 1)
b0109805 9755 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9756 }
9757 }
9758 j = 0;
9759 for(i=0;i<16;i++) {
9760 if (insn & (1 << i)) {
da3e53dd 9761 if (is_load) {
9ee6e8bb 9762 /* load */
5a839c0d 9763 tmp = tcg_temp_new_i32();
12dcc321 9764 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9765 if (user) {
b75263d6 9766 tmp2 = tcg_const_i32(i);
1ce94f81 9767 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9768 tcg_temp_free_i32(tmp2);
7d1b0095 9769 tcg_temp_free_i32(tmp);
9ee6e8bb 9770 } else if (i == rn) {
b0109805 9771 loaded_var = tmp;
9ee6e8bb 9772 loaded_base = 1;
fb0e8e79
PM
9773 } else if (rn == 15 && exc_return) {
9774 store_pc_exc_ret(s, tmp);
9ee6e8bb 9775 } else {
7dcc1f89 9776 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9777 }
9778 } else {
9779 /* store */
9780 if (i == 15) {
9781 /* special case: r15 = PC + 8 */
9782 val = (long)s->pc + 4;
7d1b0095 9783 tmp = tcg_temp_new_i32();
b0109805 9784 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9785 } else if (user) {
7d1b0095 9786 tmp = tcg_temp_new_i32();
b75263d6 9787 tmp2 = tcg_const_i32(i);
9ef39277 9788 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9789 tcg_temp_free_i32(tmp2);
9ee6e8bb 9790 } else {
b0109805 9791 tmp = load_reg(s, i);
9ee6e8bb 9792 }
12dcc321 9793 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9794 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9795 }
9796 j++;
9797 /* no need to add after the last transfer */
9798 if (j != n)
b0109805 9799 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9800 }
9801 }
9802 if (insn & (1 << 21)) {
9803 /* write back */
9804 if (insn & (1 << 23)) {
9805 if (insn & (1 << 24)) {
9806 /* pre increment */
9807 } else {
9808 /* post increment */
b0109805 9809 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9810 }
9811 } else {
9812 if (insn & (1 << 24)) {
9813 /* pre decrement */
9814 if (n != 1)
b0109805 9815 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9816 } else {
9817 /* post decrement */
b0109805 9818 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9819 }
9820 }
b0109805
PB
9821 store_reg(s, rn, addr);
9822 } else {
7d1b0095 9823 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9824 }
9825 if (loaded_base) {
b0109805 9826 store_reg(s, rn, loaded_var);
9ee6e8bb 9827 }
da3e53dd 9828 if (exc_return) {
9ee6e8bb 9829 /* Restore CPSR from SPSR. */
d9ba4830 9830 tmp = load_cpu_field(spsr);
235ea1f5 9831 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9832 tcg_temp_free_i32(tmp);
b29fd33d 9833 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9834 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9835 }
9836 }
9837 break;
9838 case 0xa:
9839 case 0xb:
9840 {
9841 int32_t offset;
9842
9843 /* branch (and link) */
9844 val = (int32_t)s->pc;
9845 if (insn & (1 << 24)) {
7d1b0095 9846 tmp = tcg_temp_new_i32();
5e3f878a
PB
9847 tcg_gen_movi_i32(tmp, val);
9848 store_reg(s, 14, tmp);
9ee6e8bb 9849 }
534df156
PM
9850 offset = sextract32(insn << 2, 0, 26);
9851 val += offset + 4;
9ee6e8bb
PB
9852 gen_jmp(s, val);
9853 }
9854 break;
9855 case 0xc:
9856 case 0xd:
9857 case 0xe:
6a57f3eb
WN
9858 if (((insn >> 8) & 0xe) == 10) {
9859 /* VFP. */
7dcc1f89 9860 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9861 goto illegal_op;
9862 }
7dcc1f89 9863 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9864 /* Coprocessor. */
9ee6e8bb 9865 goto illegal_op;
6a57f3eb 9866 }
9ee6e8bb
PB
9867 break;
9868 case 0xf:
9869 /* swi */
eaed129d 9870 gen_set_pc_im(s, s->pc);
d4a2dc67 9871 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9872 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9873 break;
9874 default:
9875 illegal_op:
73710361
GB
9876 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9877 default_exception_el(s));
9ee6e8bb
PB
9878 break;
9879 }
9880 }
9881}
9882
296e5a0a
PM
9883static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9884{
9885 /* Return true if this is a 16 bit instruction. We must be precise
9886 * about this (matching the decode). We assume that s->pc still
9887 * points to the first 16 bits of the insn.
9888 */
9889 if ((insn >> 11) < 0x1d) {
9890 /* Definitely a 16-bit instruction */
9891 return true;
9892 }
9893
9894 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9895 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9896 * end up actually treating this as two 16-bit insns, though,
9897 * if it's half of a bl/blx pair that might span a page boundary.
9898 */
9899 if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
9900 /* Thumb2 cores (including all M profile ones) always treat
9901 * 32-bit insns as 32-bit.
9902 */
9903 return false;
9904 }
9905
9906 if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
9907 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9908 * is not on the next page; we merge this into a 32-bit
9909 * insn.
9910 */
9911 return false;
9912 }
9913 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9914 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9915 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9916 * -- handle as single 16 bit insn
9917 */
9918 return true;
9919}
9920
9ee6e8bb
PB
/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 0..7 form the logical group (and/bic/orr/orn/eor/...). */
    return op < 8 ? 1 : 0;
}
9927
9928/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9929 then set condition code flags based on the result of the operation.
9930 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9931 to the high bit of T1.
9932 Returns zero if the opcode is valid. */
9933
9934static int
39d5492a
PM
9935gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9936 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9937{
9938 int logic_cc;
9939
9940 logic_cc = 0;
9941 switch (op) {
9942 case 0: /* and */
396e467c 9943 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9944 logic_cc = conds;
9945 break;
9946 case 1: /* bic */
f669df27 9947 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9948 logic_cc = conds;
9949 break;
9950 case 2: /* orr */
396e467c 9951 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9952 logic_cc = conds;
9953 break;
9954 case 3: /* orn */
29501f1b 9955 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9956 logic_cc = conds;
9957 break;
9958 case 4: /* eor */
396e467c 9959 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9960 logic_cc = conds;
9961 break;
9962 case 8: /* add */
9963 if (conds)
72485ec4 9964 gen_add_CC(t0, t0, t1);
9ee6e8bb 9965 else
396e467c 9966 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9967 break;
9968 case 10: /* adc */
9969 if (conds)
49b4c31e 9970 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9971 else
396e467c 9972 gen_adc(t0, t1);
9ee6e8bb
PB
9973 break;
9974 case 11: /* sbc */
2de68a49
RH
9975 if (conds) {
9976 gen_sbc_CC(t0, t0, t1);
9977 } else {
396e467c 9978 gen_sub_carry(t0, t0, t1);
2de68a49 9979 }
9ee6e8bb
PB
9980 break;
9981 case 13: /* sub */
9982 if (conds)
72485ec4 9983 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9984 else
396e467c 9985 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9986 break;
9987 case 14: /* rsb */
9988 if (conds)
72485ec4 9989 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9990 else
396e467c 9991 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9992 break;
9993 default: /* 5, 6, 7, 9, 12, 15. */
9994 return 1;
9995 }
9996 if (logic_cc) {
396e467c 9997 gen_logic_CC(t0);
9ee6e8bb 9998 if (shifter_out)
396e467c 9999 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10000 }
10001 return 0;
10002}
10003
2eea841c
PM
10004/* Translate a 32-bit thumb instruction. */
10005static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10006{
296e5a0a 10007 uint32_t imm, shift, offset;
9ee6e8bb 10008 uint32_t rd, rn, rm, rs;
39d5492a
PM
10009 TCGv_i32 tmp;
10010 TCGv_i32 tmp2;
10011 TCGv_i32 tmp3;
10012 TCGv_i32 addr;
a7812ae4 10013 TCGv_i64 tmp64;
9ee6e8bb
PB
10014 int op;
10015 int shiftop;
10016 int conds;
10017 int logic_cc;
10018
296e5a0a
PM
10019 /* The only 32 bit insn that's allowed for Thumb1 is the combined
10020 * BL/BLX prefix and suffix.
10021 */
9ee6e8bb
PB
10022 if ((insn & 0xf800e800) != 0xf000e800) {
10023 ARCH(6T2);
10024 }
10025
10026 rn = (insn >> 16) & 0xf;
10027 rs = (insn >> 12) & 0xf;
10028 rd = (insn >> 8) & 0xf;
10029 rm = insn & 0xf;
10030 switch ((insn >> 25) & 0xf) {
10031 case 0: case 1: case 2: case 3:
10032 /* 16-bit instructions. Should never happen. */
10033 abort();
10034 case 4:
10035 if (insn & (1 << 22)) {
ebfe27c5
PM
10036 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10037 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10038 * table branch, TT.
ebfe27c5 10039 */
76eff04d
PM
10040 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10041 arm_dc_feature(s, ARM_FEATURE_V8)) {
10042 /* 0b1110_1001_0111_1111_1110_1001_0111_111
10043 * - SG (v8M only)
10044 * The bulk of the behaviour for this instruction is implemented
10045 * in v7m_handle_execute_nsc(), which deals with the insn when
10046 * it is executed by a CPU in non-secure state from memory
10047 * which is Secure & NonSecure-Callable.
10048 * Here we only need to handle the remaining cases:
10049 * * in NS memory (including the "security extension not
10050 * implemented" case) : NOP
10051 * * in S memory but CPU already secure (clear IT bits)
10052 * We know that the attribute for the memory this insn is
10053 * in must match the current CPU state, because otherwise
10054 * get_phys_addr_pmsav8 would have generated an exception.
10055 */
10056 if (s->v8m_secure) {
10057 /* Like the IT insn, we don't need to generate any code */
10058 s->condexec_cond = 0;
10059 s->condexec_mask = 0;
10060 }
10061 } else if (insn & 0x01200000) {
ebfe27c5
PM
10062 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10063 * - load/store dual (post-indexed)
10064 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10065 * - load/store dual (literal and immediate)
10066 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10067 * - load/store dual (pre-indexed)
10068 */
9ee6e8bb 10069 if (rn == 15) {
ebfe27c5
PM
10070 if (insn & (1 << 21)) {
10071 /* UNPREDICTABLE */
10072 goto illegal_op;
10073 }
7d1b0095 10074 addr = tcg_temp_new_i32();
b0109805 10075 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10076 } else {
b0109805 10077 addr = load_reg(s, rn);
9ee6e8bb
PB
10078 }
10079 offset = (insn & 0xff) * 4;
10080 if ((insn & (1 << 23)) == 0)
10081 offset = -offset;
10082 if (insn & (1 << 24)) {
b0109805 10083 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
10084 offset = 0;
10085 }
10086 if (insn & (1 << 20)) {
10087 /* ldrd */
e2592fad 10088 tmp = tcg_temp_new_i32();
12dcc321 10089 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10090 store_reg(s, rs, tmp);
10091 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10092 tmp = tcg_temp_new_i32();
12dcc321 10093 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10094 store_reg(s, rd, tmp);
9ee6e8bb
PB
10095 } else {
10096 /* strd */
b0109805 10097 tmp = load_reg(s, rs);
12dcc321 10098 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10099 tcg_temp_free_i32(tmp);
b0109805
PB
10100 tcg_gen_addi_i32(addr, addr, 4);
10101 tmp = load_reg(s, rd);
12dcc321 10102 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10103 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10104 }
10105 if (insn & (1 << 21)) {
10106 /* Base writeback. */
b0109805
PB
10107 tcg_gen_addi_i32(addr, addr, offset - 4);
10108 store_reg(s, rn, addr);
10109 } else {
7d1b0095 10110 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10111 }
10112 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
10113 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10114 * - load/store exclusive word
5158de24 10115 * - TT (v8M only)
ebfe27c5
PM
10116 */
10117 if (rs == 15) {
5158de24
PM
10118 if (!(insn & (1 << 20)) &&
10119 arm_dc_feature(s, ARM_FEATURE_M) &&
10120 arm_dc_feature(s, ARM_FEATURE_V8)) {
10121 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10122 * - TT (v8M only)
10123 */
10124 bool alt = insn & (1 << 7);
10125 TCGv_i32 addr, op, ttresp;
10126
10127 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10128 /* we UNDEF for these UNPREDICTABLE cases */
10129 goto illegal_op;
10130 }
10131
10132 if (alt && !s->v8m_secure) {
10133 goto illegal_op;
10134 }
10135
10136 addr = load_reg(s, rn);
10137 op = tcg_const_i32(extract32(insn, 6, 2));
10138 ttresp = tcg_temp_new_i32();
10139 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10140 tcg_temp_free_i32(addr);
10141 tcg_temp_free_i32(op);
10142 store_reg(s, rd, ttresp);
384c6c03 10143 break;
5158de24 10144 }
ebfe27c5
PM
10145 goto illegal_op;
10146 }
39d5492a 10147 addr = tcg_temp_local_new_i32();
98a46317 10148 load_reg_var(s, addr, rn);
426f5abc 10149 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 10150 if (insn & (1 << 20)) {
426f5abc 10151 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 10152 } else {
426f5abc 10153 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 10154 }
39d5492a 10155 tcg_temp_free_i32(addr);
2359bf80 10156 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
10157 /* Table Branch. */
10158 if (rn == 15) {
7d1b0095 10159 addr = tcg_temp_new_i32();
b0109805 10160 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 10161 } else {
b0109805 10162 addr = load_reg(s, rn);
9ee6e8bb 10163 }
b26eefb6 10164 tmp = load_reg(s, rm);
b0109805 10165 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
10166 if (insn & (1 << 4)) {
10167 /* tbh */
b0109805 10168 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10169 tcg_temp_free_i32(tmp);
e2592fad 10170 tmp = tcg_temp_new_i32();
12dcc321 10171 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10172 } else { /* tbb */
7d1b0095 10173 tcg_temp_free_i32(tmp);
e2592fad 10174 tmp = tcg_temp_new_i32();
12dcc321 10175 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10176 }
7d1b0095 10177 tcg_temp_free_i32(addr);
b0109805
PB
10178 tcg_gen_shli_i32(tmp, tmp, 1);
10179 tcg_gen_addi_i32(tmp, tmp, s->pc);
10180 store_reg(s, 15, tmp);
9ee6e8bb 10181 } else {
2359bf80 10182 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 10183 op = (insn >> 4) & 0x3;
2359bf80
MR
10184 switch (op2) {
10185 case 0:
426f5abc 10186 goto illegal_op;
2359bf80
MR
10187 case 1:
10188 /* Load/store exclusive byte/halfword/doubleword */
10189 if (op == 2) {
10190 goto illegal_op;
10191 }
10192 ARCH(7);
10193 break;
10194 case 2:
10195 /* Load-acquire/store-release */
10196 if (op == 3) {
10197 goto illegal_op;
10198 }
10199 /* Fall through */
10200 case 3:
10201 /* Load-acquire/store-release exclusive */
10202 ARCH(8);
10203 break;
426f5abc 10204 }
39d5492a 10205 addr = tcg_temp_local_new_i32();
98a46317 10206 load_reg_var(s, addr, rn);
2359bf80
MR
10207 if (!(op2 & 1)) {
10208 if (insn & (1 << 20)) {
10209 tmp = tcg_temp_new_i32();
10210 switch (op) {
10211 case 0: /* ldab */
9bb6558a
PM
10212 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10213 rs | ISSIsAcqRel);
2359bf80
MR
10214 break;
10215 case 1: /* ldah */
9bb6558a
PM
10216 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10217 rs | ISSIsAcqRel);
2359bf80
MR
10218 break;
10219 case 2: /* lda */
9bb6558a
PM
10220 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10221 rs | ISSIsAcqRel);
2359bf80
MR
10222 break;
10223 default:
10224 abort();
10225 }
10226 store_reg(s, rs, tmp);
10227 } else {
10228 tmp = load_reg(s, rs);
10229 switch (op) {
10230 case 0: /* stlb */
9bb6558a
PM
10231 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10232 rs | ISSIsAcqRel);
2359bf80
MR
10233 break;
10234 case 1: /* stlh */
9bb6558a
PM
10235 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10236 rs | ISSIsAcqRel);
2359bf80
MR
10237 break;
10238 case 2: /* stl */
9bb6558a
PM
10239 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10240 rs | ISSIsAcqRel);
2359bf80
MR
10241 break;
10242 default:
10243 abort();
10244 }
10245 tcg_temp_free_i32(tmp);
10246 }
10247 } else if (insn & (1 << 20)) {
426f5abc 10248 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 10249 } else {
426f5abc 10250 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 10251 }
39d5492a 10252 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10253 }
10254 } else {
10255 /* Load/store multiple, RFE, SRS. */
10256 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10257 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10258 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10259 goto illegal_op;
00115976 10260 }
9ee6e8bb
PB
10261 if (insn & (1 << 20)) {
10262 /* rfe */
b0109805
PB
10263 addr = load_reg(s, rn);
10264 if ((insn & (1 << 24)) == 0)
10265 tcg_gen_addi_i32(addr, addr, -8);
10266 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10267 tmp = tcg_temp_new_i32();
12dcc321 10268 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10269 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10270 tmp2 = tcg_temp_new_i32();
12dcc321 10271 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
10272 if (insn & (1 << 21)) {
10273 /* Base writeback. */
b0109805
PB
10274 if (insn & (1 << 24)) {
10275 tcg_gen_addi_i32(addr, addr, 4);
10276 } else {
10277 tcg_gen_addi_i32(addr, addr, -4);
10278 }
10279 store_reg(s, rn, addr);
10280 } else {
7d1b0095 10281 tcg_temp_free_i32(addr);
9ee6e8bb 10282 }
b0109805 10283 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10284 } else {
10285 /* srs */
81465888
PM
10286 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10287 insn & (1 << 21));
9ee6e8bb
PB
10288 }
10289 } else {
5856d44e 10290 int i, loaded_base = 0;
39d5492a 10291 TCGv_i32 loaded_var;
9ee6e8bb 10292 /* Load/store multiple. */
b0109805 10293 addr = load_reg(s, rn);
9ee6e8bb
PB
10294 offset = 0;
10295 for (i = 0; i < 16; i++) {
10296 if (insn & (1 << i))
10297 offset += 4;
10298 }
10299 if (insn & (1 << 24)) {
b0109805 10300 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10301 }
10302
f764718d 10303 loaded_var = NULL;
9ee6e8bb
PB
10304 for (i = 0; i < 16; i++) {
10305 if ((insn & (1 << i)) == 0)
10306 continue;
10307 if (insn & (1 << 20)) {
10308 /* Load. */
e2592fad 10309 tmp = tcg_temp_new_i32();
12dcc321 10310 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10311 if (i == 15) {
3bb8a96f 10312 gen_bx_excret(s, tmp);
5856d44e
YO
10313 } else if (i == rn) {
10314 loaded_var = tmp;
10315 loaded_base = 1;
9ee6e8bb 10316 } else {
b0109805 10317 store_reg(s, i, tmp);
9ee6e8bb
PB
10318 }
10319 } else {
10320 /* Store. */
b0109805 10321 tmp = load_reg(s, i);
12dcc321 10322 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10323 tcg_temp_free_i32(tmp);
9ee6e8bb 10324 }
b0109805 10325 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10326 }
5856d44e
YO
10327 if (loaded_base) {
10328 store_reg(s, rn, loaded_var);
10329 }
9ee6e8bb
PB
10330 if (insn & (1 << 21)) {
10331 /* Base register writeback. */
10332 if (insn & (1 << 24)) {
b0109805 10333 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10334 }
10335 /* Fault if writeback register is in register list. */
10336 if (insn & (1 << rn))
10337 goto illegal_op;
b0109805
PB
10338 store_reg(s, rn, addr);
10339 } else {
7d1b0095 10340 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10341 }
10342 }
10343 }
10344 break;
2af9ab77
JB
10345 case 5:
10346
9ee6e8bb 10347 op = (insn >> 21) & 0xf;
2af9ab77 10348 if (op == 6) {
62b44f05
AR
10349 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10350 goto illegal_op;
10351 }
2af9ab77
JB
10352 /* Halfword pack. */
10353 tmp = load_reg(s, rn);
10354 tmp2 = load_reg(s, rm);
10355 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10356 if (insn & (1 << 5)) {
10357 /* pkhtb */
10358 if (shift == 0)
10359 shift = 31;
10360 tcg_gen_sari_i32(tmp2, tmp2, shift);
10361 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10362 tcg_gen_ext16u_i32(tmp2, tmp2);
10363 } else {
10364 /* pkhbt */
10365 if (shift)
10366 tcg_gen_shli_i32(tmp2, tmp2, shift);
10367 tcg_gen_ext16u_i32(tmp, tmp);
10368 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10369 }
10370 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10371 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10372 store_reg(s, rd, tmp);
10373 } else {
2af9ab77
JB
10374 /* Data processing register constant shift. */
10375 if (rn == 15) {
7d1b0095 10376 tmp = tcg_temp_new_i32();
2af9ab77
JB
10377 tcg_gen_movi_i32(tmp, 0);
10378 } else {
10379 tmp = load_reg(s, rn);
10380 }
10381 tmp2 = load_reg(s, rm);
10382
10383 shiftop = (insn >> 4) & 3;
10384 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10385 conds = (insn & (1 << 20)) != 0;
10386 logic_cc = (conds && thumb2_logic_op(op));
10387 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10388 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10389 goto illegal_op;
7d1b0095 10390 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10391 if (rd != 15) {
10392 store_reg(s, rd, tmp);
10393 } else {
7d1b0095 10394 tcg_temp_free_i32(tmp);
2af9ab77 10395 }
3174f8e9 10396 }
9ee6e8bb
PB
10397 break;
10398 case 13: /* Misc data processing. */
10399 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10400 if (op < 4 && (insn & 0xf000) != 0xf000)
10401 goto illegal_op;
10402 switch (op) {
10403 case 0: /* Register controlled shift. */
8984bd2e
PB
10404 tmp = load_reg(s, rn);
10405 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10406 if ((insn & 0x70) != 0)
10407 goto illegal_op;
10408 op = (insn >> 21) & 3;
8984bd2e
PB
10409 logic_cc = (insn & (1 << 20)) != 0;
10410 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10411 if (logic_cc)
10412 gen_logic_CC(tmp);
bedb8a6b 10413 store_reg(s, rd, tmp);
9ee6e8bb
PB
10414 break;
10415 case 1: /* Sign/zero extend. */
62b44f05
AR
10416 op = (insn >> 20) & 7;
10417 switch (op) {
10418 case 0: /* SXTAH, SXTH */
10419 case 1: /* UXTAH, UXTH */
10420 case 4: /* SXTAB, SXTB */
10421 case 5: /* UXTAB, UXTB */
10422 break;
10423 case 2: /* SXTAB16, SXTB16 */
10424 case 3: /* UXTAB16, UXTB16 */
10425 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10426 goto illegal_op;
10427 }
10428 break;
10429 default:
10430 goto illegal_op;
10431 }
10432 if (rn != 15) {
10433 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10434 goto illegal_op;
10435 }
10436 }
5e3f878a 10437 tmp = load_reg(s, rm);
9ee6e8bb 10438 shift = (insn >> 4) & 3;
1301f322 10439 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10440 rotate, a shift is sufficient. */
10441 if (shift != 0)
f669df27 10442 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10443 op = (insn >> 20) & 7;
10444 switch (op) {
5e3f878a
PB
10445 case 0: gen_sxth(tmp); break;
10446 case 1: gen_uxth(tmp); break;
10447 case 2: gen_sxtb16(tmp); break;
10448 case 3: gen_uxtb16(tmp); break;
10449 case 4: gen_sxtb(tmp); break;
10450 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10451 default:
10452 g_assert_not_reached();
9ee6e8bb
PB
10453 }
10454 if (rn != 15) {
5e3f878a 10455 tmp2 = load_reg(s, rn);
9ee6e8bb 10456 if ((op >> 1) == 1) {
5e3f878a 10457 gen_add16(tmp, tmp2);
9ee6e8bb 10458 } else {
5e3f878a 10459 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10460 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10461 }
10462 }
5e3f878a 10463 store_reg(s, rd, tmp);
9ee6e8bb
PB
10464 break;
10465 case 2: /* SIMD add/subtract. */
62b44f05
AR
10466 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10467 goto illegal_op;
10468 }
9ee6e8bb
PB
10469 op = (insn >> 20) & 7;
10470 shift = (insn >> 4) & 7;
10471 if ((op & 3) == 3 || (shift & 3) == 3)
10472 goto illegal_op;
6ddbc6e4
PB
10473 tmp = load_reg(s, rn);
10474 tmp2 = load_reg(s, rm);
10475 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10476 tcg_temp_free_i32(tmp2);
6ddbc6e4 10477 store_reg(s, rd, tmp);
9ee6e8bb
PB
10478 break;
10479 case 3: /* Other data processing. */
10480 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10481 if (op < 4) {
10482 /* Saturating add/subtract. */
62b44f05
AR
10483 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10484 goto illegal_op;
10485 }
d9ba4830
PB
10486 tmp = load_reg(s, rn);
10487 tmp2 = load_reg(s, rm);
9ee6e8bb 10488 if (op & 1)
9ef39277 10489 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10490 if (op & 2)
9ef39277 10491 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10492 else
9ef39277 10493 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10494 tcg_temp_free_i32(tmp2);
9ee6e8bb 10495 } else {
62b44f05
AR
10496 switch (op) {
10497 case 0x0a: /* rbit */
10498 case 0x08: /* rev */
10499 case 0x09: /* rev16 */
10500 case 0x0b: /* revsh */
10501 case 0x18: /* clz */
10502 break;
10503 case 0x10: /* sel */
10504 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10505 goto illegal_op;
10506 }
10507 break;
10508 case 0x20: /* crc32/crc32c */
10509 case 0x21:
10510 case 0x22:
10511 case 0x28:
10512 case 0x29:
10513 case 0x2a:
10514 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10515 goto illegal_op;
10516 }
10517 break;
10518 default:
10519 goto illegal_op;
10520 }
d9ba4830 10521 tmp = load_reg(s, rn);
9ee6e8bb
PB
10522 switch (op) {
10523 case 0x0a: /* rbit */
d9ba4830 10524 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10525 break;
10526 case 0x08: /* rev */
66896cb8 10527 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10528 break;
10529 case 0x09: /* rev16 */
d9ba4830 10530 gen_rev16(tmp);
9ee6e8bb
PB
10531 break;
10532 case 0x0b: /* revsh */
d9ba4830 10533 gen_revsh(tmp);
9ee6e8bb
PB
10534 break;
10535 case 0x10: /* sel */
d9ba4830 10536 tmp2 = load_reg(s, rm);
7d1b0095 10537 tmp3 = tcg_temp_new_i32();
0ecb72a5 10538 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10539 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10540 tcg_temp_free_i32(tmp3);
10541 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10542 break;
10543 case 0x18: /* clz */
7539a012 10544 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10545 break;
eb0ecd5a
WN
10546 case 0x20:
10547 case 0x21:
10548 case 0x22:
10549 case 0x28:
10550 case 0x29:
10551 case 0x2a:
10552 {
10553 /* crc32/crc32c */
10554 uint32_t sz = op & 0x3;
10555 uint32_t c = op & 0x8;
10556
eb0ecd5a 10557 tmp2 = load_reg(s, rm);
aa633469
PM
10558 if (sz == 0) {
10559 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10560 } else if (sz == 1) {
10561 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10562 }
eb0ecd5a
WN
10563 tmp3 = tcg_const_i32(1 << sz);
10564 if (c) {
10565 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10566 } else {
10567 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10568 }
10569 tcg_temp_free_i32(tmp2);
10570 tcg_temp_free_i32(tmp3);
10571 break;
10572 }
9ee6e8bb 10573 default:
62b44f05 10574 g_assert_not_reached();
9ee6e8bb
PB
10575 }
10576 }
d9ba4830 10577 store_reg(s, rd, tmp);
9ee6e8bb
PB
10578 break;
10579 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10580 switch ((insn >> 20) & 7) {
10581 case 0: /* 32 x 32 -> 32 */
10582 case 7: /* Unsigned sum of absolute differences. */
10583 break;
10584 case 1: /* 16 x 16 -> 32 */
10585 case 2: /* Dual multiply add. */
10586 case 3: /* 32 * 16 -> 32msb */
10587 case 4: /* Dual multiply subtract. */
10588 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10589 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10590 goto illegal_op;
10591 }
10592 break;
10593 }
9ee6e8bb 10594 op = (insn >> 4) & 0xf;
d9ba4830
PB
10595 tmp = load_reg(s, rn);
10596 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10597 switch ((insn >> 20) & 7) {
10598 case 0: /* 32 x 32 -> 32 */
d9ba4830 10599 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10600 tcg_temp_free_i32(tmp2);
9ee6e8bb 10601 if (rs != 15) {
d9ba4830 10602 tmp2 = load_reg(s, rs);
9ee6e8bb 10603 if (op)
d9ba4830 10604 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10605 else
d9ba4830 10606 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10607 tcg_temp_free_i32(tmp2);
9ee6e8bb 10608 }
9ee6e8bb
PB
10609 break;
10610 case 1: /* 16 x 16 -> 32 */
d9ba4830 10611 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10612 tcg_temp_free_i32(tmp2);
9ee6e8bb 10613 if (rs != 15) {
d9ba4830 10614 tmp2 = load_reg(s, rs);
9ef39277 10615 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10616 tcg_temp_free_i32(tmp2);
9ee6e8bb 10617 }
9ee6e8bb
PB
10618 break;
10619 case 2: /* Dual multiply add. */
10620 case 4: /* Dual multiply subtract. */
10621 if (op)
d9ba4830
PB
10622 gen_swap_half(tmp2);
10623 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10624 if (insn & (1 << 22)) {
e1d177b9 10625 /* This subtraction cannot overflow. */
d9ba4830 10626 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10627 } else {
e1d177b9
PM
10628 /* This addition cannot overflow 32 bits;
10629 * however it may overflow considered as a signed
10630 * operation, in which case we must set the Q flag.
10631 */
9ef39277 10632 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10633 }
7d1b0095 10634 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10635 if (rs != 15)
10636 {
d9ba4830 10637 tmp2 = load_reg(s, rs);
9ef39277 10638 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10639 tcg_temp_free_i32(tmp2);
9ee6e8bb 10640 }
9ee6e8bb
PB
10641 break;
10642 case 3: /* 32 * 16 -> 32msb */
10643 if (op)
d9ba4830 10644 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10645 else
d9ba4830 10646 gen_sxth(tmp2);
a7812ae4
PB
10647 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10648 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10649 tmp = tcg_temp_new_i32();
ecc7b3aa 10650 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10651 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10652 if (rs != 15)
10653 {
d9ba4830 10654 tmp2 = load_reg(s, rs);
9ef39277 10655 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10656 tcg_temp_free_i32(tmp2);
9ee6e8bb 10657 }
9ee6e8bb 10658 break;
838fa72d
AJ
10659 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10660 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10661 if (rs != 15) {
838fa72d
AJ
10662 tmp = load_reg(s, rs);
10663 if (insn & (1 << 20)) {
10664 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10665 } else {
838fa72d 10666 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10667 }
2c0262af 10668 }
838fa72d
AJ
10669 if (insn & (1 << 4)) {
10670 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10671 }
10672 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10673 tmp = tcg_temp_new_i32();
ecc7b3aa 10674 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10675 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10676 break;
10677 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10678 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10679 tcg_temp_free_i32(tmp2);
9ee6e8bb 10680 if (rs != 15) {
d9ba4830
PB
10681 tmp2 = load_reg(s, rs);
10682 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10683 tcg_temp_free_i32(tmp2);
5fd46862 10684 }
9ee6e8bb 10685 break;
2c0262af 10686 }
d9ba4830 10687 store_reg(s, rd, tmp);
2c0262af 10688 break;
9ee6e8bb
PB
10689 case 6: case 7: /* 64-bit multiply, Divide. */
10690 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10691 tmp = load_reg(s, rn);
10692 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10693 if ((op & 0x50) == 0x10) {
10694 /* sdiv, udiv */
d614a513 10695 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10696 goto illegal_op;
47789990 10697 }
9ee6e8bb 10698 if (op & 0x20)
5e3f878a 10699 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10700 else
5e3f878a 10701 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10702 tcg_temp_free_i32(tmp2);
5e3f878a 10703 store_reg(s, rd, tmp);
9ee6e8bb
PB
10704 } else if ((op & 0xe) == 0xc) {
10705 /* Dual multiply accumulate long. */
62b44f05
AR
10706 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10707 tcg_temp_free_i32(tmp);
10708 tcg_temp_free_i32(tmp2);
10709 goto illegal_op;
10710 }
9ee6e8bb 10711 if (op & 1)
5e3f878a
PB
10712 gen_swap_half(tmp2);
10713 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10714 if (op & 0x10) {
5e3f878a 10715 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10716 } else {
5e3f878a 10717 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10718 }
7d1b0095 10719 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10720 /* BUGFIX */
10721 tmp64 = tcg_temp_new_i64();
10722 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10723 tcg_temp_free_i32(tmp);
a7812ae4
PB
10724 gen_addq(s, tmp64, rs, rd);
10725 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10726 tcg_temp_free_i64(tmp64);
2c0262af 10727 } else {
9ee6e8bb
PB
10728 if (op & 0x20) {
10729 /* Unsigned 64-bit multiply */
a7812ae4 10730 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10731 } else {
9ee6e8bb
PB
10732 if (op & 8) {
10733 /* smlalxy */
62b44f05
AR
10734 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10735 tcg_temp_free_i32(tmp2);
10736 tcg_temp_free_i32(tmp);
10737 goto illegal_op;
10738 }
5e3f878a 10739 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10740 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10741 tmp64 = tcg_temp_new_i64();
10742 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10743 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10744 } else {
10745 /* Signed 64-bit multiply */
a7812ae4 10746 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10747 }
b5ff1b31 10748 }
9ee6e8bb
PB
10749 if (op & 4) {
10750 /* umaal */
62b44f05
AR
10751 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10752 tcg_temp_free_i64(tmp64);
10753 goto illegal_op;
10754 }
a7812ae4
PB
10755 gen_addq_lo(s, tmp64, rs);
10756 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10757 } else if (op & 0x40) {
10758 /* 64-bit accumulate. */
a7812ae4 10759 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10760 }
a7812ae4 10761 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10762 tcg_temp_free_i64(tmp64);
5fd46862 10763 }
2c0262af 10764 break;
9ee6e8bb
PB
10765 }
10766 break;
10767 case 6: case 7: case 14: case 15:
10768 /* Coprocessor. */
7517748e
PM
10769 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10770 /* We don't currently implement M profile FP support,
10771 * so this entire space should give a NOCP fault.
10772 */
10773 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10774 default_exception_el(s));
10775 break;
10776 }
9ee6e8bb
PB
10777 if (((insn >> 24) & 3) == 3) {
10778 /* Translate into the equivalent ARM encoding. */
f06053e3 10779 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10780 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10781 goto illegal_op;
7dcc1f89 10782 }
6a57f3eb 10783 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10784 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10785 goto illegal_op;
10786 }
9ee6e8bb
PB
10787 } else {
10788 if (insn & (1 << 28))
10789 goto illegal_op;
7dcc1f89 10790 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10791 goto illegal_op;
7dcc1f89 10792 }
9ee6e8bb
PB
10793 }
10794 break;
10795 case 8: case 9: case 10: case 11:
10796 if (insn & (1 << 15)) {
10797 /* Branches, misc control. */
10798 if (insn & 0x5000) {
10799 /* Unconditional branch. */
10800 /* signextend(hw1[10:0]) -> offset[:12]. */
10801 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10802 /* hw1[10:0] -> offset[11:1]. */
10803 offset |= (insn & 0x7ff) << 1;
10804 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10805 offset[24:22] already have the same value because of the
10806 sign extension above. */
10807 offset ^= ((~insn) & (1 << 13)) << 10;
10808 offset ^= ((~insn) & (1 << 11)) << 11;
10809
9ee6e8bb
PB
10810 if (insn & (1 << 14)) {
10811 /* Branch and link. */
3174f8e9 10812 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10813 }
3b46e624 10814
b0109805 10815 offset += s->pc;
9ee6e8bb
PB
10816 if (insn & (1 << 12)) {
10817 /* b/bl */
b0109805 10818 gen_jmp(s, offset);
9ee6e8bb
PB
10819 } else {
10820 /* blx */
b0109805 10821 offset &= ~(uint32_t)2;
be5e7a76 10822 /* thumb2 bx, no need to check */
b0109805 10823 gen_bx_im(s, offset);
2c0262af 10824 }
9ee6e8bb
PB
10825 } else if (((insn >> 23) & 7) == 7) {
10826 /* Misc control */
10827 if (insn & (1 << 13))
10828 goto illegal_op;
10829
10830 if (insn & (1 << 26)) {
001b3cab
PM
10831 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10832 goto illegal_op;
10833 }
37e6456e
PM
10834 if (!(insn & (1 << 20))) {
10835 /* Hypervisor call (v7) */
10836 int imm16 = extract32(insn, 16, 4) << 12
10837 | extract32(insn, 0, 12);
10838 ARCH(7);
10839 if (IS_USER(s)) {
10840 goto illegal_op;
10841 }
10842 gen_hvc(s, imm16);
10843 } else {
10844 /* Secure monitor call (v6+) */
10845 ARCH(6K);
10846 if (IS_USER(s)) {
10847 goto illegal_op;
10848 }
10849 gen_smc(s);
10850 }
2c0262af 10851 } else {
9ee6e8bb
PB
10852 op = (insn >> 20) & 7;
10853 switch (op) {
10854 case 0: /* msr cpsr. */
b53d8923 10855 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10856 tmp = load_reg(s, rn);
b28b3377
PM
10857 /* the constant is the mask and SYSm fields */
10858 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10859 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10860 tcg_temp_free_i32(addr);
7d1b0095 10861 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10862 gen_lookup_tb(s);
10863 break;
10864 }
10865 /* fall through */
10866 case 1: /* msr spsr. */
b53d8923 10867 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10868 goto illegal_op;
b53d8923 10869 }
8bfd0550
PM
10870
10871 if (extract32(insn, 5, 1)) {
10872 /* MSR (banked) */
10873 int sysm = extract32(insn, 8, 4) |
10874 (extract32(insn, 4, 1) << 4);
10875 int r = op & 1;
10876
10877 gen_msr_banked(s, r, sysm, rm);
10878 break;
10879 }
10880
10881 /* MSR (for PSRs) */
2fbac54b
FN
10882 tmp = load_reg(s, rn);
10883 if (gen_set_psr(s,
7dcc1f89 10884 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10885 op == 1, tmp))
9ee6e8bb
PB
10886 goto illegal_op;
10887 break;
10888 case 2: /* cps, nop-hint. */
10889 if (((insn >> 8) & 7) == 0) {
10890 gen_nop_hint(s, insn & 0xff);
10891 }
10892 /* Implemented as NOP in user mode. */
10893 if (IS_USER(s))
10894 break;
10895 offset = 0;
10896 imm = 0;
10897 if (insn & (1 << 10)) {
10898 if (insn & (1 << 7))
10899 offset |= CPSR_A;
10900 if (insn & (1 << 6))
10901 offset |= CPSR_I;
10902 if (insn & (1 << 5))
10903 offset |= CPSR_F;
10904 if (insn & (1 << 9))
10905 imm = CPSR_A | CPSR_I | CPSR_F;
10906 }
10907 if (insn & (1 << 8)) {
10908 offset |= 0x1f;
10909 imm |= (insn & 0x1f);
10910 }
10911 if (offset) {
2fbac54b 10912 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10913 }
10914 break;
10915 case 3: /* Special control operations. */
426f5abc 10916 ARCH(7);
9ee6e8bb
PB
10917 op = (insn >> 4) & 0xf;
10918 switch (op) {
10919 case 2: /* clrex */
426f5abc 10920 gen_clrex(s);
9ee6e8bb
PB
10921 break;
10922 case 4: /* dsb */
10923 case 5: /* dmb */
61e4c432 10924 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10925 break;
6df99dec
SS
10926 case 6: /* isb */
10927 /* We need to break the TB after this insn
10928 * to execute self-modifying code correctly
10929 * and also to take any pending interrupts
10930 * immediately.
10931 */
0b609cc1 10932 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 10933 break;
9ee6e8bb
PB
10934 default:
10935 goto illegal_op;
10936 }
10937 break;
10938 case 4: /* bxj */
9d7c59c8
PM
10939 /* Trivial implementation equivalent to bx.
10940 * This instruction doesn't exist at all for M-profile.
10941 */
10942 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10943 goto illegal_op;
10944 }
d9ba4830
PB
10945 tmp = load_reg(s, rn);
10946 gen_bx(s, tmp);
9ee6e8bb
PB
10947 break;
10948 case 5: /* Exception return. */
b8b45b68
RV
10949 if (IS_USER(s)) {
10950 goto illegal_op;
10951 }
10952 if (rn != 14 || rd != 15) {
10953 goto illegal_op;
10954 }
10955 tmp = load_reg(s, rn);
10956 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10957 gen_exception_return(s, tmp);
10958 break;
8bfd0550 10959 case 6: /* MRS */
43ac6574
PM
10960 if (extract32(insn, 5, 1) &&
10961 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10962 /* MRS (banked) */
10963 int sysm = extract32(insn, 16, 4) |
10964 (extract32(insn, 4, 1) << 4);
10965
10966 gen_mrs_banked(s, 0, sysm, rd);
10967 break;
10968 }
10969
3d54026f
PM
10970 if (extract32(insn, 16, 4) != 0xf) {
10971 goto illegal_op;
10972 }
10973 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10974 extract32(insn, 0, 8) != 0) {
10975 goto illegal_op;
10976 }
10977
8bfd0550 10978 /* mrs cpsr */
7d1b0095 10979 tmp = tcg_temp_new_i32();
b53d8923 10980 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10981 addr = tcg_const_i32(insn & 0xff);
10982 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10983 tcg_temp_free_i32(addr);
9ee6e8bb 10984 } else {
9ef39277 10985 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10986 }
8984bd2e 10987 store_reg(s, rd, tmp);
9ee6e8bb 10988 break;
8bfd0550 10989 case 7: /* MRS */
43ac6574
PM
10990 if (extract32(insn, 5, 1) &&
10991 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10992 /* MRS (banked) */
10993 int sysm = extract32(insn, 16, 4) |
10994 (extract32(insn, 4, 1) << 4);
10995
10996 gen_mrs_banked(s, 1, sysm, rd);
10997 break;
10998 }
10999
11000 /* mrs spsr. */
9ee6e8bb 11001 /* Not accessible in user mode. */
b53d8923 11002 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11003 goto illegal_op;
b53d8923 11004 }
3d54026f
PM
11005
11006 if (extract32(insn, 16, 4) != 0xf ||
11007 extract32(insn, 0, 8) != 0) {
11008 goto illegal_op;
11009 }
11010
d9ba4830
PB
11011 tmp = load_cpu_field(spsr);
11012 store_reg(s, rd, tmp);
9ee6e8bb 11013 break;
2c0262af
FB
11014 }
11015 }
9ee6e8bb
PB
11016 } else {
11017 /* Conditional branch. */
11018 op = (insn >> 22) & 0xf;
11019 /* Generate a conditional jump to next instruction. */
11020 s->condlabel = gen_new_label();
39fb730a 11021 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
11022 s->condjmp = 1;
11023
11024 /* offset[11:1] = insn[10:0] */
11025 offset = (insn & 0x7ff) << 1;
11026 /* offset[17:12] = insn[21:16]. */
11027 offset |= (insn & 0x003f0000) >> 4;
11028 /* offset[31:20] = insn[26]. */
11029 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11030 /* offset[18] = insn[13]. */
11031 offset |= (insn & (1 << 13)) << 5;
11032 /* offset[19] = insn[11]. */
11033 offset |= (insn & (1 << 11)) << 8;
11034
11035 /* jump to the offset */
b0109805 11036 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
11037 }
11038 } else {
11039 /* Data processing immediate. */
11040 if (insn & (1 << 25)) {
11041 if (insn & (1 << 24)) {
11042 if (insn & (1 << 20))
11043 goto illegal_op;
11044 /* Bitfield/Saturate. */
11045 op = (insn >> 21) & 7;
11046 imm = insn & 0x1f;
11047 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 11048 if (rn == 15) {
7d1b0095 11049 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
11050 tcg_gen_movi_i32(tmp, 0);
11051 } else {
11052 tmp = load_reg(s, rn);
11053 }
9ee6e8bb
PB
11054 switch (op) {
11055 case 2: /* Signed bitfield extract. */
11056 imm++;
11057 if (shift + imm > 32)
11058 goto illegal_op;
59a71b4c
RH
11059 if (imm < 32) {
11060 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11061 }
9ee6e8bb
PB
11062 break;
11063 case 6: /* Unsigned bitfield extract. */
11064 imm++;
11065 if (shift + imm > 32)
11066 goto illegal_op;
59a71b4c
RH
11067 if (imm < 32) {
11068 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11069 }
9ee6e8bb
PB
11070 break;
11071 case 3: /* Bitfield insert/clear. */
11072 if (imm < shift)
11073 goto illegal_op;
11074 imm = imm + 1 - shift;
11075 if (imm != 32) {
6ddbc6e4 11076 tmp2 = load_reg(s, rd);
d593c48e 11077 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 11078 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11079 }
11080 break;
11081 case 7:
11082 goto illegal_op;
11083 default: /* Saturate. */
9ee6e8bb
PB
11084 if (shift) {
11085 if (op & 1)
6ddbc6e4 11086 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 11087 else
6ddbc6e4 11088 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 11089 }
6ddbc6e4 11090 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
11091 if (op & 4) {
11092 /* Unsigned. */
62b44f05
AR
11093 if ((op & 1) && shift == 0) {
11094 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11095 tcg_temp_free_i32(tmp);
11096 tcg_temp_free_i32(tmp2);
11097 goto illegal_op;
11098 }
9ef39277 11099 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11100 } else {
9ef39277 11101 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 11102 }
2c0262af 11103 } else {
9ee6e8bb 11104 /* Signed. */
62b44f05
AR
11105 if ((op & 1) && shift == 0) {
11106 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11107 tcg_temp_free_i32(tmp);
11108 tcg_temp_free_i32(tmp2);
11109 goto illegal_op;
11110 }
9ef39277 11111 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11112 } else {
9ef39277 11113 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 11114 }
2c0262af 11115 }
b75263d6 11116 tcg_temp_free_i32(tmp2);
9ee6e8bb 11117 break;
2c0262af 11118 }
6ddbc6e4 11119 store_reg(s, rd, tmp);
9ee6e8bb
PB
11120 } else {
11121 imm = ((insn & 0x04000000) >> 15)
11122 | ((insn & 0x7000) >> 4) | (insn & 0xff);
11123 if (insn & (1 << 22)) {
11124 /* 16-bit immediate. */
11125 imm |= (insn >> 4) & 0xf000;
11126 if (insn & (1 << 23)) {
11127 /* movt */
5e3f878a 11128 tmp = load_reg(s, rd);
86831435 11129 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 11130 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 11131 } else {
9ee6e8bb 11132 /* movw */
7d1b0095 11133 tmp = tcg_temp_new_i32();
5e3f878a 11134 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
11135 }
11136 } else {
9ee6e8bb
PB
11137 /* Add/sub 12-bit immediate. */
11138 if (rn == 15) {
b0109805 11139 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 11140 if (insn & (1 << 23))
b0109805 11141 offset -= imm;
9ee6e8bb 11142 else
b0109805 11143 offset += imm;
7d1b0095 11144 tmp = tcg_temp_new_i32();
5e3f878a 11145 tcg_gen_movi_i32(tmp, offset);
2c0262af 11146 } else {
5e3f878a 11147 tmp = load_reg(s, rn);
9ee6e8bb 11148 if (insn & (1 << 23))
5e3f878a 11149 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 11150 else
5e3f878a 11151 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 11152 }
9ee6e8bb 11153 }
5e3f878a 11154 store_reg(s, rd, tmp);
191abaa2 11155 }
9ee6e8bb
PB
11156 } else {
11157 int shifter_out = 0;
11158 /* modified 12-bit immediate. */
11159 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11160 imm = (insn & 0xff);
11161 switch (shift) {
11162 case 0: /* XY */
11163 /* Nothing to do. */
11164 break;
11165 case 1: /* 00XY00XY */
11166 imm |= imm << 16;
11167 break;
11168 case 2: /* XY00XY00 */
11169 imm |= imm << 16;
11170 imm <<= 8;
11171 break;
11172 case 3: /* XYXYXYXY */
11173 imm |= imm << 16;
11174 imm |= imm << 8;
11175 break;
11176 default: /* Rotated constant. */
11177 shift = (shift << 1) | (imm >> 7);
11178 imm |= 0x80;
11179 imm = imm << (32 - shift);
11180 shifter_out = 1;
11181 break;
b5ff1b31 11182 }
7d1b0095 11183 tmp2 = tcg_temp_new_i32();
3174f8e9 11184 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 11185 rn = (insn >> 16) & 0xf;
3174f8e9 11186 if (rn == 15) {
7d1b0095 11187 tmp = tcg_temp_new_i32();
3174f8e9
FN
11188 tcg_gen_movi_i32(tmp, 0);
11189 } else {
11190 tmp = load_reg(s, rn);
11191 }
9ee6e8bb
PB
11192 op = (insn >> 21) & 0xf;
11193 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 11194 shifter_out, tmp, tmp2))
9ee6e8bb 11195 goto illegal_op;
7d1b0095 11196 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11197 rd = (insn >> 8) & 0xf;
11198 if (rd != 15) {
3174f8e9
FN
11199 store_reg(s, rd, tmp);
11200 } else {
7d1b0095 11201 tcg_temp_free_i32(tmp);
2c0262af 11202 }
2c0262af 11203 }
9ee6e8bb
PB
11204 }
11205 break;
11206 case 12: /* Load/store single data item. */
11207 {
11208 int postinc = 0;
11209 int writeback = 0;
a99caa48 11210 int memidx;
9bb6558a
PM
11211 ISSInfo issinfo;
11212
9ee6e8bb 11213 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 11214 if (disas_neon_ls_insn(s, insn)) {
c1713132 11215 goto illegal_op;
7dcc1f89 11216 }
9ee6e8bb
PB
11217 break;
11218 }
a2fdc890
PM
11219 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11220 if (rs == 15) {
11221 if (!(insn & (1 << 20))) {
11222 goto illegal_op;
11223 }
11224 if (op != 2) {
11225 /* Byte or halfword load space with dest == r15 : memory hints.
11226 * Catch them early so we don't emit pointless addressing code.
11227 * This space is a mix of:
11228 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11229 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11230 * cores)
11231 * unallocated hints, which must be treated as NOPs
11232 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11233 * which is easiest for the decoding logic
11234 * Some space which must UNDEF
11235 */
11236 int op1 = (insn >> 23) & 3;
11237 int op2 = (insn >> 6) & 0x3f;
11238 if (op & 2) {
11239 goto illegal_op;
11240 }
11241 if (rn == 15) {
02afbf64
PM
11242 /* UNPREDICTABLE, unallocated hint or
11243 * PLD/PLDW/PLI (literal)
11244 */
2eea841c 11245 return;
a2fdc890
PM
11246 }
11247 if (op1 & 1) {
2eea841c 11248 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11249 }
11250 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 11251 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11252 }
11253 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 11254 goto illegal_op;
a2fdc890
PM
11255 }
11256 }
a99caa48 11257 memidx = get_mem_index(s);
9ee6e8bb 11258 if (rn == 15) {
7d1b0095 11259 addr = tcg_temp_new_i32();
9ee6e8bb
PB
11260 /* PC relative. */
11261 /* s->pc has already been incremented by 4. */
11262 imm = s->pc & 0xfffffffc;
11263 if (insn & (1 << 23))
11264 imm += insn & 0xfff;
11265 else
11266 imm -= insn & 0xfff;
b0109805 11267 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 11268 } else {
b0109805 11269 addr = load_reg(s, rn);
9ee6e8bb
PB
11270 if (insn & (1 << 23)) {
11271 /* Positive offset. */
11272 imm = insn & 0xfff;
b0109805 11273 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 11274 } else {
9ee6e8bb 11275 imm = insn & 0xff;
2a0308c5
PM
11276 switch ((insn >> 8) & 0xf) {
11277 case 0x0: /* Shifted Register. */
9ee6e8bb 11278 shift = (insn >> 4) & 0xf;
2a0308c5
PM
11279 if (shift > 3) {
11280 tcg_temp_free_i32(addr);
18c9b560 11281 goto illegal_op;
2a0308c5 11282 }
b26eefb6 11283 tmp = load_reg(s, rm);
9ee6e8bb 11284 if (shift)
b26eefb6 11285 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11286 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11287 tcg_temp_free_i32(tmp);
9ee6e8bb 11288 break;
2a0308c5 11289 case 0xc: /* Negative offset. */
b0109805 11290 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11291 break;
2a0308c5 11292 case 0xe: /* User privilege. */
b0109805 11293 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11294 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11295 break;
2a0308c5 11296 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11297 imm = -imm;
11298 /* Fall through. */
2a0308c5 11299 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11300 postinc = 1;
11301 writeback = 1;
11302 break;
2a0308c5 11303 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11304 imm = -imm;
11305 /* Fall through. */
2a0308c5 11306 case 0xf: /* Pre-increment. */
b0109805 11307 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
11308 writeback = 1;
11309 break;
11310 default:
2a0308c5 11311 tcg_temp_free_i32(addr);
b7bcbe95 11312 goto illegal_op;
9ee6e8bb
PB
11313 }
11314 }
11315 }
9bb6558a
PM
11316
11317 issinfo = writeback ? ISSInvalid : rs;
11318
9ee6e8bb
PB
11319 if (insn & (1 << 20)) {
11320 /* Load. */
5a839c0d 11321 tmp = tcg_temp_new_i32();
a2fdc890 11322 switch (op) {
5a839c0d 11323 case 0:
9bb6558a 11324 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11325 break;
11326 case 4:
9bb6558a 11327 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11328 break;
11329 case 1:
9bb6558a 11330 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11331 break;
11332 case 5:
9bb6558a 11333 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11334 break;
11335 case 2:
9bb6558a 11336 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11337 break;
2a0308c5 11338 default:
5a839c0d 11339 tcg_temp_free_i32(tmp);
2a0308c5
PM
11340 tcg_temp_free_i32(addr);
11341 goto illegal_op;
a2fdc890
PM
11342 }
11343 if (rs == 15) {
3bb8a96f 11344 gen_bx_excret(s, tmp);
9ee6e8bb 11345 } else {
a2fdc890 11346 store_reg(s, rs, tmp);
9ee6e8bb
PB
11347 }
11348 } else {
11349 /* Store. */
b0109805 11350 tmp = load_reg(s, rs);
9ee6e8bb 11351 switch (op) {
5a839c0d 11352 case 0:
9bb6558a 11353 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11354 break;
11355 case 1:
9bb6558a 11356 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11357 break;
11358 case 2:
9bb6558a 11359 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11360 break;
2a0308c5 11361 default:
5a839c0d 11362 tcg_temp_free_i32(tmp);
2a0308c5
PM
11363 tcg_temp_free_i32(addr);
11364 goto illegal_op;
b7bcbe95 11365 }
5a839c0d 11366 tcg_temp_free_i32(tmp);
2c0262af 11367 }
9ee6e8bb 11368 if (postinc)
b0109805
PB
11369 tcg_gen_addi_i32(addr, addr, imm);
11370 if (writeback) {
11371 store_reg(s, rn, addr);
11372 } else {
7d1b0095 11373 tcg_temp_free_i32(addr);
b0109805 11374 }
9ee6e8bb
PB
11375 }
11376 break;
11377 default:
11378 goto illegal_op;
2c0262af 11379 }
2eea841c 11380 return;
9ee6e8bb 11381illegal_op:
2eea841c
PM
11382 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11383 default_exception_el(s));
2c0262af
FB
11384}
11385
296e5a0a 11386static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11387{
296e5a0a 11388 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11389 int32_t offset;
11390 int i;
39d5492a
PM
11391 TCGv_i32 tmp;
11392 TCGv_i32 tmp2;
11393 TCGv_i32 addr;
99c475ab 11394
99c475ab
FB
11395 switch (insn >> 12) {
11396 case 0: case 1:
396e467c 11397
99c475ab
FB
11398 rd = insn & 7;
11399 op = (insn >> 11) & 3;
11400 if (op == 3) {
11401 /* add/subtract */
11402 rn = (insn >> 3) & 7;
396e467c 11403 tmp = load_reg(s, rn);
99c475ab
FB
11404 if (insn & (1 << 10)) {
11405 /* immediate */
7d1b0095 11406 tmp2 = tcg_temp_new_i32();
396e467c 11407 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11408 } else {
11409 /* reg */
11410 rm = (insn >> 6) & 7;
396e467c 11411 tmp2 = load_reg(s, rm);
99c475ab 11412 }
9ee6e8bb
PB
11413 if (insn & (1 << 9)) {
11414 if (s->condexec_mask)
396e467c 11415 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11416 else
72485ec4 11417 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11418 } else {
11419 if (s->condexec_mask)
396e467c 11420 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11421 else
72485ec4 11422 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11423 }
7d1b0095 11424 tcg_temp_free_i32(tmp2);
396e467c 11425 store_reg(s, rd, tmp);
99c475ab
FB
11426 } else {
11427 /* shift immediate */
11428 rm = (insn >> 3) & 7;
11429 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11430 tmp = load_reg(s, rm);
11431 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11432 if (!s->condexec_mask)
11433 gen_logic_CC(tmp);
11434 store_reg(s, rd, tmp);
99c475ab
FB
11435 }
11436 break;
11437 case 2: case 3:
11438 /* arithmetic large immediate */
11439 op = (insn >> 11) & 3;
11440 rd = (insn >> 8) & 0x7;
396e467c 11441 if (op == 0) { /* mov */
7d1b0095 11442 tmp = tcg_temp_new_i32();
396e467c 11443 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11444 if (!s->condexec_mask)
396e467c
FN
11445 gen_logic_CC(tmp);
11446 store_reg(s, rd, tmp);
11447 } else {
11448 tmp = load_reg(s, rd);
7d1b0095 11449 tmp2 = tcg_temp_new_i32();
396e467c
FN
11450 tcg_gen_movi_i32(tmp2, insn & 0xff);
11451 switch (op) {
11452 case 1: /* cmp */
72485ec4 11453 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11454 tcg_temp_free_i32(tmp);
11455 tcg_temp_free_i32(tmp2);
396e467c
FN
11456 break;
11457 case 2: /* add */
11458 if (s->condexec_mask)
11459 tcg_gen_add_i32(tmp, tmp, tmp2);
11460 else
72485ec4 11461 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11462 tcg_temp_free_i32(tmp2);
396e467c
FN
11463 store_reg(s, rd, tmp);
11464 break;
11465 case 3: /* sub */
11466 if (s->condexec_mask)
11467 tcg_gen_sub_i32(tmp, tmp, tmp2);
11468 else
72485ec4 11469 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11470 tcg_temp_free_i32(tmp2);
396e467c
FN
11471 store_reg(s, rd, tmp);
11472 break;
11473 }
99c475ab 11474 }
99c475ab
FB
11475 break;
11476 case 4:
11477 if (insn & (1 << 11)) {
11478 rd = (insn >> 8) & 7;
5899f386
FB
11479 /* load pc-relative. Bit 1 of PC is ignored. */
11480 val = s->pc + 2 + ((insn & 0xff) * 4);
11481 val &= ~(uint32_t)2;
7d1b0095 11482 addr = tcg_temp_new_i32();
b0109805 11483 tcg_gen_movi_i32(addr, val);
c40c8556 11484 tmp = tcg_temp_new_i32();
9bb6558a
PM
11485 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11486 rd | ISSIs16Bit);
7d1b0095 11487 tcg_temp_free_i32(addr);
b0109805 11488 store_reg(s, rd, tmp);
99c475ab
FB
11489 break;
11490 }
11491 if (insn & (1 << 10)) {
ebfe27c5
PM
11492 /* 0b0100_01xx_xxxx_xxxx
11493 * - data processing extended, branch and exchange
11494 */
99c475ab
FB
11495 rd = (insn & 7) | ((insn >> 4) & 8);
11496 rm = (insn >> 3) & 0xf;
11497 op = (insn >> 8) & 3;
11498 switch (op) {
11499 case 0: /* add */
396e467c
FN
11500 tmp = load_reg(s, rd);
11501 tmp2 = load_reg(s, rm);
11502 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11503 tcg_temp_free_i32(tmp2);
396e467c 11504 store_reg(s, rd, tmp);
99c475ab
FB
11505 break;
11506 case 1: /* cmp */
396e467c
FN
11507 tmp = load_reg(s, rd);
11508 tmp2 = load_reg(s, rm);
72485ec4 11509 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11510 tcg_temp_free_i32(tmp2);
11511 tcg_temp_free_i32(tmp);
99c475ab
FB
11512 break;
11513 case 2: /* mov/cpy */
396e467c
FN
11514 tmp = load_reg(s, rm);
11515 store_reg(s, rd, tmp);
99c475ab 11516 break;
ebfe27c5
PM
11517 case 3:
11518 {
11519 /* 0b0100_0111_xxxx_xxxx
11520 * - branch [and link] exchange thumb register
11521 */
11522 bool link = insn & (1 << 7);
11523
fb602cb7 11524 if (insn & 3) {
ebfe27c5
PM
11525 goto undef;
11526 }
11527 if (link) {
be5e7a76 11528 ARCH(5);
ebfe27c5 11529 }
fb602cb7
PM
11530 if ((insn & 4)) {
11531 /* BXNS/BLXNS: only exists for v8M with the
11532 * security extensions, and always UNDEF if NonSecure.
11533 * We don't implement these in the user-only mode
11534 * either (in theory you can use them from Secure User
11535 * mode but they are too tied in to system emulation.)
11536 */
11537 if (!s->v8m_secure || IS_USER_ONLY) {
11538 goto undef;
11539 }
11540 if (link) {
3e3fa230 11541 gen_blxns(s, rm);
fb602cb7
PM
11542 } else {
11543 gen_bxns(s, rm);
11544 }
11545 break;
11546 }
11547 /* BLX/BX */
ebfe27c5
PM
11548 tmp = load_reg(s, rm);
11549 if (link) {
99c475ab 11550 val = (uint32_t)s->pc | 1;
7d1b0095 11551 tmp2 = tcg_temp_new_i32();
b0109805
PB
11552 tcg_gen_movi_i32(tmp2, val);
11553 store_reg(s, 14, tmp2);
3bb8a96f
PM
11554 gen_bx(s, tmp);
11555 } else {
11556 /* Only BX works as exception-return, not BLX */
11557 gen_bx_excret(s, tmp);
99c475ab 11558 }
99c475ab
FB
11559 break;
11560 }
ebfe27c5 11561 }
99c475ab
FB
11562 break;
11563 }
11564
11565 /* data processing register */
11566 rd = insn & 7;
11567 rm = (insn >> 3) & 7;
11568 op = (insn >> 6) & 0xf;
11569 if (op == 2 || op == 3 || op == 4 || op == 7) {
11570 /* the shift/rotate ops want the operands backwards */
11571 val = rm;
11572 rm = rd;
11573 rd = val;
11574 val = 1;
11575 } else {
11576 val = 0;
11577 }
11578
396e467c 11579 if (op == 9) { /* neg */
7d1b0095 11580 tmp = tcg_temp_new_i32();
396e467c
FN
11581 tcg_gen_movi_i32(tmp, 0);
11582 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11583 tmp = load_reg(s, rd);
11584 } else {
f764718d 11585 tmp = NULL;
396e467c 11586 }
99c475ab 11587
396e467c 11588 tmp2 = load_reg(s, rm);
5899f386 11589 switch (op) {
99c475ab 11590 case 0x0: /* and */
396e467c 11591 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11592 if (!s->condexec_mask)
396e467c 11593 gen_logic_CC(tmp);
99c475ab
FB
11594 break;
11595 case 0x1: /* eor */
396e467c 11596 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11597 if (!s->condexec_mask)
396e467c 11598 gen_logic_CC(tmp);
99c475ab
FB
11599 break;
11600 case 0x2: /* lsl */
9ee6e8bb 11601 if (s->condexec_mask) {
365af80e 11602 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11603 } else {
9ef39277 11604 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11605 gen_logic_CC(tmp2);
9ee6e8bb 11606 }
99c475ab
FB
11607 break;
11608 case 0x3: /* lsr */
9ee6e8bb 11609 if (s->condexec_mask) {
365af80e 11610 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11611 } else {
9ef39277 11612 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11613 gen_logic_CC(tmp2);
9ee6e8bb 11614 }
99c475ab
FB
11615 break;
11616 case 0x4: /* asr */
9ee6e8bb 11617 if (s->condexec_mask) {
365af80e 11618 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11619 } else {
9ef39277 11620 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11621 gen_logic_CC(tmp2);
9ee6e8bb 11622 }
99c475ab
FB
11623 break;
11624 case 0x5: /* adc */
49b4c31e 11625 if (s->condexec_mask) {
396e467c 11626 gen_adc(tmp, tmp2);
49b4c31e
RH
11627 } else {
11628 gen_adc_CC(tmp, tmp, tmp2);
11629 }
99c475ab
FB
11630 break;
11631 case 0x6: /* sbc */
2de68a49 11632 if (s->condexec_mask) {
396e467c 11633 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11634 } else {
11635 gen_sbc_CC(tmp, tmp, tmp2);
11636 }
99c475ab
FB
11637 break;
11638 case 0x7: /* ror */
9ee6e8bb 11639 if (s->condexec_mask) {
f669df27
AJ
11640 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11641 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11642 } else {
9ef39277 11643 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11644 gen_logic_CC(tmp2);
9ee6e8bb 11645 }
99c475ab
FB
11646 break;
11647 case 0x8: /* tst */
396e467c
FN
11648 tcg_gen_and_i32(tmp, tmp, tmp2);
11649 gen_logic_CC(tmp);
99c475ab 11650 rd = 16;
5899f386 11651 break;
99c475ab 11652 case 0x9: /* neg */
9ee6e8bb 11653 if (s->condexec_mask)
396e467c 11654 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11655 else
72485ec4 11656 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11657 break;
11658 case 0xa: /* cmp */
72485ec4 11659 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11660 rd = 16;
11661 break;
11662 case 0xb: /* cmn */
72485ec4 11663 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11664 rd = 16;
11665 break;
11666 case 0xc: /* orr */
396e467c 11667 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11668 if (!s->condexec_mask)
396e467c 11669 gen_logic_CC(tmp);
99c475ab
FB
11670 break;
11671 case 0xd: /* mul */
7b2919a0 11672 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11673 if (!s->condexec_mask)
396e467c 11674 gen_logic_CC(tmp);
99c475ab
FB
11675 break;
11676 case 0xe: /* bic */
f669df27 11677 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11678 if (!s->condexec_mask)
396e467c 11679 gen_logic_CC(tmp);
99c475ab
FB
11680 break;
11681 case 0xf: /* mvn */
396e467c 11682 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11683 if (!s->condexec_mask)
396e467c 11684 gen_logic_CC(tmp2);
99c475ab 11685 val = 1;
5899f386 11686 rm = rd;
99c475ab
FB
11687 break;
11688 }
11689 if (rd != 16) {
396e467c
FN
11690 if (val) {
11691 store_reg(s, rm, tmp2);
11692 if (op != 0xf)
7d1b0095 11693 tcg_temp_free_i32(tmp);
396e467c
FN
11694 } else {
11695 store_reg(s, rd, tmp);
7d1b0095 11696 tcg_temp_free_i32(tmp2);
396e467c
FN
11697 }
11698 } else {
7d1b0095
PM
11699 tcg_temp_free_i32(tmp);
11700 tcg_temp_free_i32(tmp2);
99c475ab
FB
11701 }
11702 break;
11703
11704 case 5:
11705 /* load/store register offset. */
11706 rd = insn & 7;
11707 rn = (insn >> 3) & 7;
11708 rm = (insn >> 6) & 7;
11709 op = (insn >> 9) & 7;
b0109805 11710 addr = load_reg(s, rn);
b26eefb6 11711 tmp = load_reg(s, rm);
b0109805 11712 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11713 tcg_temp_free_i32(tmp);
99c475ab 11714
c40c8556 11715 if (op < 3) { /* store */
b0109805 11716 tmp = load_reg(s, rd);
c40c8556
PM
11717 } else {
11718 tmp = tcg_temp_new_i32();
11719 }
99c475ab
FB
11720
11721 switch (op) {
11722 case 0: /* str */
9bb6558a 11723 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11724 break;
11725 case 1: /* strh */
9bb6558a 11726 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11727 break;
11728 case 2: /* strb */
9bb6558a 11729 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11730 break;
11731 case 3: /* ldrsb */
9bb6558a 11732 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11733 break;
11734 case 4: /* ldr */
9bb6558a 11735 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11736 break;
11737 case 5: /* ldrh */
9bb6558a 11738 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11739 break;
11740 case 6: /* ldrb */
9bb6558a 11741 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11742 break;
11743 case 7: /* ldrsh */
9bb6558a 11744 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11745 break;
11746 }
c40c8556 11747 if (op >= 3) { /* load */
b0109805 11748 store_reg(s, rd, tmp);
c40c8556
PM
11749 } else {
11750 tcg_temp_free_i32(tmp);
11751 }
7d1b0095 11752 tcg_temp_free_i32(addr);
99c475ab
FB
11753 break;
11754
11755 case 6:
11756 /* load/store word immediate offset */
11757 rd = insn & 7;
11758 rn = (insn >> 3) & 7;
b0109805 11759 addr = load_reg(s, rn);
99c475ab 11760 val = (insn >> 4) & 0x7c;
b0109805 11761 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11762
11763 if (insn & (1 << 11)) {
11764 /* load */
c40c8556 11765 tmp = tcg_temp_new_i32();
12dcc321 11766 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11767 store_reg(s, rd, tmp);
99c475ab
FB
11768 } else {
11769 /* store */
b0109805 11770 tmp = load_reg(s, rd);
12dcc321 11771 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11772 tcg_temp_free_i32(tmp);
99c475ab 11773 }
7d1b0095 11774 tcg_temp_free_i32(addr);
99c475ab
FB
11775 break;
11776
11777 case 7:
11778 /* load/store byte immediate offset */
11779 rd = insn & 7;
11780 rn = (insn >> 3) & 7;
b0109805 11781 addr = load_reg(s, rn);
99c475ab 11782 val = (insn >> 6) & 0x1f;
b0109805 11783 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11784
11785 if (insn & (1 << 11)) {
11786 /* load */
c40c8556 11787 tmp = tcg_temp_new_i32();
9bb6558a 11788 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11789 store_reg(s, rd, tmp);
99c475ab
FB
11790 } else {
11791 /* store */
b0109805 11792 tmp = load_reg(s, rd);
9bb6558a 11793 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11794 tcg_temp_free_i32(tmp);
99c475ab 11795 }
7d1b0095 11796 tcg_temp_free_i32(addr);
99c475ab
FB
11797 break;
11798
11799 case 8:
11800 /* load/store halfword immediate offset */
11801 rd = insn & 7;
11802 rn = (insn >> 3) & 7;
b0109805 11803 addr = load_reg(s, rn);
99c475ab 11804 val = (insn >> 5) & 0x3e;
b0109805 11805 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11806
11807 if (insn & (1 << 11)) {
11808 /* load */
c40c8556 11809 tmp = tcg_temp_new_i32();
9bb6558a 11810 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11811 store_reg(s, rd, tmp);
99c475ab
FB
11812 } else {
11813 /* store */
b0109805 11814 tmp = load_reg(s, rd);
9bb6558a 11815 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11816 tcg_temp_free_i32(tmp);
99c475ab 11817 }
7d1b0095 11818 tcg_temp_free_i32(addr);
99c475ab
FB
11819 break;
11820
11821 case 9:
11822 /* load/store from stack */
11823 rd = (insn >> 8) & 7;
b0109805 11824 addr = load_reg(s, 13);
99c475ab 11825 val = (insn & 0xff) * 4;
b0109805 11826 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11827
11828 if (insn & (1 << 11)) {
11829 /* load */
c40c8556 11830 tmp = tcg_temp_new_i32();
9bb6558a 11831 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11832 store_reg(s, rd, tmp);
99c475ab
FB
11833 } else {
11834 /* store */
b0109805 11835 tmp = load_reg(s, rd);
9bb6558a 11836 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11837 tcg_temp_free_i32(tmp);
99c475ab 11838 }
7d1b0095 11839 tcg_temp_free_i32(addr);
99c475ab
FB
11840 break;
11841
11842 case 10:
11843 /* add to high reg */
11844 rd = (insn >> 8) & 7;
5899f386
FB
11845 if (insn & (1 << 11)) {
11846 /* SP */
5e3f878a 11847 tmp = load_reg(s, 13);
5899f386
FB
11848 } else {
11849 /* PC. bit 1 is ignored. */
7d1b0095 11850 tmp = tcg_temp_new_i32();
5e3f878a 11851 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11852 }
99c475ab 11853 val = (insn & 0xff) * 4;
5e3f878a
PB
11854 tcg_gen_addi_i32(tmp, tmp, val);
11855 store_reg(s, rd, tmp);
99c475ab
FB
11856 break;
11857
11858 case 11:
11859 /* misc */
11860 op = (insn >> 8) & 0xf;
11861 switch (op) {
11862 case 0:
11863 /* adjust stack pointer */
b26eefb6 11864 tmp = load_reg(s, 13);
99c475ab
FB
11865 val = (insn & 0x7f) * 4;
11866 if (insn & (1 << 7))
6a0d8a1d 11867 val = -(int32_t)val;
b26eefb6
PB
11868 tcg_gen_addi_i32(tmp, tmp, val);
11869 store_reg(s, 13, tmp);
99c475ab
FB
11870 break;
11871
9ee6e8bb
PB
11872 case 2: /* sign/zero extend. */
11873 ARCH(6);
11874 rd = insn & 7;
11875 rm = (insn >> 3) & 7;
b0109805 11876 tmp = load_reg(s, rm);
9ee6e8bb 11877 switch ((insn >> 6) & 3) {
b0109805
PB
11878 case 0: gen_sxth(tmp); break;
11879 case 1: gen_sxtb(tmp); break;
11880 case 2: gen_uxth(tmp); break;
11881 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11882 }
b0109805 11883 store_reg(s, rd, tmp);
9ee6e8bb 11884 break;
99c475ab
FB
11885 case 4: case 5: case 0xc: case 0xd:
11886 /* push/pop */
b0109805 11887 addr = load_reg(s, 13);
5899f386
FB
11888 if (insn & (1 << 8))
11889 offset = 4;
99c475ab 11890 else
5899f386
FB
11891 offset = 0;
11892 for (i = 0; i < 8; i++) {
11893 if (insn & (1 << i))
11894 offset += 4;
11895 }
11896 if ((insn & (1 << 11)) == 0) {
b0109805 11897 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11898 }
99c475ab
FB
11899 for (i = 0; i < 8; i++) {
11900 if (insn & (1 << i)) {
11901 if (insn & (1 << 11)) {
11902 /* pop */
c40c8556 11903 tmp = tcg_temp_new_i32();
12dcc321 11904 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11905 store_reg(s, i, tmp);
99c475ab
FB
11906 } else {
11907 /* push */
b0109805 11908 tmp = load_reg(s, i);
12dcc321 11909 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11910 tcg_temp_free_i32(tmp);
99c475ab 11911 }
5899f386 11912 /* advance to the next address. */
b0109805 11913 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11914 }
11915 }
f764718d 11916 tmp = NULL;
99c475ab
FB
11917 if (insn & (1 << 8)) {
11918 if (insn & (1 << 11)) {
11919 /* pop pc */
c40c8556 11920 tmp = tcg_temp_new_i32();
12dcc321 11921 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11922 /* don't set the pc until the rest of the instruction
11923 has completed */
11924 } else {
11925 /* push lr */
b0109805 11926 tmp = load_reg(s, 14);
12dcc321 11927 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11928 tcg_temp_free_i32(tmp);
99c475ab 11929 }
b0109805 11930 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11931 }
5899f386 11932 if ((insn & (1 << 11)) == 0) {
b0109805 11933 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11934 }
99c475ab 11935 /* write back the new stack pointer */
b0109805 11936 store_reg(s, 13, addr);
99c475ab 11937 /* set the new PC value */
be5e7a76 11938 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11939 store_reg_from_load(s, 15, tmp);
be5e7a76 11940 }
99c475ab
FB
11941 break;
11942
9ee6e8bb
PB
11943 case 1: case 3: case 9: case 11: /* czb */
11944 rm = insn & 7;
d9ba4830 11945 tmp = load_reg(s, rm);
9ee6e8bb
PB
11946 s->condlabel = gen_new_label();
11947 s->condjmp = 1;
11948 if (insn & (1 << 11))
cb63669a 11949 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11950 else
cb63669a 11951 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11952 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11953 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11954 val = (uint32_t)s->pc + 2;
11955 val += offset;
11956 gen_jmp(s, val);
11957 break;
11958
11959 case 15: /* IT, nop-hint. */
11960 if ((insn & 0xf) == 0) {
11961 gen_nop_hint(s, (insn >> 4) & 0xf);
11962 break;
11963 }
11964 /* If Then. */
11965 s->condexec_cond = (insn >> 4) & 0xe;
11966 s->condexec_mask = insn & 0x1f;
11967 /* No actual code generated for this insn, just setup state. */
11968 break;
11969
06c949e6 11970 case 0xe: /* bkpt */
d4a2dc67
PM
11971 {
11972 int imm8 = extract32(insn, 0, 8);
be5e7a76 11973 ARCH(5);
73710361
GB
11974 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11975 default_exception_el(s));
06c949e6 11976 break;
d4a2dc67 11977 }
06c949e6 11978
19a6e31c
PM
11979 case 0xa: /* rev, and hlt */
11980 {
11981 int op1 = extract32(insn, 6, 2);
11982
11983 if (op1 == 2) {
11984 /* HLT */
11985 int imm6 = extract32(insn, 0, 6);
11986
11987 gen_hlt(s, imm6);
11988 break;
11989 }
11990
11991 /* Otherwise this is rev */
9ee6e8bb
PB
11992 ARCH(6);
11993 rn = (insn >> 3) & 0x7;
11994 rd = insn & 0x7;
b0109805 11995 tmp = load_reg(s, rn);
19a6e31c 11996 switch (op1) {
66896cb8 11997 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11998 case 1: gen_rev16(tmp); break;
11999 case 3: gen_revsh(tmp); break;
19a6e31c
PM
12000 default:
12001 g_assert_not_reached();
9ee6e8bb 12002 }
b0109805 12003 store_reg(s, rd, tmp);
9ee6e8bb 12004 break;
19a6e31c 12005 }
9ee6e8bb 12006
d9e028c1
PM
12007 case 6:
12008 switch ((insn >> 5) & 7) {
12009 case 2:
12010 /* setend */
12011 ARCH(6);
9886ecdf
PB
12012 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12013 gen_helper_setend(cpu_env);
dcba3a8d 12014 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 12015 }
9ee6e8bb 12016 break;
d9e028c1
PM
12017 case 3:
12018 /* cps */
12019 ARCH(6);
12020 if (IS_USER(s)) {
12021 break;
8984bd2e 12022 }
b53d8923 12023 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
12024 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
12025 /* FAULTMASK */
12026 if (insn & 1) {
12027 addr = tcg_const_i32(19);
12028 gen_helper_v7m_msr(cpu_env, addr, tmp);
12029 tcg_temp_free_i32(addr);
12030 }
12031 /* PRIMASK */
12032 if (insn & 2) {
12033 addr = tcg_const_i32(16);
12034 gen_helper_v7m_msr(cpu_env, addr, tmp);
12035 tcg_temp_free_i32(addr);
12036 }
12037 tcg_temp_free_i32(tmp);
12038 gen_lookup_tb(s);
12039 } else {
12040 if (insn & (1 << 4)) {
12041 shift = CPSR_A | CPSR_I | CPSR_F;
12042 } else {
12043 shift = 0;
12044 }
12045 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 12046 }
d9e028c1
PM
12047 break;
12048 default:
12049 goto undef;
9ee6e8bb
PB
12050 }
12051 break;
12052
99c475ab
FB
12053 default:
12054 goto undef;
12055 }
12056 break;
12057
12058 case 12:
a7d3970d 12059 {
99c475ab 12060 /* load/store multiple */
f764718d 12061 TCGv_i32 loaded_var = NULL;
99c475ab 12062 rn = (insn >> 8) & 0x7;
b0109805 12063 addr = load_reg(s, rn);
99c475ab
FB
12064 for (i = 0; i < 8; i++) {
12065 if (insn & (1 << i)) {
99c475ab
FB
12066 if (insn & (1 << 11)) {
12067 /* load */
c40c8556 12068 tmp = tcg_temp_new_i32();
12dcc321 12069 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
12070 if (i == rn) {
12071 loaded_var = tmp;
12072 } else {
12073 store_reg(s, i, tmp);
12074 }
99c475ab
FB
12075 } else {
12076 /* store */
b0109805 12077 tmp = load_reg(s, i);
12dcc321 12078 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12079 tcg_temp_free_i32(tmp);
99c475ab 12080 }
5899f386 12081 /* advance to the next address */
b0109805 12082 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12083 }
12084 }
b0109805 12085 if ((insn & (1 << rn)) == 0) {
a7d3970d 12086 /* base reg not in list: base register writeback */
b0109805
PB
12087 store_reg(s, rn, addr);
12088 } else {
a7d3970d
PM
12089 /* base reg in list: if load, complete it now */
12090 if (insn & (1 << 11)) {
12091 store_reg(s, rn, loaded_var);
12092 }
7d1b0095 12093 tcg_temp_free_i32(addr);
b0109805 12094 }
99c475ab 12095 break;
a7d3970d 12096 }
99c475ab
FB
12097 case 13:
12098 /* conditional branch or swi */
12099 cond = (insn >> 8) & 0xf;
12100 if (cond == 0xe)
12101 goto undef;
12102
12103 if (cond == 0xf) {
12104 /* swi */
eaed129d 12105 gen_set_pc_im(s, s->pc);
d4a2dc67 12106 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 12107 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
12108 break;
12109 }
12110 /* generate a conditional jump to next instruction */
e50e6a20 12111 s->condlabel = gen_new_label();
39fb730a 12112 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 12113 s->condjmp = 1;
99c475ab
FB
12114
12115 /* jump to the offset */
5899f386 12116 val = (uint32_t)s->pc + 2;
99c475ab 12117 offset = ((int32_t)insn << 24) >> 24;
5899f386 12118 val += offset << 1;
8aaca4c0 12119 gen_jmp(s, val);
99c475ab
FB
12120 break;
12121
12122 case 14:
358bf29e 12123 if (insn & (1 << 11)) {
296e5a0a
PM
12124 /* thumb_insn_is_16bit() ensures we can't get here for
12125 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12126 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12127 */
12128 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12129 ARCH(5);
12130 offset = ((insn & 0x7ff) << 1);
12131 tmp = load_reg(s, 14);
12132 tcg_gen_addi_i32(tmp, tmp, offset);
12133 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12134
12135 tmp2 = tcg_temp_new_i32();
12136 tcg_gen_movi_i32(tmp2, s->pc | 1);
12137 store_reg(s, 14, tmp2);
12138 gen_bx(s, tmp);
358bf29e
PB
12139 break;
12140 }
9ee6e8bb 12141 /* unconditional branch */
99c475ab
FB
12142 val = (uint32_t)s->pc;
12143 offset = ((int32_t)insn << 21) >> 21;
12144 val += (offset << 1) + 2;
8aaca4c0 12145 gen_jmp(s, val);
99c475ab
FB
12146 break;
12147
12148 case 15:
296e5a0a
PM
12149 /* thumb_insn_is_16bit() ensures we can't get here for
12150 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12151 */
12152 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12153
12154 if (insn & (1 << 11)) {
12155 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12156 offset = ((insn & 0x7ff) << 1) | 1;
12157 tmp = load_reg(s, 14);
12158 tcg_gen_addi_i32(tmp, tmp, offset);
12159
12160 tmp2 = tcg_temp_new_i32();
12161 tcg_gen_movi_i32(tmp2, s->pc | 1);
12162 store_reg(s, 14, tmp2);
12163 gen_bx(s, tmp);
12164 } else {
12165 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12166 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12167
12168 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
12169 }
9ee6e8bb 12170 break;
99c475ab
FB
12171 }
12172 return;
9ee6e8bb 12173illegal_op:
99c475ab 12174undef:
73710361
GB
12175 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12176 default_exception_el(s));
99c475ab
FB
12177}
12178
541ebcd4
PM
12179static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12180{
12181 /* Return true if the insn at dc->pc might cross a page boundary.
12182 * (False positives are OK, false negatives are not.)
5b8d7289
PM
12183 * We know this is a Thumb insn, and our caller ensures we are
12184 * only called if dc->pc is less than 4 bytes from the page
12185 * boundary, so we cross the page if the first 16 bits indicate
12186 * that this is a 32 bit insn.
541ebcd4 12187 */
5b8d7289 12188 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 12189
5b8d7289 12190 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
12191}
12192
1d8a5535
LV
/* TranslatorOps hook: initialise the DisasContext for a new TB from the
 * TB flags, clamp max_insns for single-stepping and page bounds, and
 * allocate the global TCG temporaries used by the VFP/Neon decoders.
 * Returns the (possibly reduced) max_insns budget.
 */
static int arm_tr_init_disas_context(DisasContextBase *dcbase,
                                     CPUState *cs, int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Unpack the per-TB state that was baked into tb->flags. */
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->next_page_start =
        (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    /* If architectural single step active, limit to 1. */
    if (is_singlestepping(dc)) {
        max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = (dc->next_page_start - dc->base.pc_first) / 4;
        max_insns = MIN(max_insns, bound);
    }

    /* Allocate the shared scratch temporaries used by the FP/Neon and
     * iwMMXt decode paths for the lifetime of this TB.
     */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();

    return max_insns;
}
12277
b1476854
LV
/* TranslatorOps hook: per-TB setup, run once before the first insn.
 * Its only job is to reset the cached condexec (IT) state in CPUARMState.
 */
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    tcg_clear_temp_count();
}
12321
f62bd897
LV
12322static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12323{
12324 DisasContext *dc = container_of(dcbase, DisasContext, base);
12325
f62bd897
LV
12326 tcg_gen_insn_start(dc->pc,
12327 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12328 0);
15fa08f8 12329 dc->insn_start = tcg_last_op();
f62bd897
LV
12330}
12331
a68956ad
LV
/* TranslatorOps hook: a breakpoint is registered at dc->pc.  Emit the
 * appropriate exception code and end the TB.  Always returns true, i.e.
 * the breakpoint is considered handled here.
 */
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        /* Guest-architectural breakpoint: whether it actually fires
         * depends on runtime CPU state, so emit a helper call to check.
         */
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        /* Host-side (debugger) breakpoint: unconditionally trap. */
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
12358
722ef0a5 12359static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 12360{
13189a90
LV
12361#ifdef CONFIG_USER_ONLY
12362 /* Intercept jump to the magic kernel page. */
12363 if (dc->pc >= 0xffff0000) {
12364 /* We always get here via a jump, so know we are not in a
12365 conditional execution block. */
12366 gen_exception_internal(EXCP_KERNEL_TRAP);
12367 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12368 return true;
13189a90
LV
12369 }
12370#endif
12371
12372 if (dc->ss_active && !dc->pstate_ss) {
12373 /* Singlestep state is Active-pending.
12374 * If we're in this state at the start of a TB then either
12375 * a) we just took an exception to an EL which is being debugged
12376 * and this is the first insn in the exception handler
12377 * b) debug exceptions were masked and we just unmasked them
12378 * without changing EL (eg by clearing PSTATE.D)
12379 * In either case we're going to take a swstep exception in the
12380 * "did not step an insn" case, and so the syndrome ISV and EX
12381 * bits should be zero.
12382 */
12383 assert(dc->base.num_insns == 1);
12384 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12385 default_exception_el(dc));
12386 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12387 return true;
13189a90
LV
12388 }
12389
722ef0a5
RH
12390 return false;
12391}
13189a90 12392
d0264d86 12393static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12394{
13189a90
LV
12395 if (dc->condjmp && !dc->base.is_jmp) {
12396 gen_set_label(dc->condlabel);
12397 dc->condjmp = 0;
12398 }
13189a90 12399 dc->base.pc_next = dc->pc;
23169224 12400 translator_loop_temp_check(&dc->base);
13189a90
LV
12401}
12402
722ef0a5
RH
12403static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12404{
12405 DisasContext *dc = container_of(dcbase, DisasContext, base);
12406 CPUARMState *env = cpu->env_ptr;
12407 unsigned int insn;
12408
12409 if (arm_pre_translate_insn(dc)) {
12410 return;
12411 }
12412
12413 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12414 dc->insn = insn;
722ef0a5
RH
12415 dc->pc += 4;
12416 disas_arm_insn(dc, insn);
12417
d0264d86
RH
12418 arm_post_translate_insn(dc);
12419
12420 /* ARM is a fixed-length ISA. We performed the cross-page check
12421 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12422}
12423
dcf14dfb
PM
12424static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12425{
12426 /* Return true if this Thumb insn is always unconditional,
12427 * even inside an IT block. This is true of only a very few
12428 * instructions: BKPT, HLT, and SG.
12429 *
12430 * A larger class of instructions are UNPREDICTABLE if used
12431 * inside an IT block; we do not need to detect those here, because
12432 * what we do by default (perform the cc check and update the IT
12433 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12434 * choice for those situations.
12435 *
12436 * insn is either a 16-bit or a 32-bit instruction; the two are
12437 * distinguishable because for the 16-bit case the top 16 bits
12438 * are zeroes, and that isn't a valid 32-bit encoding.
12439 */
12440 if ((insn & 0xffffff00) == 0xbe00) {
12441 /* BKPT */
12442 return true;
12443 }
12444
12445 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12446 !arm_dc_feature(s, ARM_FEATURE_M)) {
12447 /* HLT: v8A only. This is unconditional even when it is going to
12448 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12449 * For v7 cores this was a plain old undefined encoding and so
12450 * honours its cc check. (We might be using the encoding as
12451 * a semihosting trap, but we don't change the cc check behaviour
12452 * on that account, because a debugger connected to a real v7A
12453 * core and emulating semihosting traps by catching the UNDEF
12454 * exception would also only see cases where the cc check passed.
12455 * No guest code should be trying to do a HLT semihosting trap
12456 * in an IT block anyway.
12457 */
12458 return true;
12459 }
12460
12461 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12462 arm_dc_feature(s, ARM_FEATURE_M)) {
12463 /* SG: v8M only */
12464 return true;
12465 }
12466
12467 return false;
12468}
12469
722ef0a5
RH
/* TranslatorOps hook: translate a single Thumb (T16 or T32) instruction,
 * including IT-block condition handling, and decide whether the TB must
 * stop before the next page.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword; if it announces a 32-bit encoding,
     * fetch and append the second halfword.
     */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, emit a conditional skip over this insn unless
     * the insn is one of the few that execute unconditionally.
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            dc->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, dc->condlabel);
            dc->condjmp = 1;
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition. */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc >= dc->next_page_start
            || (dc->pc >= dc->next_page_start - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12540
70d3c035 12541static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1d8a5535 12542{
70d3c035 12543 DisasContext *dc = container_of(dcbase, DisasContext, base);
2e70f6ef 12544
c5a49c63 12545 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
70d3c035
LV
12546 /* FIXME: This can theoretically happen with self-modifying code. */
12547 cpu_abort(cpu, "IO on conditional branch instruction");
2e70f6ef 12548 }
9ee6e8bb 12549
b5ff1b31 12550 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
12551 instruction was a conditional branch or trap, and the PC has
12552 already been written. */
f021b2c4 12553 gen_set_condexec(dc);
dcba3a8d 12554 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
3bb8a96f
PM
12555 /* Exception return branches need some special case code at the
12556 * end of the TB, which is complex enough that it has to
12557 * handle the single-step vs not and the condition-failed
12558 * insn codepath itself.
12559 */
12560 gen_bx_excret_final_code(dc);
12561 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 12562 /* Unconditional and "condition passed" instruction codepath. */
dcba3a8d 12563 switch (dc->base.is_jmp) {
7999a5c8 12564 case DISAS_SWI:
50225ad0 12565 gen_ss_advance(dc);
73710361
GB
12566 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12567 default_exception_el(dc));
7999a5c8
SF
12568 break;
12569 case DISAS_HVC:
37e6456e 12570 gen_ss_advance(dc);
73710361 12571 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
12572 break;
12573 case DISAS_SMC:
37e6456e 12574 gen_ss_advance(dc);
73710361 12575 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
12576 break;
12577 case DISAS_NEXT:
a68956ad 12578 case DISAS_TOO_MANY:
7999a5c8
SF
12579 case DISAS_UPDATE:
12580 gen_set_pc_im(dc, dc->pc);
12581 /* fall through */
12582 default:
5425415e
PM
12583 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12584 gen_singlestep_exception(dc);
a0c231e6
RH
12585 break;
12586 case DISAS_NORETURN:
12587 break;
7999a5c8 12588 }
8aaca4c0 12589 } else {
9ee6e8bb
PB
12590 /* While branches must always occur at the end of an IT block,
12591 there are a few other things that can cause us to terminate
65626741 12592 the TB in the middle of an IT block:
9ee6e8bb
PB
12593 - Exception generating instructions (bkpt, swi, undefined).
12594 - Page boundaries.
12595 - Hardware watchpoints.
12596 Hardware breakpoints have already been handled and skip this code.
12597 */
dcba3a8d 12598 switch(dc->base.is_jmp) {
8aaca4c0 12599 case DISAS_NEXT:
a68956ad 12600 case DISAS_TOO_MANY:
6e256c93 12601 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 12602 break;
577bf808 12603 case DISAS_JUMP:
8a6b28c7
EC
12604 gen_goto_ptr();
12605 break;
e8d52302
AB
12606 case DISAS_UPDATE:
12607 gen_set_pc_im(dc, dc->pc);
12608 /* fall through */
577bf808 12609 default:
8aaca4c0 12610 /* indicate that the hash table must be used to find the next TB */
57fec1fe 12611 tcg_gen_exit_tb(0);
8aaca4c0 12612 break;
a0c231e6 12613 case DISAS_NORETURN:
8aaca4c0
FB
12614 /* nothing more to generate */
12615 break;
9ee6e8bb 12616 case DISAS_WFI:
58803318
SS
12617 {
12618 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
12619 !(dc->insn & (1U << 31))) ? 2 : 4);
12620
12621 gen_helper_wfi(cpu_env, tmp);
12622 tcg_temp_free_i32(tmp);
84549b6d
PM
12623 /* The helper doesn't necessarily throw an exception, but we
12624 * must go back to the main loop to check for interrupts anyway.
12625 */
12626 tcg_gen_exit_tb(0);
9ee6e8bb 12627 break;
58803318 12628 }
72c1d3af
PM
12629 case DISAS_WFE:
12630 gen_helper_wfe(cpu_env);
12631 break;
c87e5a61
PM
12632 case DISAS_YIELD:
12633 gen_helper_yield(cpu_env);
12634 break;
9ee6e8bb 12635 case DISAS_SWI:
73710361
GB
12636 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12637 default_exception_el(dc));
9ee6e8bb 12638 break;
37e6456e 12639 case DISAS_HVC:
73710361 12640 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
12641 break;
12642 case DISAS_SMC:
73710361 12643 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 12644 break;
8aaca4c0 12645 }
f021b2c4
PM
12646 }
12647
12648 if (dc->condjmp) {
12649 /* "Condition failed" instruction codepath for the branch/trap insn */
12650 gen_set_label(dc->condlabel);
12651 gen_set_condexec(dc);
b636649f 12652 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
12653 gen_set_pc_im(dc, dc->pc);
12654 gen_singlestep_exception(dc);
12655 } else {
6e256c93 12656 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 12657 }
2c0262af 12658 }
23169224
LV
12659
12660 /* Functions above can change dc->pc, so re-align db->pc_next */
12661 dc->base.pc_next = dc->pc;
70d3c035
LV
12662}
12663
4013f7fc
LV
12664static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12665{
12666 DisasContext *dc = container_of(dcbase, DisasContext, base);
12667
12668 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12669 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12670}
12671
23169224
LV
/* Translator hook table for A32 (fixed-length 32-bit encodings). */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12681
722ef0a5
RH
/* Translator hook table for Thumb (variable-length 16/32-bit encodings);
 * only translate_insn differs from the A32 table.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12691
70d3c035 12692/* generate intermediate code for basic block 'tb'. */
23169224 12693void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12694{
23169224
LV
12695 DisasContext dc;
12696 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12697
722ef0a5
RH
12698 if (ARM_TBFLAG_THUMB(tb->flags)) {
12699 ops = &thumb_translator_ops;
12700 }
23169224 12701#ifdef TARGET_AARCH64
70d3c035 12702 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 12703 ops = &aarch64_translator_ops;
2c0262af
FB
12704 }
12705#endif
23169224
LV
12706
12707 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
12708}
12709
b5ff1b31 12710static const char *cpu_mode_names[16] = {
28c9457d
EI
12711 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12712 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 12713};
9ee6e8bb 12714
878096ee
AF
/* Dump the guest-visible CPU register state to 'f' via cpu_fprintf:
 * the 16 core registers, the PSR/XPSR line, and (with CPU_DUMP_FPU)
 * the VFP registers.  Delegates to aarch64_cpu_dump_state when the CPU
 * is currently in AArch64 state.
 */
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    /* Core registers, four per output line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: report XPSR plus handler/thread and privilege mode. */
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        /* A/R-profile: report CPSR with mode name and security state
         * (secure/non-secure shown only outside Monitor mode on EL3 CPUs).
         */
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        /* 16 D-registers for plain VFP, 32 when VFP3 is present. */
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
a6b025d3 12801
bad729e2
RH
12802void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12803 target_ulong *data)
d2856f1a 12804{
3926cc84 12805 if (is_a64(env)) {
bad729e2 12806 env->pc = data[0];
40f860cd 12807 env->condexec_bits = 0;
aaa1f954 12808 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12809 } else {
bad729e2
RH
12810 env->regs[15] = data[0];
12811 env->condexec_bits = data[1];
aaa1f954 12812 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12813 }
d2856f1a 12814}