]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: Permit accesses to ELR_Hyp from Hyp mode via MSR/MRS (banked)
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
36a71934 28#include "tcg-op-gvec.h"
1de7afc9 29#include "qemu/log.h"
534df156 30#include "qemu/bitops.h"
1d854765 31#include "arm_ldst.h"
19a6e31c 32#include "exec/semihost.h"
1497c961 33
2ef6175a
RH
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
2c0262af 36
a7e30d84 37#include "trace-tcg.h"
508127e2 38#include "exec/log.h"
a7e30d84
LV
39
40
2b51668f
PM
/* Architecture feature gates: true when the translating CPU has the feature. */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    arm_dc_feature(s, ARM_FEATURE_JAZELLE)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* Jump to the illegal-instruction path unless architecture level x is present. */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while (0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d 55
b5ff1b31
FB
/* In user-mode emulation every access is an unprivileged one. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
61
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
b26eefb6 69/* FIXME: These should be removed. */
39d5492a 70static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 71static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 72
022c62cb 73#include "exec/gen-icount.h"
2e70f6ef 74
155c3eac
FN
/* Debug names for the 16 core registers, used when creating TCG globals.
 * Fully const-qualified: both the strings and the pointer array are
 * immutable (the original left the pointer array itself writable).
 */
static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
61adacc8
RH
/* Function prototypes for gen_ functions calling Neon helpers. */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
82
b26eefb6
PB
83/* initialize TCG globals. */
84void arm_translate_init(void)
85{
155c3eac
FN
86 int i;
87
155c3eac 88 for (i = 0; i < 16; i++) {
e1ccc054 89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 90 offsetof(CPUARMState, regs[i]),
155c3eac
FN
91 regnames[i]);
92 }
e1ccc054
RH
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 97
e1ccc054 98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
155c3eac 102
14ade10f 103 a64_translate_init();
b26eefb6
PB
104}
105
9bb6558a
PM
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;
117
118/* Save the syndrome information for a Data Abort */
119static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
120{
121 uint32_t syn;
122 int sas = memop & MO_SIZE;
123 bool sse = memop & MO_SIGN;
124 bool is_acqrel = issinfo & ISSIsAcqRel;
125 bool is_write = issinfo & ISSIsWrite;
126 bool is_16bit = issinfo & ISSIs16Bit;
127 int srt = issinfo & ISSRegMask;
128
129 if (issinfo & ISSInvalid) {
130 /* Some callsites want to conditionally provide ISS info,
131 * eg "only if this was not a writeback"
132 */
133 return;
134 }
135
136 if (srt == 15) {
137 /* For AArch32, insns where the src/dest is R15 never generate
138 * ISS information. Catching that here saves checking at all
139 * the call sites.
140 */
141 return;
142 }
143
144 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
145 0, 0, 0, is_write, 0, is_16bit);
146 disas_set_insn_syndrome(s, syn);
147}
148
8bd5c820 149static inline int get_a32_user_mem_index(DisasContext *s)
579d21cc 150{
8bd5c820 151 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
579d21cc
PM
152 * insns:
153 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
154 * otherwise, access as if at PL0.
155 */
156 switch (s->mmu_idx) {
157 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
158 case ARMMMUIdx_S12NSE0:
159 case ARMMMUIdx_S12NSE1:
8bd5c820 160 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
579d21cc
PM
161 case ARMMMUIdx_S1E3:
162 case ARMMMUIdx_S1SE0:
163 case ARMMMUIdx_S1SE1:
8bd5c820 164 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
e7b921c2
PM
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
62593718
PM
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
b9f587d6
PM
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
b9f587d6 173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
62593718
PM
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
579d21cc
PM
177 case ARMMMUIdx_S2NS:
178 default:
179 g_assert_not_reached();
180 }
181}
182
39d5492a 183static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 184{
39d5492a 185 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
186 tcg_gen_ld_i32(tmp, cpu_env, offset);
187 return tmp;
188}
189
0ecb72a5 190#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 191
39d5492a 192static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
193{
194 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 195 tcg_temp_free_i32(var);
d9ba4830
PB
196}
197
198#define store_cpu_field(var, name) \
0ecb72a5 199 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 200
b26eefb6 201/* Set a variable to the value of a CPU register. */
39d5492a 202static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
203{
204 if (reg == 15) {
205 uint32_t addr;
b90372ad 206 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
207 if (s->thumb)
208 addr = (long)s->pc + 2;
209 else
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
212 } else {
155c3eac 213 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
214 }
215}
216
217/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 218static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 219{
39d5492a 220 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
221 load_reg_var(s, tmp, reg);
222 return tmp;
223}
224
225/* Set a CPU register. The source must be a temporary and will be
226 marked as dead. */
39d5492a 227static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
228{
229 if (reg == 15) {
9b6a3ea7
PM
230 /* In Thumb mode, we must ignore bit 0.
231 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
232 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
233 * We choose to ignore [1:0] in ARM mode for all architecture versions.
234 */
235 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
dcba3a8d 236 s->base.is_jmp = DISAS_JUMP;
b26eefb6 237 }
155c3eac 238 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 239 tcg_temp_free_i32(var);
b26eefb6
PB
240}
241
/* Value extensions: zero/sign-extend a byte or halfword in place. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual-halfword byte extensions, done via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 250
b26eefb6 251
39d5492a 252static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 253{
39d5492a 254 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 255 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
256 tcg_temp_free_i32(tmp_mask);
257}
d9ba4830
PB
258/* Set NZCV flags from the high 4 bits of var. */
259#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
260
d4a2dc67 261static void gen_exception_internal(int excp)
d9ba4830 262{
d4a2dc67
PM
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
264
265 assert(excp_is_internal(excp));
266 gen_helper_exception_internal(cpu_env, tcg_excp);
267 tcg_temp_free_i32(tcg_excp);
268}
269
73710361 270static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
271{
272 TCGv_i32 tcg_excp = tcg_const_i32(excp);
273 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 274 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 275
73710361
GB
276 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
277 tcg_syn, tcg_el);
278
279 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
280 tcg_temp_free_i32(tcg_syn);
281 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
282}
283
50225ad0
PM
284static void gen_ss_advance(DisasContext *s)
285{
286 /* If the singlestep state is Active-not-pending, advance to
287 * Active-pending.
288 */
289 if (s->ss_active) {
290 s->pstate_ss = 0;
291 gen_helper_clear_pstate_ss(cpu_env);
292 }
293}
294
295static void gen_step_complete_exception(DisasContext *s)
296{
297 /* We just completed step of an insn. Move from Active-not-pending
298 * to Active-pending, and then also take the swstep exception.
299 * This corresponds to making the (IMPDEF) choice to prioritize
300 * swstep exceptions over asynchronous exceptions taken to an exception
301 * level where debug is disabled. This choice has the advantage that
302 * we do not need to maintain internal state corresponding to the
303 * ISV/EX syndrome bits between completion of the step and generation
304 * of the exception, and our syndrome information is always correct.
305 */
306 gen_ss_advance(s);
73710361
GB
307 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
308 default_exception_el(s));
dcba3a8d 309 s->base.is_jmp = DISAS_NORETURN;
50225ad0
PM
310}
311
5425415e
PM
312static void gen_singlestep_exception(DisasContext *s)
313{
314 /* Generate the right kind of exception for singlestep, which is
315 * either the architectural singlestep or EXCP_DEBUG for QEMU's
316 * gdb singlestepping.
317 */
318 if (s->ss_active) {
319 gen_step_complete_exception(s);
320 } else {
321 gen_exception_internal(EXCP_DEBUG);
322 }
323}
324
b636649f
PM
325static inline bool is_singlestepping(DisasContext *s)
326{
327 /* Return true if we are singlestepping either because of
328 * architectural singlestep or QEMU gdbstub singlestep. This does
329 * not include the command line '-singlestep' mode which is rather
330 * misnamed as it only means "one instruction per TB" and doesn't
331 * affect the code we generate.
332 */
dcba3a8d 333 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
334}
335
39d5492a 336static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 337{
39d5492a
PM
338 TCGv_i32 tmp1 = tcg_temp_new_i32();
339 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
340 tcg_gen_ext16s_i32(tmp1, a);
341 tcg_gen_ext16s_i32(tmp2, b);
3670669c 342 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 343 tcg_temp_free_i32(tmp2);
3670669c
PB
344 tcg_gen_sari_i32(a, a, 16);
345 tcg_gen_sari_i32(b, b, 16);
346 tcg_gen_mul_i32(b, b, a);
347 tcg_gen_mov_i32(a, tmp1);
7d1b0095 348 tcg_temp_free_i32(tmp1);
3670669c
PB
349}
350
351/* Byteswap each halfword. */
39d5492a 352static void gen_rev16(TCGv_i32 var)
3670669c 353{
39d5492a 354 TCGv_i32 tmp = tcg_temp_new_i32();
68cedf73 355 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
3670669c 356 tcg_gen_shri_i32(tmp, var, 8);
68cedf73
AJ
357 tcg_gen_and_i32(tmp, tmp, mask);
358 tcg_gen_and_i32(var, var, mask);
3670669c 359 tcg_gen_shli_i32(var, var, 8);
3670669c 360 tcg_gen_or_i32(var, var, tmp);
68cedf73 361 tcg_temp_free_i32(mask);
7d1b0095 362 tcg_temp_free_i32(tmp);
3670669c
PB
363}
364
365/* Byteswap low halfword and sign extend. */
39d5492a 366static void gen_revsh(TCGv_i32 var)
3670669c 367{
1a855029
AJ
368 tcg_gen_ext16u_i32(var, var);
369 tcg_gen_bswap16_i32(var, var);
370 tcg_gen_ext16s_i32(var, var);
3670669c
PB
371}
372
838fa72d 373/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 374static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 375{
838fa72d
AJ
376 TCGv_i64 tmp64 = tcg_temp_new_i64();
377
378 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 379 tcg_temp_free_i32(b);
838fa72d
AJ
380 tcg_gen_shli_i64(tmp64, tmp64, 32);
381 tcg_gen_add_i64(a, tmp64, a);
382
383 tcg_temp_free_i64(tmp64);
384 return a;
385}
386
387/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 388static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
389{
390 TCGv_i64 tmp64 = tcg_temp_new_i64();
391
392 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 393 tcg_temp_free_i32(b);
838fa72d
AJ
394 tcg_gen_shli_i64(tmp64, tmp64, 32);
395 tcg_gen_sub_i64(a, tmp64, a);
396
397 tcg_temp_free_i64(tmp64);
398 return a;
3670669c
PB
399}
400
5e3f878a 401/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 402static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 403{
39d5492a
PM
404 TCGv_i32 lo = tcg_temp_new_i32();
405 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 406 TCGv_i64 ret;
5e3f878a 407
831d7fe8 408 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 409 tcg_temp_free_i32(a);
7d1b0095 410 tcg_temp_free_i32(b);
831d7fe8
RH
411
412 ret = tcg_temp_new_i64();
413 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
414 tcg_temp_free_i32(lo);
415 tcg_temp_free_i32(hi);
831d7fe8
RH
416
417 return ret;
5e3f878a
PB
418}
419
39d5492a 420static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 421{
39d5492a
PM
422 TCGv_i32 lo = tcg_temp_new_i32();
423 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 424 TCGv_i64 ret;
5e3f878a 425
831d7fe8 426 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 427 tcg_temp_free_i32(a);
7d1b0095 428 tcg_temp_free_i32(b);
831d7fe8
RH
429
430 ret = tcg_temp_new_i64();
431 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
432 tcg_temp_free_i32(lo);
433 tcg_temp_free_i32(hi);
831d7fe8
RH
434
435 return ret;
5e3f878a
PB
436}
437
8f01245e 438/* Swap low and high halfwords. */
39d5492a 439static void gen_swap_half(TCGv_i32 var)
8f01245e 440{
39d5492a 441 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
442 tcg_gen_shri_i32(tmp, var, 16);
443 tcg_gen_shli_i32(var, var, 16);
444 tcg_gen_or_i32(var, var, tmp);
7d1b0095 445 tcg_temp_free_i32(tmp);
8f01245e
PB
446}
447
b26eefb6
PB
448/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
449 tmp = (t0 ^ t1) & 0x8000;
450 t0 &= ~0x8000;
451 t1 &= ~0x8000;
452 t0 = (t0 + t1) ^ tmp;
453 */
454
39d5492a 455static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 456{
39d5492a 457 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
458 tcg_gen_xor_i32(tmp, t0, t1);
459 tcg_gen_andi_i32(tmp, tmp, 0x8000);
460 tcg_gen_andi_i32(t0, t0, ~0x8000);
461 tcg_gen_andi_i32(t1, t1, ~0x8000);
462 tcg_gen_add_i32(t0, t0, t1);
463 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
464 tcg_temp_free_i32(tmp);
465 tcg_temp_free_i32(t1);
b26eefb6
PB
466}
467
468/* Set CF to the top bit of var. */
39d5492a 469static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 470{
66c374de 471 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
472}
473
474/* Set N and Z flags from var. */
39d5492a 475static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 476{
66c374de
AJ
477 tcg_gen_mov_i32(cpu_NF, var);
478 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
479}
480
481/* T0 += T1 + CF. */
39d5492a 482static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 483{
396e467c 484 tcg_gen_add_i32(t0, t0, t1);
66c374de 485 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
486}
487
e9bb4aa9 488/* dest = T0 + T1 + CF. */
39d5492a 489static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 490{
e9bb4aa9 491 tcg_gen_add_i32(dest, t0, t1);
66c374de 492 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
493}
494
3670669c 495/* dest = T0 - T1 + CF - 1. */
39d5492a 496static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 497{
3670669c 498 tcg_gen_sub_i32(dest, t0, t1);
66c374de 499 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 500 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
501}
502
72485ec4 503/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 504static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 505{
39d5492a 506 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
507 tcg_gen_movi_i32(tmp, 0);
508 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 509 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 510 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
511 tcg_gen_xor_i32(tmp, t0, t1);
512 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
513 tcg_temp_free_i32(tmp);
514 tcg_gen_mov_i32(dest, cpu_NF);
515}
516
49b4c31e 517/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 518static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 519{
39d5492a 520 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
521 if (TCG_TARGET_HAS_add2_i32) {
522 tcg_gen_movi_i32(tmp, 0);
523 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 524 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
525 } else {
526 TCGv_i64 q0 = tcg_temp_new_i64();
527 TCGv_i64 q1 = tcg_temp_new_i64();
528 tcg_gen_extu_i32_i64(q0, t0);
529 tcg_gen_extu_i32_i64(q1, t1);
530 tcg_gen_add_i64(q0, q0, q1);
531 tcg_gen_extu_i32_i64(q1, cpu_CF);
532 tcg_gen_add_i64(q0, q0, q1);
533 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
534 tcg_temp_free_i64(q0);
535 tcg_temp_free_i64(q1);
536 }
537 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
538 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
539 tcg_gen_xor_i32(tmp, t0, t1);
540 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
541 tcg_temp_free_i32(tmp);
542 tcg_gen_mov_i32(dest, cpu_NF);
543}
544
72485ec4 545/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 546static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 547{
39d5492a 548 TCGv_i32 tmp;
72485ec4
AJ
549 tcg_gen_sub_i32(cpu_NF, t0, t1);
550 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
551 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
552 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
553 tmp = tcg_temp_new_i32();
554 tcg_gen_xor_i32(tmp, t0, t1);
555 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
556 tcg_temp_free_i32(tmp);
557 tcg_gen_mov_i32(dest, cpu_NF);
558}
559
e77f0832 560/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 561static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 562{
39d5492a 563 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
564 tcg_gen_not_i32(tmp, t1);
565 gen_adc_CC(dest, t0, tmp);
39d5492a 566 tcg_temp_free_i32(tmp);
2de68a49
RH
567}
568
365af80e 569#define GEN_SHIFT(name) \
39d5492a 570static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 571{ \
39d5492a 572 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
573 tmp1 = tcg_temp_new_i32(); \
574 tcg_gen_andi_i32(tmp1, t1, 0xff); \
575 tmp2 = tcg_const_i32(0); \
576 tmp3 = tcg_const_i32(0x1f); \
577 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
578 tcg_temp_free_i32(tmp3); \
579 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
580 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
581 tcg_temp_free_i32(tmp2); \
582 tcg_temp_free_i32(tmp1); \
583}
584GEN_SHIFT(shl)
585GEN_SHIFT(shr)
586#undef GEN_SHIFT
587
39d5492a 588static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 589{
39d5492a 590 TCGv_i32 tmp1, tmp2;
365af80e
AJ
591 tmp1 = tcg_temp_new_i32();
592 tcg_gen_andi_i32(tmp1, t1, 0xff);
593 tmp2 = tcg_const_i32(0x1f);
594 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
595 tcg_temp_free_i32(tmp2);
596 tcg_gen_sar_i32(dest, t0, tmp1);
597 tcg_temp_free_i32(tmp1);
598}
599
39d5492a 600static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 601{
39d5492a
PM
602 TCGv_i32 c0 = tcg_const_i32(0);
603 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
604 tcg_gen_neg_i32(tmp, src);
605 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
606 tcg_temp_free_i32(c0);
607 tcg_temp_free_i32(tmp);
608}
ad69471c 609
39d5492a 610static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 611{
9a119ff6 612 if (shift == 0) {
66c374de 613 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 614 } else {
66c374de
AJ
615 tcg_gen_shri_i32(cpu_CF, var, shift);
616 if (shift != 31) {
617 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
618 }
9a119ff6 619 }
9a119ff6 620}
b26eefb6 621
9a119ff6 622/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
623static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
624 int shift, int flags)
9a119ff6
PB
625{
626 switch (shiftop) {
627 case 0: /* LSL */
628 if (shift != 0) {
629 if (flags)
630 shifter_out_im(var, 32 - shift);
631 tcg_gen_shli_i32(var, var, shift);
632 }
633 break;
634 case 1: /* LSR */
635 if (shift == 0) {
636 if (flags) {
66c374de 637 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
638 }
639 tcg_gen_movi_i32(var, 0);
640 } else {
641 if (flags)
642 shifter_out_im(var, shift - 1);
643 tcg_gen_shri_i32(var, var, shift);
644 }
645 break;
646 case 2: /* ASR */
647 if (shift == 0)
648 shift = 32;
649 if (flags)
650 shifter_out_im(var, shift - 1);
651 if (shift == 32)
652 shift = 31;
653 tcg_gen_sari_i32(var, var, shift);
654 break;
655 case 3: /* ROR/RRX */
656 if (shift != 0) {
657 if (flags)
658 shifter_out_im(var, shift - 1);
f669df27 659 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 660 } else {
39d5492a 661 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 662 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
663 if (flags)
664 shifter_out_im(var, 0);
665 tcg_gen_shri_i32(var, var, 1);
b26eefb6 666 tcg_gen_or_i32(var, var, tmp);
7d1b0095 667 tcg_temp_free_i32(tmp);
b26eefb6
PB
668 }
669 }
670};
671
39d5492a
PM
672static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
673 TCGv_i32 shift, int flags)
8984bd2e
PB
674{
675 if (flags) {
676 switch (shiftop) {
9ef39277
BS
677 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
678 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
679 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
680 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
681 }
682 } else {
683 switch (shiftop) {
365af80e
AJ
684 case 0:
685 gen_shl(var, var, shift);
686 break;
687 case 1:
688 gen_shr(var, var, shift);
689 break;
690 case 2:
691 gen_sar(var, var, shift);
692 break;
f669df27
AJ
693 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
694 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
695 }
696 }
7d1b0095 697 tcg_temp_free_i32(shift);
8984bd2e
PB
698}
699
6ddbc6e4
PB
700#define PAS_OP(pfx) \
701 switch (op2) { \
702 case 0: gen_pas_helper(glue(pfx,add16)); break; \
703 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
704 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
705 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
706 case 4: gen_pas_helper(glue(pfx,add8)); break; \
707 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
708 }
39d5492a 709static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 710{
a7812ae4 711 TCGv_ptr tmp;
6ddbc6e4
PB
712
713 switch (op1) {
714#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
715 case 1:
a7812ae4 716 tmp = tcg_temp_new_ptr();
0ecb72a5 717 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 718 PAS_OP(s)
b75263d6 719 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
720 break;
721 case 5:
a7812ae4 722 tmp = tcg_temp_new_ptr();
0ecb72a5 723 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 724 PAS_OP(u)
b75263d6 725 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
726 break;
727#undef gen_pas_helper
728#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
729 case 2:
730 PAS_OP(q);
731 break;
732 case 3:
733 PAS_OP(sh);
734 break;
735 case 6:
736 PAS_OP(uq);
737 break;
738 case 7:
739 PAS_OP(uh);
740 break;
741#undef gen_pas_helper
742 }
743}
9ee6e8bb
PB
744#undef PAS_OP
745
6ddbc6e4
PB
746/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
747#define PAS_OP(pfx) \
ed89a2f1 748 switch (op1) { \
6ddbc6e4
PB
749 case 0: gen_pas_helper(glue(pfx,add8)); break; \
750 case 1: gen_pas_helper(glue(pfx,add16)); break; \
751 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
752 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
753 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
754 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
755 }
39d5492a 756static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 757{
a7812ae4 758 TCGv_ptr tmp;
6ddbc6e4 759
ed89a2f1 760 switch (op2) {
6ddbc6e4
PB
761#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
762 case 0:
a7812ae4 763 tmp = tcg_temp_new_ptr();
0ecb72a5 764 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 765 PAS_OP(s)
b75263d6 766 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
767 break;
768 case 4:
a7812ae4 769 tmp = tcg_temp_new_ptr();
0ecb72a5 770 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 771 PAS_OP(u)
b75263d6 772 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
773 break;
774#undef gen_pas_helper
775#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
776 case 1:
777 PAS_OP(q);
778 break;
779 case 2:
780 PAS_OP(sh);
781 break;
782 case 5:
783 PAS_OP(uq);
784 break;
785 case 6:
786 PAS_OP(uh);
787 break;
788#undef gen_pas_helper
789 }
790}
9ee6e8bb
PB
791#undef PAS_OP
792
39fb730a 793/*
6c2c63d3 794 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
795 * This is common between ARM and Aarch64 targets.
796 */
6c2c63d3 797void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 798{
6c2c63d3
RH
799 TCGv_i32 value;
800 TCGCond cond;
801 bool global = true;
d9ba4830 802
d9ba4830
PB
803 switch (cc) {
804 case 0: /* eq: Z */
d9ba4830 805 case 1: /* ne: !Z */
6c2c63d3
RH
806 cond = TCG_COND_EQ;
807 value = cpu_ZF;
d9ba4830 808 break;
6c2c63d3 809
d9ba4830 810 case 2: /* cs: C */
d9ba4830 811 case 3: /* cc: !C */
6c2c63d3
RH
812 cond = TCG_COND_NE;
813 value = cpu_CF;
d9ba4830 814 break;
6c2c63d3 815
d9ba4830 816 case 4: /* mi: N */
d9ba4830 817 case 5: /* pl: !N */
6c2c63d3
RH
818 cond = TCG_COND_LT;
819 value = cpu_NF;
d9ba4830 820 break;
6c2c63d3 821
d9ba4830 822 case 6: /* vs: V */
d9ba4830 823 case 7: /* vc: !V */
6c2c63d3
RH
824 cond = TCG_COND_LT;
825 value = cpu_VF;
d9ba4830 826 break;
6c2c63d3 827
d9ba4830 828 case 8: /* hi: C && !Z */
6c2c63d3
RH
829 case 9: /* ls: !C || Z -> !(C && !Z) */
830 cond = TCG_COND_NE;
831 value = tcg_temp_new_i32();
832 global = false;
833 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
834 ZF is non-zero for !Z; so AND the two subexpressions. */
835 tcg_gen_neg_i32(value, cpu_CF);
836 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 837 break;
6c2c63d3 838
d9ba4830 839 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 840 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
841 /* Since we're only interested in the sign bit, == 0 is >= 0. */
842 cond = TCG_COND_GE;
843 value = tcg_temp_new_i32();
844 global = false;
845 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 846 break;
6c2c63d3 847
d9ba4830 848 case 12: /* gt: !Z && N == V */
d9ba4830 849 case 13: /* le: Z || N != V */
6c2c63d3
RH
850 cond = TCG_COND_NE;
851 value = tcg_temp_new_i32();
852 global = false;
853 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
854 * the sign bit then AND with ZF to yield the result. */
855 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
856 tcg_gen_sari_i32(value, value, 31);
857 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 858 break;
6c2c63d3 859
9305eac0
RH
860 case 14: /* always */
861 case 15: /* always */
862 /* Use the ALWAYS condition, which will fold early.
863 * It doesn't matter what we use for the value. */
864 cond = TCG_COND_ALWAYS;
865 value = cpu_ZF;
866 goto no_invert;
867
d9ba4830
PB
868 default:
869 fprintf(stderr, "Bad condition code 0x%x\n", cc);
870 abort();
871 }
6c2c63d3
RH
872
873 if (cc & 1) {
874 cond = tcg_invert_cond(cond);
875 }
876
9305eac0 877 no_invert:
6c2c63d3
RH
878 cmp->cond = cond;
879 cmp->value = value;
880 cmp->value_global = global;
881}
882
883void arm_free_cc(DisasCompare *cmp)
884{
885 if (!cmp->value_global) {
886 tcg_temp_free_i32(cmp->value);
887 }
888}
889
890void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
891{
892 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
893}
894
895void arm_gen_test_cc(int cc, TCGLabel *label)
896{
897 DisasCompare cmp;
898 arm_test_cc(&cmp, cc);
899 arm_jump_cc(&cmp, label);
900 arm_free_cc(&cmp);
d9ba4830 901}
2c0262af 902
b1d8e52e 903static const uint8_t table_logic_cc[16] = {
2c0262af
FB
904 1, /* and */
905 1, /* xor */
906 0, /* sub */
907 0, /* rsb */
908 0, /* add */
909 0, /* adc */
910 0, /* sbc */
911 0, /* rsc */
912 1, /* andl */
913 1, /* xorl */
914 0, /* cmp */
915 0, /* cmn */
916 1, /* orr */
917 1, /* mov */
918 1, /* bic */
919 1, /* mvn */
920};
3b46e624 921
4d5e8c96
PM
922static inline void gen_set_condexec(DisasContext *s)
923{
924 if (s->condexec_mask) {
925 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
926 TCGv_i32 tmp = tcg_temp_new_i32();
927 tcg_gen_movi_i32(tmp, val);
928 store_cpu_field(tmp, condexec_bits);
929 }
930}
931
932static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
933{
934 tcg_gen_movi_i32(cpu_R[15], val);
935}
936
d9ba4830
PB
937/* Set PC and Thumb state from an immediate address. */
938static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 939{
39d5492a 940 TCGv_i32 tmp;
99c475ab 941
dcba3a8d 942 s->base.is_jmp = DISAS_JUMP;
d9ba4830 943 if (s->thumb != (addr & 1)) {
7d1b0095 944 tmp = tcg_temp_new_i32();
d9ba4830 945 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 946 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 947 tcg_temp_free_i32(tmp);
d9ba4830 948 }
155c3eac 949 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
950}
951
952/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 953static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 954{
dcba3a8d 955 s->base.is_jmp = DISAS_JUMP;
155c3eac
FN
956 tcg_gen_andi_i32(cpu_R[15], var, ~1);
957 tcg_gen_andi_i32(var, var, 1);
958 store_cpu_field(var, thumb);
d9ba4830
PB
959}
960
3bb8a96f
PM
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    /* Security extension: magic values must be checked everywhere;
     * otherwise only in Handler mode on plain M profile.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

/* Emitted at the end of a TB flagged DISAS_BX_EXCRET: test whether the
 * just-written PC is in the exception-return magic range and either end
 * the TB normally or raise EXCP_EXCEPTION_EXIT.
 */
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
1015
fb602cb7
PM
1016static inline void gen_bxns(DisasContext *s, int rm)
1017{
1018 TCGv_i32 var = load_reg(s, rm);
1019
1020 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1021 * we need to sync state before calling it, but:
1022 * - we don't need to do gen_set_pc_im() because the bxns helper will
1023 * always set the PC itself
1024 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1025 * unless it's outside an IT block or the last insn in an IT block,
1026 * so we know that condexec == 0 (already set at the top of the TB)
1027 * is correct in the non-UNPREDICTABLE cases, and we can choose
1028 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1029 */
1030 gen_helper_v7m_bxns(cpu_env, var);
1031 tcg_temp_free_i32(var);
ef475b5d 1032 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
1033}
1034
3e3fa230
PM
1035static inline void gen_blxns(DisasContext *s, int rm)
1036{
1037 TCGv_i32 var = load_reg(s, rm);
1038
1039 /* We don't need to sync condexec state, for the same reason as bxns.
1040 * We do however need to set the PC, because the blxns helper reads it.
1041 * The blxns helper may throw an exception.
1042 */
1043 gen_set_pc_im(s, s->pc);
1044 gen_helper_v7m_blxns(cpu_env, var);
1045 tcg_temp_free_i32(var);
1046 s->base.is_jmp = DISAS_EXIT;
1047}
1048
21aeb343
JR
1049/* Variant of store_reg which uses branch&exchange logic when storing
1050 to r15 in ARM architecture v7 and above. The source must be a temporary
1051 and will be marked as dead. */
7dcc1f89 1052static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1053{
1054 if (reg == 15 && ENABLE_ARCH_7) {
1055 gen_bx(s, var);
1056 } else {
1057 store_reg(s, reg, var);
1058 }
1059}
1060
be5e7a76
DES
1061/* Variant of store_reg which uses branch&exchange logic when storing
1062 * to r15 in ARM architecture v5T and above. This is used for storing
1063 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1064 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1065static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1066{
1067 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1068 gen_bx_excret(s, var);
be5e7a76
DES
1069 } else {
1070 store_reg(s, reg, var);
1071 }
1072}
1073
e334bd31
PB
1074#ifdef CONFIG_USER_ONLY
1075#define IS_USER_ONLY 1
1076#else
1077#define IS_USER_ONLY 0
1078#endif
1079
08307563
PM
1080/* Abstractions of "generate code to do a guest load/store for
1081 * AArch32", where a vaddr is always 32 bits (and is zero
1082 * extended if we're a 64 bit core) and data is also
1083 * 32 bits unless specifically doing a 64 bit access.
1084 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1085 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1086 */
08307563 1087
7f5616f5 1088static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
08307563 1089{
7f5616f5
RH
1090 TCGv addr = tcg_temp_new();
1091 tcg_gen_extu_i32_tl(addr, a32);
1092
e334bd31 1093 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1094 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1095 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1096 }
7f5616f5 1097 return addr;
08307563
PM
1098}
1099
7f5616f5
RH
1100static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1101 int index, TCGMemOp opc)
08307563 1102{
2aeba0d0
JS
1103 TCGv addr;
1104
1105 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1106 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1107 opc |= MO_ALIGN;
1108 }
1109
1110 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1111 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1112 tcg_temp_free(addr);
08307563
PM
1113}
1114
7f5616f5
RH
1115static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1116 int index, TCGMemOp opc)
1117{
2aeba0d0
JS
1118 TCGv addr;
1119
1120 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1121 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1122 opc |= MO_ALIGN;
1123 }
1124
1125 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1126 tcg_gen_qemu_st_i32(val, addr, index, opc);
1127 tcg_temp_free(addr);
1128}
08307563 1129
/* Expand to a pair of sized load wrappers: gen_aa32_ld<SUFF>() which does
 * the access with the CPU's current data endianness, and the _iss variant
 * which additionally records syndrome information for a possible data
 * abort via disas_set_da_iss().
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

/* Store counterpart of DO_GEN_LD; the _iss variant marks the syndrome
 * as a write via ISSIsWrite.
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1159
/* For system-mode BE32 (SCTLR.B), swap the two words of a 64-bit value
 * after a doubleword load (a 32-bit rotate exchanges the halves).
 */
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

/* 64-bit guest load from AArch32 address A32, with BE32 word swap. */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

/* Convenience wrapper: 64-bit load using the CPU's current endianness. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

/* 64-bit guest store to AArch32 address A32; for system-mode BE32 the
 * value is word-swapped into a temp before the store so VAL itself is
 * not clobbered.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

/* Convenience wrapper: 64-bit store using the CPU's current endianness. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1205
/* Instantiate the sized AArch32 load/store helpers:
 * signed/unsigned byte and halfword loads, 32-bit load, and the
 * byte/halfword/word stores.
 */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
/* Generate an HVC (hypervisor call) with 16-bit immediate IMM16.  */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

/* Generate an SMC (secure monitor call).  */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);            /* PC of the SMC itself */
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);                /* PC of the next insn */
    s->base.is_jmp = DISAS_SMC;
}
1247
/* Raise an internal (QEMU-only) exception EXCP for the current insn.
 * OFFSET is the byte distance from s->pc back to the insn's own address
 * (2 for a 16-bit Thumb insn, 4 for ARM/Thumb-2), so the PC is wound
 * back before raising.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Raise an architectural exception EXCP with syndrome SYN targeting
 * exception level TARGET_EL; OFFSET as for gen_exception_internal_insn().
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Raise a breakpoint exception with syndrome SYN via the bkpt helper;
 * OFFSET as above.
 */
static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
1276
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Point the PC at the next insn (clearing the Thumb bit) and exit
     * to the main loop so the new state is picked up.
     */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

/* Translate an HLT instruction with immediate IMM.  */
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    /* Not a semihosting trap: UNDEF, as for disabled halting debug. */
    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1310
b0109805 1311static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1312 TCGv_i32 var)
2c0262af 1313{
1e8d4eec 1314 int val, rm, shift, shiftop;
39d5492a 1315 TCGv_i32 offset;
2c0262af
FB
1316
1317 if (!(insn & (1 << 25))) {
1318 /* immediate */
1319 val = insn & 0xfff;
1320 if (!(insn & (1 << 23)))
1321 val = -val;
537730b9 1322 if (val != 0)
b0109805 1323 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1324 } else {
1325 /* shift/register */
1326 rm = (insn) & 0xf;
1327 shift = (insn >> 7) & 0x1f;
1e8d4eec 1328 shiftop = (insn >> 5) & 3;
b26eefb6 1329 offset = load_reg(s, rm);
9a119ff6 1330 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1331 if (!(insn & (1 << 23)))
b0109805 1332 tcg_gen_sub_i32(var, var, offset);
2c0262af 1333 else
b0109805 1334 tcg_gen_add_i32(var, var, offset);
7d1b0095 1335 tcg_temp_free_i32(offset);
2c0262af
FB
1336 }
1337}
1338
191f9a93 1339static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1340 int extra, TCGv_i32 var)
2c0262af
FB
1341{
1342 int val, rm;
39d5492a 1343 TCGv_i32 offset;
3b46e624 1344
2c0262af
FB
1345 if (insn & (1 << 22)) {
1346 /* immediate */
1347 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1348 if (!(insn & (1 << 23)))
1349 val = -val;
18acad92 1350 val += extra;
537730b9 1351 if (val != 0)
b0109805 1352 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1353 } else {
1354 /* register */
191f9a93 1355 if (extra)
b0109805 1356 tcg_gen_addi_i32(var, var, extra);
2c0262af 1357 rm = (insn) & 0xf;
b26eefb6 1358 offset = load_reg(s, rm);
2c0262af 1359 if (!(insn & (1 << 23)))
b0109805 1360 tcg_gen_sub_i32(var, var, offset);
2c0262af 1361 else
b0109805 1362 tcg_gen_add_i32(var, var, offset);
7d1b0095 1363 tcg_temp_free_i32(offset);
2c0262af
FB
1364 }
1365}
1366
5aaebd13
PM
1367static TCGv_ptr get_fpstatus_ptr(int neon)
1368{
1369 TCGv_ptr statusptr = tcg_temp_new_ptr();
1370 int offset;
1371 if (neon) {
0ecb72a5 1372 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1373 } else {
0ecb72a5 1374 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1375 }
1376 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1377 return statusptr;
1378}
1379
/* Define gen_vfp_<name>(): a two-operand VFP op computing
 * F0 = F0 <op> F1 in either double (dp) or single precision, using the
 * real VFP float_status.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1420
4373f3ce
PB
1421static inline void gen_vfp_abs(int dp)
1422{
1423 if (dp)
1424 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1425 else
1426 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1427}
1428
1429static inline void gen_vfp_neg(int dp)
1430{
1431 if (dp)
1432 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1433 else
1434 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1435}
1436
1437static inline void gen_vfp_sqrt(int dp)
1438{
1439 if (dp)
1440 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1441 else
1442 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1443}
1444
1445static inline void gen_vfp_cmp(int dp)
1446{
1447 if (dp)
1448 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1449 else
1450 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1451}
1452
1453static inline void gen_vfp_cmpe(int dp)
1454{
1455 if (dp)
1456 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1457 else
1458 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1459}
1460
1461static inline void gen_vfp_F1_ld0(int dp)
1462{
1463 if (dp)
5b340b51 1464 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1465 else
5b340b51 1466 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1467}
1468
/* Define gen_vfp_<name>(): integer-to-float conversion on F0.  NEON
 * selects the standard (Neon) float_status instead of the VFP one.
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

/* Define gen_vfp_<name>(): float-to-integer conversion; the 32-bit
 * integer result always lands in F0s.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

/* Define gen_vfp_<name>(): fixed-point conversion with a SHIFT
 * (number of fraction bits); ROUND selects the rounding-suffixed
 * helper (empty or _round_to_zero).
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1527
/* Load the F0 working register (double or single) from guest address
 * ADDR.
 */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

/* Store the F0 working register (double or single) to guest address
 * ADDR.
 */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1545
/* Return the byte offset within CPUARMState of VFP register REG.
 * The FP register file is stored in the SVE-style zregs array:
 * double Dn lives in zregs[n >> 1].d[n & 1], and single Sn is the
 * low or high 32-bit half of the double that contains it.
 */
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
1570
/* Return a new temp holding 32-bit piece PASS of Neon register REG. */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Store VAR into 32-bit piece PASS of Neon register REG; VAR is freed. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

/* Load 64-bit Neon/VFP register REG into VAR. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* Store VAR into 64-bit Neon/VFP register REG (VAR is not freed). */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* Return a new TCGv_ptr pointing at VFP register REG in CPUARMState. */
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}
1600
4373f3ce
PB
1601#define tcg_gen_ld_f32 tcg_gen_ld_i32
1602#define tcg_gen_ld_f64 tcg_gen_ld_i64
1603#define tcg_gen_st_f32 tcg_gen_st_i32
1604#define tcg_gen_st_f64 tcg_gen_st_i64
1605
b7bcbe95
FB
1606static inline void gen_mov_F0_vreg(int dp, int reg)
1607{
1608 if (dp)
4373f3ce 1609 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1610 else
4373f3ce 1611 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1612}
1613
1614static inline void gen_mov_F1_vreg(int dp, int reg)
1615{
1616 if (dp)
4373f3ce 1617 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1618 else
4373f3ce 1619 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1620}
1621
1622static inline void gen_mov_vreg_F0(int dp, int reg)
1623{
1624 if (dp)
4373f3ce 1625 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1626 else
4373f3ce 1627 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1628}
1629
/* Bit 20 of a coprocessor insn: set for reads (MRC-style), clear for
 * writes.
 */
#define ARM_CP_RW_BIT (1 << 20)

/* Load 64-bit iwMMXt data register REG into VAR. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Store VAR into 64-bit iwMMXt data register REG (VAR not freed). */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Return a new temp holding iwMMXt control register REG. */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store VAR into iwMMXt control register REG; VAR is freed. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* Move the M0 working register into wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

/* Move wRn into the M0 working register. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* M0 |= wRn */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 &= wRn */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 ^= wRn */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1682
/* Define gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (it touches CPU
 * state, e.g. the flags in wCASF).
 */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate an env-taking op for all three element sizes (b/w/l). */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env-taking op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1762
/* Set the MUP bit (bit 1) in the wCon control register. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set the CUP bit (bit 0) in the wCon control register. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Update the N and Z flags in wCASF from the value in M0. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1792
/* Compute the effective address of an iwMMXt load/store into DEST.
 * Decodes the coprocessor addressing mode: base register in bits
 * [19:16], 8-bit offset scaled by bit 8, bit 24 = pre-indexed,
 * bit 23 = add (else subtract), bit 21 = writeback.  Returns nonzero
 * for an invalid (UNDEF) encoding: post-indexed without writeback and
 * with the subtract bit set.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        /* With writeback the updated base goes back to rd; otherwise
         * the temp is dead once copied into dest.
         */
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1827
/* Fetch the shift amount for an iwMMXt shift insn into DEST, masked
 * with MASK.  Bit 8 selects a wCGR0-3 control register as the source
 * (any other creg is invalid); otherwise the low 32 bits of the wRn
 * data register are used.  Returns nonzero for an invalid encoding.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1849
a1c7273b 1850/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1851 (ie. an undefined instruction). */
7dcc1f89 1852static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1853{
1854 int rd, wrd;
1855 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1856 TCGv_i32 addr;
1857 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1858
1859 if ((insn & 0x0e000e00) == 0x0c000000) {
1860 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1861 wrd = insn & 0xf;
1862 rdlo = (insn >> 12) & 0xf;
1863 rdhi = (insn >> 16) & 0xf;
1864 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1865 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1866 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1867 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1868 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1869 } else { /* TMCRR */
da6b5335
FN
1870 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1871 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1872 gen_op_iwmmxt_set_mup();
1873 }
1874 return 0;
1875 }
1876
1877 wrd = (insn >> 12) & 0xf;
7d1b0095 1878 addr = tcg_temp_new_i32();
da6b5335 1879 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1880 tcg_temp_free_i32(addr);
18c9b560 1881 return 1;
da6b5335 1882 }
18c9b560
AZ
1883 if (insn & ARM_CP_RW_BIT) {
1884 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1885 tmp = tcg_temp_new_i32();
12dcc321 1886 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1887 iwmmxt_store_creg(wrd, tmp);
18c9b560 1888 } else {
e677137d
PB
1889 i = 1;
1890 if (insn & (1 << 8)) {
1891 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1892 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1893 i = 0;
1894 } else { /* WLDRW wRd */
29531141 1895 tmp = tcg_temp_new_i32();
12dcc321 1896 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1897 }
1898 } else {
29531141 1899 tmp = tcg_temp_new_i32();
e677137d 1900 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1901 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1902 } else { /* WLDRB */
12dcc321 1903 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1904 }
1905 }
1906 if (i) {
1907 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1908 tcg_temp_free_i32(tmp);
e677137d 1909 }
18c9b560
AZ
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 }
1912 } else {
1913 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1914 tmp = iwmmxt_load_creg(wrd);
12dcc321 1915 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1916 } else {
1917 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1918 tmp = tcg_temp_new_i32();
e677137d
PB
1919 if (insn & (1 << 8)) {
1920 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1921 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1922 } else { /* WSTRW wRd */
ecc7b3aa 1923 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1924 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1925 }
1926 } else {
1927 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1928 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1929 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1930 } else { /* WSTRB */
ecc7b3aa 1931 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1932 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1933 }
1934 }
18c9b560 1935 }
29531141 1936 tcg_temp_free_i32(tmp);
18c9b560 1937 }
7d1b0095 1938 tcg_temp_free_i32(addr);
18c9b560
AZ
1939 return 0;
1940 }
1941
1942 if ((insn & 0x0f000000) != 0x0e000000)
1943 return 1;
1944
1945 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1946 case 0x000: /* WOR */
1947 wrd = (insn >> 12) & 0xf;
1948 rd0 = (insn >> 0) & 0xf;
1949 rd1 = (insn >> 16) & 0xf;
1950 gen_op_iwmmxt_movq_M0_wRn(rd0);
1951 gen_op_iwmmxt_orq_M0_wRn(rd1);
1952 gen_op_iwmmxt_setpsr_nz();
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 gen_op_iwmmxt_set_cup();
1956 break;
1957 case 0x011: /* TMCR */
1958 if (insn & 0xf)
1959 return 1;
1960 rd = (insn >> 12) & 0xf;
1961 wrd = (insn >> 16) & 0xf;
1962 switch (wrd) {
1963 case ARM_IWMMXT_wCID:
1964 case ARM_IWMMXT_wCASF:
1965 break;
1966 case ARM_IWMMXT_wCon:
1967 gen_op_iwmmxt_set_cup();
1968 /* Fall through. */
1969 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1970 tmp = iwmmxt_load_creg(wrd);
1971 tmp2 = load_reg(s, rd);
f669df27 1972 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1973 tcg_temp_free_i32(tmp2);
da6b5335 1974 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1975 break;
1976 case ARM_IWMMXT_wCGR0:
1977 case ARM_IWMMXT_wCGR1:
1978 case ARM_IWMMXT_wCGR2:
1979 case ARM_IWMMXT_wCGR3:
1980 gen_op_iwmmxt_set_cup();
da6b5335
FN
1981 tmp = load_reg(s, rd);
1982 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1983 break;
1984 default:
1985 return 1;
1986 }
1987 break;
1988 case 0x100: /* WXOR */
1989 wrd = (insn >> 12) & 0xf;
1990 rd0 = (insn >> 0) & 0xf;
1991 rd1 = (insn >> 16) & 0xf;
1992 gen_op_iwmmxt_movq_M0_wRn(rd0);
1993 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1994 gen_op_iwmmxt_setpsr_nz();
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x111: /* TMRC */
2000 if (insn & 0xf)
2001 return 1;
2002 rd = (insn >> 12) & 0xf;
2003 wrd = (insn >> 16) & 0xf;
da6b5335
FN
2004 tmp = iwmmxt_load_creg(wrd);
2005 store_reg(s, rd, tmp);
18c9b560
AZ
2006 break;
2007 case 0x300: /* WANDN */
2008 wrd = (insn >> 12) & 0xf;
2009 rd0 = (insn >> 0) & 0xf;
2010 rd1 = (insn >> 16) & 0xf;
2011 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 2012 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
2013 gen_op_iwmmxt_andq_M0_wRn(rd1);
2014 gen_op_iwmmxt_setpsr_nz();
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2018 break;
2019 case 0x200: /* WAND */
2020 wrd = (insn >> 12) & 0xf;
2021 rd0 = (insn >> 0) & 0xf;
2022 rd1 = (insn >> 16) & 0xf;
2023 gen_op_iwmmxt_movq_M0_wRn(rd0);
2024 gen_op_iwmmxt_andq_M0_wRn(rd1);
2025 gen_op_iwmmxt_setpsr_nz();
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 gen_op_iwmmxt_set_cup();
2029 break;
2030 case 0x810: case 0xa10: /* WMADD */
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 0) & 0xf;
2033 rd1 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2037 else
2038 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2041 break;
2042 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2043 wrd = (insn >> 12) & 0xf;
2044 rd0 = (insn >> 16) & 0xf;
2045 rd1 = (insn >> 0) & 0xf;
2046 gen_op_iwmmxt_movq_M0_wRn(rd0);
2047 switch ((insn >> 22) & 3) {
2048 case 0:
2049 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2050 break;
2051 case 1:
2052 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2053 break;
2054 case 2:
2055 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2056 break;
2057 case 3:
2058 return 1;
2059 }
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2063 break;
2064 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2065 wrd = (insn >> 12) & 0xf;
2066 rd0 = (insn >> 16) & 0xf;
2067 rd1 = (insn >> 0) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 switch ((insn >> 22) & 3) {
2070 case 0:
2071 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2072 break;
2073 case 1:
2074 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2075 break;
2076 case 2:
2077 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2078 break;
2079 case 3:
2080 return 1;
2081 }
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 rd1 = (insn >> 0) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 if (insn & (1 << 22))
2092 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2095 if (!(insn & (1 << 20)))
2096 gen_op_iwmmxt_addl_M0_wRn(wrd);
2097 gen_op_iwmmxt_movq_wRn_M0(wrd);
2098 gen_op_iwmmxt_set_mup();
2099 break;
2100 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2101 wrd = (insn >> 12) & 0xf;
2102 rd0 = (insn >> 16) & 0xf;
2103 rd1 = (insn >> 0) & 0xf;
2104 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2105 if (insn & (1 << 21)) {
2106 if (insn & (1 << 20))
2107 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2110 } else {
2111 if (insn & (1 << 20))
2112 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2113 else
2114 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2115 }
18c9b560
AZ
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 break;
2119 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 rd1 = (insn >> 0) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
2124 if (insn & (1 << 21))
2125 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2126 else
2127 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2128 if (!(insn & (1 << 20))) {
e677137d
PB
2129 iwmmxt_load_reg(cpu_V1, wrd);
2130 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2131 }
2132 gen_op_iwmmxt_movq_wRn_M0(wrd);
2133 gen_op_iwmmxt_set_mup();
2134 break;
2135 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 rd1 = (insn >> 0) & 0xf;
2139 gen_op_iwmmxt_movq_M0_wRn(rd0);
2140 switch ((insn >> 22) & 3) {
2141 case 0:
2142 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2143 break;
2144 case 1:
2145 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2146 break;
2147 case 2:
2148 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2149 break;
2150 case 3:
2151 return 1;
2152 }
2153 gen_op_iwmmxt_movq_wRn_M0(wrd);
2154 gen_op_iwmmxt_set_mup();
2155 gen_op_iwmmxt_set_cup();
2156 break;
2157 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2158 wrd = (insn >> 12) & 0xf;
2159 rd0 = (insn >> 16) & 0xf;
2160 rd1 = (insn >> 0) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2162 if (insn & (1 << 22)) {
2163 if (insn & (1 << 20))
2164 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2165 else
2166 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2167 } else {
2168 if (insn & (1 << 20))
2169 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2170 else
2171 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2172 }
18c9b560
AZ
2173 gen_op_iwmmxt_movq_wRn_M0(wrd);
2174 gen_op_iwmmxt_set_mup();
2175 gen_op_iwmmxt_set_cup();
2176 break;
2177 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2178 wrd = (insn >> 12) & 0xf;
2179 rd0 = (insn >> 16) & 0xf;
2180 rd1 = (insn >> 0) & 0xf;
2181 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2182 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2183 tcg_gen_andi_i32(tmp, tmp, 7);
2184 iwmmxt_load_reg(cpu_V1, rd1);
2185 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2186 tcg_temp_free_i32(tmp);
18c9b560
AZ
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2189 break;
2190 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2191 if (((insn >> 6) & 3) == 3)
2192 return 1;
18c9b560
AZ
2193 rd = (insn >> 12) & 0xf;
2194 wrd = (insn >> 16) & 0xf;
da6b5335 2195 tmp = load_reg(s, rd);
18c9b560
AZ
2196 gen_op_iwmmxt_movq_M0_wRn(wrd);
2197 switch ((insn >> 6) & 3) {
2198 case 0:
da6b5335
FN
2199 tmp2 = tcg_const_i32(0xff);
2200 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2201 break;
2202 case 1:
da6b5335
FN
2203 tmp2 = tcg_const_i32(0xffff);
2204 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2205 break;
2206 case 2:
da6b5335
FN
2207 tmp2 = tcg_const_i32(0xffffffff);
2208 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2209 break;
da6b5335 2210 default:
f764718d
RH
2211 tmp2 = NULL;
2212 tmp3 = NULL;
18c9b560 2213 }
da6b5335 2214 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2215 tcg_temp_free_i32(tmp3);
2216 tcg_temp_free_i32(tmp2);
7d1b0095 2217 tcg_temp_free_i32(tmp);
18c9b560
AZ
2218 gen_op_iwmmxt_movq_wRn_M0(wrd);
2219 gen_op_iwmmxt_set_mup();
2220 break;
2221 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2222 rd = (insn >> 12) & 0xf;
2223 wrd = (insn >> 16) & 0xf;
da6b5335 2224 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2225 return 1;
2226 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2227 tmp = tcg_temp_new_i32();
18c9b560
AZ
2228 switch ((insn >> 22) & 3) {
2229 case 0:
da6b5335 2230 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2231 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2232 if (insn & 8) {
2233 tcg_gen_ext8s_i32(tmp, tmp);
2234 } else {
2235 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2236 }
2237 break;
2238 case 1:
da6b5335 2239 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2240 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2241 if (insn & 8) {
2242 tcg_gen_ext16s_i32(tmp, tmp);
2243 } else {
2244 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2245 }
2246 break;
2247 case 2:
da6b5335 2248 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2249 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2250 break;
18c9b560 2251 }
da6b5335 2252 store_reg(s, rd, tmp);
18c9b560
AZ
2253 break;
2254 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2255 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2256 return 1;
da6b5335 2257 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2258 switch ((insn >> 22) & 3) {
2259 case 0:
da6b5335 2260 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2261 break;
2262 case 1:
da6b5335 2263 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2264 break;
2265 case 2:
da6b5335 2266 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2267 break;
18c9b560 2268 }
da6b5335
FN
2269 tcg_gen_shli_i32(tmp, tmp, 28);
2270 gen_set_nzcv(tmp);
7d1b0095 2271 tcg_temp_free_i32(tmp);
18c9b560
AZ
2272 break;
2273 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2274 if (((insn >> 6) & 3) == 3)
2275 return 1;
18c9b560
AZ
2276 rd = (insn >> 12) & 0xf;
2277 wrd = (insn >> 16) & 0xf;
da6b5335 2278 tmp = load_reg(s, rd);
18c9b560
AZ
2279 switch ((insn >> 6) & 3) {
2280 case 0:
da6b5335 2281 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2282 break;
2283 case 1:
da6b5335 2284 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2285 break;
2286 case 2:
da6b5335 2287 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2288 break;
18c9b560 2289 }
7d1b0095 2290 tcg_temp_free_i32(tmp);
18c9b560
AZ
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 break;
2294 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2295 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2296 return 1;
da6b5335 2297 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2298 tmp2 = tcg_temp_new_i32();
da6b5335 2299 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2300 switch ((insn >> 22) & 3) {
2301 case 0:
2302 for (i = 0; i < 7; i ++) {
da6b5335
FN
2303 tcg_gen_shli_i32(tmp2, tmp2, 4);
2304 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2305 }
2306 break;
2307 case 1:
2308 for (i = 0; i < 3; i ++) {
da6b5335
FN
2309 tcg_gen_shli_i32(tmp2, tmp2, 8);
2310 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2311 }
2312 break;
2313 case 2:
da6b5335
FN
2314 tcg_gen_shli_i32(tmp2, tmp2, 16);
2315 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2316 break;
18c9b560 2317 }
da6b5335 2318 gen_set_nzcv(tmp);
7d1b0095
PM
2319 tcg_temp_free_i32(tmp2);
2320 tcg_temp_free_i32(tmp);
18c9b560
AZ
2321 break;
2322 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2323 wrd = (insn >> 12) & 0xf;
2324 rd0 = (insn >> 16) & 0xf;
2325 gen_op_iwmmxt_movq_M0_wRn(rd0);
2326 switch ((insn >> 22) & 3) {
2327 case 0:
e677137d 2328 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2329 break;
2330 case 1:
e677137d 2331 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2332 break;
2333 case 2:
e677137d 2334 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2335 break;
2336 case 3:
2337 return 1;
2338 }
2339 gen_op_iwmmxt_movq_wRn_M0(wrd);
2340 gen_op_iwmmxt_set_mup();
2341 break;
2342 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2343 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2344 return 1;
da6b5335 2345 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2346 tmp2 = tcg_temp_new_i32();
da6b5335 2347 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2348 switch ((insn >> 22) & 3) {
2349 case 0:
2350 for (i = 0; i < 7; i ++) {
da6b5335
FN
2351 tcg_gen_shli_i32(tmp2, tmp2, 4);
2352 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2353 }
2354 break;
2355 case 1:
2356 for (i = 0; i < 3; i ++) {
da6b5335
FN
2357 tcg_gen_shli_i32(tmp2, tmp2, 8);
2358 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2359 }
2360 break;
2361 case 2:
da6b5335
FN
2362 tcg_gen_shli_i32(tmp2, tmp2, 16);
2363 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2364 break;
18c9b560 2365 }
da6b5335 2366 gen_set_nzcv(tmp);
7d1b0095
PM
2367 tcg_temp_free_i32(tmp2);
2368 tcg_temp_free_i32(tmp);
18c9b560
AZ
2369 break;
2370 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2371 rd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
da6b5335 2373 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2374 return 1;
2375 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2376 tmp = tcg_temp_new_i32();
18c9b560
AZ
2377 switch ((insn >> 22) & 3) {
2378 case 0:
da6b5335 2379 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2380 break;
2381 case 1:
da6b5335 2382 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2383 break;
2384 case 2:
da6b5335 2385 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2386 break;
18c9b560 2387 }
da6b5335 2388 store_reg(s, rd, tmp);
18c9b560
AZ
2389 break;
2390 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2391 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2392 wrd = (insn >> 12) & 0xf;
2393 rd0 = (insn >> 16) & 0xf;
2394 rd1 = (insn >> 0) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
2396 switch ((insn >> 22) & 3) {
2397 case 0:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2400 else
2401 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2402 break;
2403 case 1:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2406 else
2407 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2408 break;
2409 case 2:
2410 if (insn & (1 << 21))
2411 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2412 else
2413 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2414 break;
2415 case 3:
2416 return 1;
2417 }
2418 gen_op_iwmmxt_movq_wRn_M0(wrd);
2419 gen_op_iwmmxt_set_mup();
2420 gen_op_iwmmxt_set_cup();
2421 break;
2422 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2423 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2424 wrd = (insn >> 12) & 0xf;
2425 rd0 = (insn >> 16) & 0xf;
2426 gen_op_iwmmxt_movq_M0_wRn(rd0);
2427 switch ((insn >> 22) & 3) {
2428 case 0:
2429 if (insn & (1 << 21))
2430 gen_op_iwmmxt_unpacklsb_M0();
2431 else
2432 gen_op_iwmmxt_unpacklub_M0();
2433 break;
2434 case 1:
2435 if (insn & (1 << 21))
2436 gen_op_iwmmxt_unpacklsw_M0();
2437 else
2438 gen_op_iwmmxt_unpackluw_M0();
2439 break;
2440 case 2:
2441 if (insn & (1 << 21))
2442 gen_op_iwmmxt_unpacklsl_M0();
2443 else
2444 gen_op_iwmmxt_unpacklul_M0();
2445 break;
2446 case 3:
2447 return 1;
2448 }
2449 gen_op_iwmmxt_movq_wRn_M0(wrd);
2450 gen_op_iwmmxt_set_mup();
2451 gen_op_iwmmxt_set_cup();
2452 break;
2453 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2454 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2455 wrd = (insn >> 12) & 0xf;
2456 rd0 = (insn >> 16) & 0xf;
2457 gen_op_iwmmxt_movq_M0_wRn(rd0);
2458 switch ((insn >> 22) & 3) {
2459 case 0:
2460 if (insn & (1 << 21))
2461 gen_op_iwmmxt_unpackhsb_M0();
2462 else
2463 gen_op_iwmmxt_unpackhub_M0();
2464 break;
2465 case 1:
2466 if (insn & (1 << 21))
2467 gen_op_iwmmxt_unpackhsw_M0();
2468 else
2469 gen_op_iwmmxt_unpackhuw_M0();
2470 break;
2471 case 2:
2472 if (insn & (1 << 21))
2473 gen_op_iwmmxt_unpackhsl_M0();
2474 else
2475 gen_op_iwmmxt_unpackhul_M0();
2476 break;
2477 case 3:
2478 return 1;
2479 }
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 gen_op_iwmmxt_set_cup();
2483 break;
2484 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2485 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2486 if (((insn >> 22) & 3) == 0)
2487 return 1;
18c9b560
AZ
2488 wrd = (insn >> 12) & 0xf;
2489 rd0 = (insn >> 16) & 0xf;
2490 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2491 tmp = tcg_temp_new_i32();
da6b5335 2492 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2493 tcg_temp_free_i32(tmp);
18c9b560 2494 return 1;
da6b5335 2495 }
18c9b560 2496 switch ((insn >> 22) & 3) {
18c9b560 2497 case 1:
477955bd 2498 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2499 break;
2500 case 2:
477955bd 2501 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2502 break;
2503 case 3:
477955bd 2504 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2505 break;
2506 }
7d1b0095 2507 tcg_temp_free_i32(tmp);
18c9b560
AZ
2508 gen_op_iwmmxt_movq_wRn_M0(wrd);
2509 gen_op_iwmmxt_set_mup();
2510 gen_op_iwmmxt_set_cup();
2511 break;
2512 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2513 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2514 if (((insn >> 22) & 3) == 0)
2515 return 1;
18c9b560
AZ
2516 wrd = (insn >> 12) & 0xf;
2517 rd0 = (insn >> 16) & 0xf;
2518 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2519 tmp = tcg_temp_new_i32();
da6b5335 2520 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2521 tcg_temp_free_i32(tmp);
18c9b560 2522 return 1;
da6b5335 2523 }
18c9b560 2524 switch ((insn >> 22) & 3) {
18c9b560 2525 case 1:
477955bd 2526 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2527 break;
2528 case 2:
477955bd 2529 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2530 break;
2531 case 3:
477955bd 2532 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2533 break;
2534 }
7d1b0095 2535 tcg_temp_free_i32(tmp);
18c9b560
AZ
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2539 break;
2540 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2541 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2542 if (((insn >> 22) & 3) == 0)
2543 return 1;
18c9b560
AZ
2544 wrd = (insn >> 12) & 0xf;
2545 rd0 = (insn >> 16) & 0xf;
2546 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2547 tmp = tcg_temp_new_i32();
da6b5335 2548 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2549 tcg_temp_free_i32(tmp);
18c9b560 2550 return 1;
da6b5335 2551 }
18c9b560 2552 switch ((insn >> 22) & 3) {
18c9b560 2553 case 1:
477955bd 2554 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2555 break;
2556 case 2:
477955bd 2557 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2558 break;
2559 case 3:
477955bd 2560 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2561 break;
2562 }
7d1b0095 2563 tcg_temp_free_i32(tmp);
18c9b560
AZ
2564 gen_op_iwmmxt_movq_wRn_M0(wrd);
2565 gen_op_iwmmxt_set_mup();
2566 gen_op_iwmmxt_set_cup();
2567 break;
2568 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2569 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2570 if (((insn >> 22) & 3) == 0)
2571 return 1;
18c9b560
AZ
2572 wrd = (insn >> 12) & 0xf;
2573 rd0 = (insn >> 16) & 0xf;
2574 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2575 tmp = tcg_temp_new_i32();
18c9b560 2576 switch ((insn >> 22) & 3) {
18c9b560 2577 case 1:
da6b5335 2578 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2579 tcg_temp_free_i32(tmp);
18c9b560 2580 return 1;
da6b5335 2581 }
477955bd 2582 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2583 break;
2584 case 2:
da6b5335 2585 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2586 tcg_temp_free_i32(tmp);
18c9b560 2587 return 1;
da6b5335 2588 }
477955bd 2589 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2590 break;
2591 case 3:
da6b5335 2592 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2593 tcg_temp_free_i32(tmp);
18c9b560 2594 return 1;
da6b5335 2595 }
477955bd 2596 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2597 break;
2598 }
7d1b0095 2599 tcg_temp_free_i32(tmp);
18c9b560
AZ
2600 gen_op_iwmmxt_movq_wRn_M0(wrd);
2601 gen_op_iwmmxt_set_mup();
2602 gen_op_iwmmxt_set_cup();
2603 break;
2604 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2605 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 rd1 = (insn >> 0) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
2610 switch ((insn >> 22) & 3) {
2611 case 0:
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2614 else
2615 gen_op_iwmmxt_minub_M0_wRn(rd1);
2616 break;
2617 case 1:
2618 if (insn & (1 << 21))
2619 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2620 else
2621 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2622 break;
2623 case 2:
2624 if (insn & (1 << 21))
2625 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2626 else
2627 gen_op_iwmmxt_minul_M0_wRn(rd1);
2628 break;
2629 case 3:
2630 return 1;
2631 }
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2634 break;
2635 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2636 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2637 wrd = (insn >> 12) & 0xf;
2638 rd0 = (insn >> 16) & 0xf;
2639 rd1 = (insn >> 0) & 0xf;
2640 gen_op_iwmmxt_movq_M0_wRn(rd0);
2641 switch ((insn >> 22) & 3) {
2642 case 0:
2643 if (insn & (1 << 21))
2644 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2645 else
2646 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2647 break;
2648 case 1:
2649 if (insn & (1 << 21))
2650 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2651 else
2652 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2653 break;
2654 case 2:
2655 if (insn & (1 << 21))
2656 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2657 else
2658 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2659 break;
2660 case 3:
2661 return 1;
2662 }
2663 gen_op_iwmmxt_movq_wRn_M0(wrd);
2664 gen_op_iwmmxt_set_mup();
2665 break;
2666 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2667 case 0x402: case 0x502: case 0x602: case 0x702:
2668 wrd = (insn >> 12) & 0xf;
2669 rd0 = (insn >> 16) & 0xf;
2670 rd1 = (insn >> 0) & 0xf;
2671 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2672 tmp = tcg_const_i32((insn >> 20) & 3);
2673 iwmmxt_load_reg(cpu_V1, rd1);
2674 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2675 tcg_temp_free_i32(tmp);
18c9b560
AZ
2676 gen_op_iwmmxt_movq_wRn_M0(wrd);
2677 gen_op_iwmmxt_set_mup();
2678 break;
2679 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2680 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2681 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2682 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2683 wrd = (insn >> 12) & 0xf;
2684 rd0 = (insn >> 16) & 0xf;
2685 rd1 = (insn >> 0) & 0xf;
2686 gen_op_iwmmxt_movq_M0_wRn(rd0);
2687 switch ((insn >> 20) & 0xf) {
2688 case 0x0:
2689 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2690 break;
2691 case 0x1:
2692 gen_op_iwmmxt_subub_M0_wRn(rd1);
2693 break;
2694 case 0x3:
2695 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2696 break;
2697 case 0x4:
2698 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2699 break;
2700 case 0x5:
2701 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2702 break;
2703 case 0x7:
2704 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2705 break;
2706 case 0x8:
2707 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2708 break;
2709 case 0x9:
2710 gen_op_iwmmxt_subul_M0_wRn(rd1);
2711 break;
2712 case 0xb:
2713 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2714 break;
2715 default:
2716 return 1;
2717 }
2718 gen_op_iwmmxt_movq_wRn_M0(wrd);
2719 gen_op_iwmmxt_set_mup();
2720 gen_op_iwmmxt_set_cup();
2721 break;
2722 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2723 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2724 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2725 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2726 wrd = (insn >> 12) & 0xf;
2727 rd0 = (insn >> 16) & 0xf;
2728 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2729 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2730 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2731 tcg_temp_free_i32(tmp);
18c9b560
AZ
2732 gen_op_iwmmxt_movq_wRn_M0(wrd);
2733 gen_op_iwmmxt_set_mup();
2734 gen_op_iwmmxt_set_cup();
2735 break;
2736 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2737 case 0x418: case 0x518: case 0x618: case 0x718:
2738 case 0x818: case 0x918: case 0xa18: case 0xb18:
2739 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2740 wrd = (insn >> 12) & 0xf;
2741 rd0 = (insn >> 16) & 0xf;
2742 rd1 = (insn >> 0) & 0xf;
2743 gen_op_iwmmxt_movq_M0_wRn(rd0);
2744 switch ((insn >> 20) & 0xf) {
2745 case 0x0:
2746 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2747 break;
2748 case 0x1:
2749 gen_op_iwmmxt_addub_M0_wRn(rd1);
2750 break;
2751 case 0x3:
2752 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2753 break;
2754 case 0x4:
2755 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2756 break;
2757 case 0x5:
2758 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2759 break;
2760 case 0x7:
2761 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2762 break;
2763 case 0x8:
2764 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2765 break;
2766 case 0x9:
2767 gen_op_iwmmxt_addul_M0_wRn(rd1);
2768 break;
2769 case 0xb:
2770 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2771 break;
2772 default:
2773 return 1;
2774 }
2775 gen_op_iwmmxt_movq_wRn_M0(wrd);
2776 gen_op_iwmmxt_set_mup();
2777 gen_op_iwmmxt_set_cup();
2778 break;
2779 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2780 case 0x408: case 0x508: case 0x608: case 0x708:
2781 case 0x808: case 0x908: case 0xa08: case 0xb08:
2782 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2783 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2784 return 1;
18c9b560
AZ
2785 wrd = (insn >> 12) & 0xf;
2786 rd0 = (insn >> 16) & 0xf;
2787 rd1 = (insn >> 0) & 0xf;
2788 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2789 switch ((insn >> 22) & 3) {
18c9b560
AZ
2790 case 1:
2791 if (insn & (1 << 21))
2792 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2793 else
2794 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2795 break;
2796 case 2:
2797 if (insn & (1 << 21))
2798 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2799 else
2800 gen_op_iwmmxt_packul_M0_wRn(rd1);
2801 break;
2802 case 3:
2803 if (insn & (1 << 21))
2804 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2805 else
2806 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2807 break;
2808 }
2809 gen_op_iwmmxt_movq_wRn_M0(wrd);
2810 gen_op_iwmmxt_set_mup();
2811 gen_op_iwmmxt_set_cup();
2812 break;
2813 case 0x201: case 0x203: case 0x205: case 0x207:
2814 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2815 case 0x211: case 0x213: case 0x215: case 0x217:
2816 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2817 wrd = (insn >> 5) & 0xf;
2818 rd0 = (insn >> 12) & 0xf;
2819 rd1 = (insn >> 0) & 0xf;
2820 if (rd0 == 0xf || rd1 == 0xf)
2821 return 1;
2822 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2823 tmp = load_reg(s, rd0);
2824 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2825 switch ((insn >> 16) & 0xf) {
2826 case 0x0: /* TMIA */
da6b5335 2827 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2828 break;
2829 case 0x8: /* TMIAPH */
da6b5335 2830 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2831 break;
2832 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2833 if (insn & (1 << 16))
da6b5335 2834 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2835 if (insn & (1 << 17))
da6b5335
FN
2836 tcg_gen_shri_i32(tmp2, tmp2, 16);
2837 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2838 break;
2839 default:
7d1b0095
PM
2840 tcg_temp_free_i32(tmp2);
2841 tcg_temp_free_i32(tmp);
18c9b560
AZ
2842 return 1;
2843 }
7d1b0095
PM
2844 tcg_temp_free_i32(tmp2);
2845 tcg_temp_free_i32(tmp);
18c9b560
AZ
2846 gen_op_iwmmxt_movq_wRn_M0(wrd);
2847 gen_op_iwmmxt_set_mup();
2848 break;
2849 default:
2850 return 1;
2851 }
2852
2853 return 0;
2854}
2855
a1c7273b 2856/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2857 (ie. an undefined instruction). */
7dcc1f89 2858static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2859{
2860 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2861 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2862
2863 if ((insn & 0x0ff00f10) == 0x0e200010) {
2864 /* Multiply with Internal Accumulate Format */
2865 rd0 = (insn >> 12) & 0xf;
2866 rd1 = insn & 0xf;
2867 acc = (insn >> 5) & 7;
2868
2869 if (acc != 0)
2870 return 1;
2871
3a554c0f
FN
2872 tmp = load_reg(s, rd0);
2873 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2874 switch ((insn >> 16) & 0xf) {
2875 case 0x0: /* MIA */
3a554c0f 2876 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2877 break;
2878 case 0x8: /* MIAPH */
3a554c0f 2879 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2880 break;
2881 case 0xc: /* MIABB */
2882 case 0xd: /* MIABT */
2883 case 0xe: /* MIATB */
2884 case 0xf: /* MIATT */
18c9b560 2885 if (insn & (1 << 16))
3a554c0f 2886 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2887 if (insn & (1 << 17))
3a554c0f
FN
2888 tcg_gen_shri_i32(tmp2, tmp2, 16);
2889 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2890 break;
2891 default:
2892 return 1;
2893 }
7d1b0095
PM
2894 tcg_temp_free_i32(tmp2);
2895 tcg_temp_free_i32(tmp);
18c9b560
AZ
2896
2897 gen_op_iwmmxt_movq_wRn_M0(acc);
2898 return 0;
2899 }
2900
2901 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2902 /* Internal Accumulator Access Format */
2903 rdhi = (insn >> 16) & 0xf;
2904 rdlo = (insn >> 12) & 0xf;
2905 acc = insn & 7;
2906
2907 if (acc != 0)
2908 return 1;
2909
2910 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2911 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2912 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2913 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2914 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2915 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2916 } else { /* MAR */
3a554c0f
FN
2917 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2918 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2919 }
2920 return 0;
2921 }
2922
2923 return 1;
2924}
2925
/* Extract VFP register numbers from an instruction.

   A single-precision register number is 5 bits: four bits in the main
   field plus one low bit stored elsewhere in the insn (smallbit).
   VFP_REG_SHR allows shifting by a negative count so VFP_SREG works for
   fields at any bit position, including bigbit == 0.

   A double-precision register number on VFP3 is the four main bits plus
   one HIGH bit (smallbit).  Pre-VFP3 cores only have 16 double registers,
   so if the high bit is set the VFP_DREG macros return 1 (UNDEF) from
   the calling function.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* D, N and M register fields of the VFP instruction encoding.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2945
4373f3ce 2946/* Move between integer and VFP cores. */
39d5492a 2947static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2948{
39d5492a 2949 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2950 tcg_gen_mov_i32(tmp, cpu_F0s);
2951 return tmp;
2952}
2953
39d5492a 2954static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2955{
2956 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2957 tcg_temp_free_i32(tmp);
4373f3ce
PB
2958}
2959
39d5492a 2960static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2961{
39d5492a 2962 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2963 if (shift)
2964 tcg_gen_shri_i32(var, var, shift);
86831435 2965 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2966 tcg_gen_shli_i32(tmp, var, 8);
2967 tcg_gen_or_i32(var, var, tmp);
2968 tcg_gen_shli_i32(tmp, var, 16);
2969 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2970 tcg_temp_free_i32(tmp);
ad69471c
PB
2971}
2972
39d5492a 2973static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2974{
39d5492a 2975 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2976 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2977 tcg_gen_shli_i32(tmp, var, 16);
2978 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2979 tcg_temp_free_i32(tmp);
ad69471c
PB
2980}
2981
39d5492a 2982static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2983{
39d5492a 2984 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2985 tcg_gen_andi_i32(var, var, 0xffff0000);
2986 tcg_gen_shri_i32(tmp, var, 16);
2987 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2988 tcg_temp_free_i32(tmp);
ad69471c
PB
2989}
2990
39d5492a 2991static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2992{
2993 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2994 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2995 switch (size) {
2996 case 0:
12dcc321 2997 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2998 gen_neon_dup_u8(tmp, 0);
2999 break;
3000 case 1:
12dcc321 3001 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
3002 gen_neon_dup_low16(tmp);
3003 break;
3004 case 2:
12dcc321 3005 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
3006 break;
3007 default: /* Avoid compiler warnings. */
3008 abort();
3009 }
3010 return tmp;
3011}
3012
04731fb5
WN
3013static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3014 uint32_t dp)
3015{
3016 uint32_t cc = extract32(insn, 20, 2);
3017
3018 if (dp) {
3019 TCGv_i64 frn, frm, dest;
3020 TCGv_i64 tmp, zero, zf, nf, vf;
3021
3022 zero = tcg_const_i64(0);
3023
3024 frn = tcg_temp_new_i64();
3025 frm = tcg_temp_new_i64();
3026 dest = tcg_temp_new_i64();
3027
3028 zf = tcg_temp_new_i64();
3029 nf = tcg_temp_new_i64();
3030 vf = tcg_temp_new_i64();
3031
3032 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3033 tcg_gen_ext_i32_i64(nf, cpu_NF);
3034 tcg_gen_ext_i32_i64(vf, cpu_VF);
3035
3036 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3037 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3038 switch (cc) {
3039 case 0: /* eq: Z */
3040 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3041 frn, frm);
3042 break;
3043 case 1: /* vs: V */
3044 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3045 frn, frm);
3046 break;
3047 case 2: /* ge: N == V -> N ^ V == 0 */
3048 tmp = tcg_temp_new_i64();
3049 tcg_gen_xor_i64(tmp, vf, nf);
3050 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3051 frn, frm);
3052 tcg_temp_free_i64(tmp);
3053 break;
3054 case 3: /* gt: !Z && N == V */
3055 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3056 frn, frm);
3057 tmp = tcg_temp_new_i64();
3058 tcg_gen_xor_i64(tmp, vf, nf);
3059 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3060 dest, frm);
3061 tcg_temp_free_i64(tmp);
3062 break;
3063 }
3064 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3065 tcg_temp_free_i64(frn);
3066 tcg_temp_free_i64(frm);
3067 tcg_temp_free_i64(dest);
3068
3069 tcg_temp_free_i64(zf);
3070 tcg_temp_free_i64(nf);
3071 tcg_temp_free_i64(vf);
3072
3073 tcg_temp_free_i64(zero);
3074 } else {
3075 TCGv_i32 frn, frm, dest;
3076 TCGv_i32 tmp, zero;
3077
3078 zero = tcg_const_i32(0);
3079
3080 frn = tcg_temp_new_i32();
3081 frm = tcg_temp_new_i32();
3082 dest = tcg_temp_new_i32();
3083 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3084 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3085 switch (cc) {
3086 case 0: /* eq: Z */
3087 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3088 frn, frm);
3089 break;
3090 case 1: /* vs: V */
3091 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3092 frn, frm);
3093 break;
3094 case 2: /* ge: N == V -> N ^ V == 0 */
3095 tmp = tcg_temp_new_i32();
3096 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3097 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3098 frn, frm);
3099 tcg_temp_free_i32(tmp);
3100 break;
3101 case 3: /* gt: !Z && N == V */
3102 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3103 frn, frm);
3104 tmp = tcg_temp_new_i32();
3105 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3106 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3107 dest, frm);
3108 tcg_temp_free_i32(tmp);
3109 break;
3110 }
3111 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3112 tcg_temp_free_i32(frn);
3113 tcg_temp_free_i32(frm);
3114 tcg_temp_free_i32(dest);
3115
3116 tcg_temp_free_i32(zero);
3117 }
3118
3119 return 0;
3120}
3121
40cfacdd
WN
3122static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3123 uint32_t rm, uint32_t dp)
3124{
3125 uint32_t vmin = extract32(insn, 6, 1);
3126 TCGv_ptr fpst = get_fpstatus_ptr(0);
3127
3128 if (dp) {
3129 TCGv_i64 frn, frm, dest;
3130
3131 frn = tcg_temp_new_i64();
3132 frm = tcg_temp_new_i64();
3133 dest = tcg_temp_new_i64();
3134
3135 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3136 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3137 if (vmin) {
f71a2ae5 3138 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3139 } else {
f71a2ae5 3140 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3141 }
3142 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3143 tcg_temp_free_i64(frn);
3144 tcg_temp_free_i64(frm);
3145 tcg_temp_free_i64(dest);
3146 } else {
3147 TCGv_i32 frn, frm, dest;
3148
3149 frn = tcg_temp_new_i32();
3150 frm = tcg_temp_new_i32();
3151 dest = tcg_temp_new_i32();
3152
3153 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3154 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3155 if (vmin) {
f71a2ae5 3156 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3157 } else {
f71a2ae5 3158 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3159 }
3160 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3161 tcg_temp_free_i32(frn);
3162 tcg_temp_free_i32(frm);
3163 tcg_temp_free_i32(dest);
3164 }
3165
3166 tcg_temp_free_ptr(fpst);
3167 return 0;
3168}
3169
7655f39b
WN
3170static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3171 int rounding)
3172{
3173 TCGv_ptr fpst = get_fpstatus_ptr(0);
3174 TCGv_i32 tcg_rmode;
3175
3176 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3177 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3178
3179 if (dp) {
3180 TCGv_i64 tcg_op;
3181 TCGv_i64 tcg_res;
3182 tcg_op = tcg_temp_new_i64();
3183 tcg_res = tcg_temp_new_i64();
3184 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3185 gen_helper_rintd(tcg_res, tcg_op, fpst);
3186 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3187 tcg_temp_free_i64(tcg_op);
3188 tcg_temp_free_i64(tcg_res);
3189 } else {
3190 TCGv_i32 tcg_op;
3191 TCGv_i32 tcg_res;
3192 tcg_op = tcg_temp_new_i32();
3193 tcg_res = tcg_temp_new_i32();
3194 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3195 gen_helper_rints(tcg_res, tcg_op, fpst);
3196 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3197 tcg_temp_free_i32(tcg_op);
3198 tcg_temp_free_i32(tcg_res);
3199 }
3200
9b049916 3201 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3202 tcg_temp_free_i32(tcg_rmode);
3203
3204 tcg_temp_free_ptr(fpst);
3205 return 0;
3206}
3207
c9975a83
WN
3208static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3209 int rounding)
3210{
3211 bool is_signed = extract32(insn, 7, 1);
3212 TCGv_ptr fpst = get_fpstatus_ptr(0);
3213 TCGv_i32 tcg_rmode, tcg_shift;
3214
3215 tcg_shift = tcg_const_i32(0);
3216
3217 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3218 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3219
3220 if (dp) {
3221 TCGv_i64 tcg_double, tcg_res;
3222 TCGv_i32 tcg_tmp;
3223 /* Rd is encoded as a single precision register even when the source
3224 * is double precision.
3225 */
3226 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3227 tcg_double = tcg_temp_new_i64();
3228 tcg_res = tcg_temp_new_i64();
3229 tcg_tmp = tcg_temp_new_i32();
3230 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3231 if (is_signed) {
3232 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3233 } else {
3234 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3235 }
ecc7b3aa 3236 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3237 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3238 tcg_temp_free_i32(tcg_tmp);
3239 tcg_temp_free_i64(tcg_res);
3240 tcg_temp_free_i64(tcg_double);
3241 } else {
3242 TCGv_i32 tcg_single, tcg_res;
3243 tcg_single = tcg_temp_new_i32();
3244 tcg_res = tcg_temp_new_i32();
3245 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3246 if (is_signed) {
3247 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3248 } else {
3249 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3250 }
3251 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3252 tcg_temp_free_i32(tcg_res);
3253 tcg_temp_free_i32(tcg_single);
3254 }
3255
9b049916 3256 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3257 tcg_temp_free_i32(tcg_rmode);
3258
3259 tcg_temp_free_i32(tcg_shift);
3260
3261 tcg_temp_free_ptr(fpst);
3262
3263 return 0;
3264}
7655f39b
WN
3265
3266/* Table for converting the most common AArch32 encoding of
3267 * rounding mode to arm_fprounding order (which matches the
3268 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3269 */
3270static const uint8_t fp_decode_rm[] = {
3271 FPROUNDING_TIEAWAY,
3272 FPROUNDING_TIEEVEN,
3273 FPROUNDING_POSINF,
3274 FPROUNDING_NEGINF,
3275};
3276
7dcc1f89 3277static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3278{
3279 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3280
d614a513 3281 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3282 return 1;
3283 }
3284
3285 if (dp) {
3286 VFP_DREG_D(rd, insn);
3287 VFP_DREG_N(rn, insn);
3288 VFP_DREG_M(rm, insn);
3289 } else {
3290 rd = VFP_SREG_D(insn);
3291 rn = VFP_SREG_N(insn);
3292 rm = VFP_SREG_M(insn);
3293 }
3294
3295 if ((insn & 0x0f800e50) == 0x0e000a00) {
3296 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3297 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3298 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3299 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3300 /* VRINTA, VRINTN, VRINTP, VRINTM */
3301 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3302 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3303 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3304 /* VCVTA, VCVTN, VCVTP, VCVTM */
3305 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3306 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3307 }
3308 return 1;
3309}
3310
a1c7273b 3311/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3312 (ie. an undefined instruction). */
7dcc1f89 3313static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3314{
3315 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3316 int dp, veclen;
39d5492a
PM
3317 TCGv_i32 addr;
3318 TCGv_i32 tmp;
3319 TCGv_i32 tmp2;
b7bcbe95 3320
d614a513 3321 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3322 return 1;
d614a513 3323 }
40f137e1 3324
2c7ffc41
PM
3325 /* FIXME: this access check should not take precedence over UNDEF
3326 * for invalid encodings; we will generate incorrect syndrome information
3327 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3328 */
9dbbc748 3329 if (s->fp_excp_el) {
2c7ffc41 3330 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3331 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3332 return 0;
3333 }
3334
5df8bac1 3335 if (!s->vfp_enabled) {
9ee6e8bb 3336 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3337 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3338 return 1;
3339 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3340 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3341 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3342 return 1;
a50c0f51 3343 }
40f137e1 3344 }
6a57f3eb
WN
3345
3346 if (extract32(insn, 28, 4) == 0xf) {
3347 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3348 * only used in v8 and above.
3349 */
7dcc1f89 3350 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3351 }
3352
b7bcbe95
FB
3353 dp = ((insn & 0xf00) == 0xb00);
3354 switch ((insn >> 24) & 0xf) {
3355 case 0xe:
3356 if (insn & (1 << 4)) {
3357 /* single register transfer */
b7bcbe95
FB
3358 rd = (insn >> 12) & 0xf;
3359 if (dp) {
9ee6e8bb
PB
3360 int size;
3361 int pass;
3362
3363 VFP_DREG_N(rn, insn);
3364 if (insn & 0xf)
b7bcbe95 3365 return 1;
9ee6e8bb 3366 if (insn & 0x00c00060
d614a513 3367 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3368 return 1;
d614a513 3369 }
9ee6e8bb
PB
3370
3371 pass = (insn >> 21) & 1;
3372 if (insn & (1 << 22)) {
3373 size = 0;
3374 offset = ((insn >> 5) & 3) * 8;
3375 } else if (insn & (1 << 5)) {
3376 size = 1;
3377 offset = (insn & (1 << 6)) ? 16 : 0;
3378 } else {
3379 size = 2;
3380 offset = 0;
3381 }
18c9b560 3382 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3383 /* vfp->arm */
ad69471c 3384 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3385 switch (size) {
3386 case 0:
9ee6e8bb 3387 if (offset)
ad69471c 3388 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3389 if (insn & (1 << 23))
ad69471c 3390 gen_uxtb(tmp);
9ee6e8bb 3391 else
ad69471c 3392 gen_sxtb(tmp);
9ee6e8bb
PB
3393 break;
3394 case 1:
9ee6e8bb
PB
3395 if (insn & (1 << 23)) {
3396 if (offset) {
ad69471c 3397 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3398 } else {
ad69471c 3399 gen_uxth(tmp);
9ee6e8bb
PB
3400 }
3401 } else {
3402 if (offset) {
ad69471c 3403 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3404 } else {
ad69471c 3405 gen_sxth(tmp);
9ee6e8bb
PB
3406 }
3407 }
3408 break;
3409 case 2:
9ee6e8bb
PB
3410 break;
3411 }
ad69471c 3412 store_reg(s, rd, tmp);
b7bcbe95
FB
3413 } else {
3414 /* arm->vfp */
ad69471c 3415 tmp = load_reg(s, rd);
9ee6e8bb
PB
3416 if (insn & (1 << 23)) {
3417 /* VDUP */
3418 if (size == 0) {
ad69471c 3419 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3420 } else if (size == 1) {
ad69471c 3421 gen_neon_dup_low16(tmp);
9ee6e8bb 3422 }
cbbccffc 3423 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3424 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3425 tcg_gen_mov_i32(tmp2, tmp);
3426 neon_store_reg(rn, n, tmp2);
3427 }
3428 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3429 } else {
3430 /* VMOV */
3431 switch (size) {
3432 case 0:
ad69471c 3433 tmp2 = neon_load_reg(rn, pass);
d593c48e 3434 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3435 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3436 break;
3437 case 1:
ad69471c 3438 tmp2 = neon_load_reg(rn, pass);
d593c48e 3439 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3440 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3441 break;
3442 case 2:
9ee6e8bb
PB
3443 break;
3444 }
ad69471c 3445 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3446 }
b7bcbe95 3447 }
9ee6e8bb
PB
3448 } else { /* !dp */
3449 if ((insn & 0x6f) != 0x00)
3450 return 1;
3451 rn = VFP_SREG_N(insn);
18c9b560 3452 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3453 /* vfp->arm */
3454 if (insn & (1 << 21)) {
3455 /* system register */
40f137e1 3456 rn >>= 1;
9ee6e8bb 3457
b7bcbe95 3458 switch (rn) {
40f137e1 3459 case ARM_VFP_FPSID:
4373f3ce 3460 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3461 VFP3 restricts all id registers to privileged
3462 accesses. */
3463 if (IS_USER(s)
d614a513 3464 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3465 return 1;
d614a513 3466 }
4373f3ce 3467 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3468 break;
40f137e1 3469 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3470 if (IS_USER(s))
3471 return 1;
4373f3ce 3472 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3473 break;
40f137e1
PB
3474 case ARM_VFP_FPINST:
3475 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3476 /* Not present in VFP3. */
3477 if (IS_USER(s)
d614a513 3478 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3479 return 1;
d614a513 3480 }
4373f3ce 3481 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3482 break;
40f137e1 3483 case ARM_VFP_FPSCR:
601d70b9 3484 if (rd == 15) {
4373f3ce
PB
3485 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3486 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3487 } else {
7d1b0095 3488 tmp = tcg_temp_new_i32();
4373f3ce
PB
3489 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3490 }
b7bcbe95 3491 break;
a50c0f51 3492 case ARM_VFP_MVFR2:
d614a513 3493 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3494 return 1;
3495 }
3496 /* fall through */
9ee6e8bb
PB
3497 case ARM_VFP_MVFR0:
3498 case ARM_VFP_MVFR1:
3499 if (IS_USER(s)
d614a513 3500 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3501 return 1;
d614a513 3502 }
4373f3ce 3503 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3504 break;
b7bcbe95
FB
3505 default:
3506 return 1;
3507 }
3508 } else {
3509 gen_mov_F0_vreg(0, rn);
4373f3ce 3510 tmp = gen_vfp_mrs();
b7bcbe95
FB
3511 }
3512 if (rd == 15) {
b5ff1b31 3513 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3514 gen_set_nzcv(tmp);
7d1b0095 3515 tcg_temp_free_i32(tmp);
4373f3ce
PB
3516 } else {
3517 store_reg(s, rd, tmp);
3518 }
b7bcbe95
FB
3519 } else {
3520 /* arm->vfp */
b7bcbe95 3521 if (insn & (1 << 21)) {
40f137e1 3522 rn >>= 1;
b7bcbe95
FB
3523 /* system register */
3524 switch (rn) {
40f137e1 3525 case ARM_VFP_FPSID:
9ee6e8bb
PB
3526 case ARM_VFP_MVFR0:
3527 case ARM_VFP_MVFR1:
b7bcbe95
FB
3528 /* Writes are ignored. */
3529 break;
40f137e1 3530 case ARM_VFP_FPSCR:
e4c1cfa5 3531 tmp = load_reg(s, rd);
4373f3ce 3532 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3533 tcg_temp_free_i32(tmp);
b5ff1b31 3534 gen_lookup_tb(s);
b7bcbe95 3535 break;
40f137e1 3536 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3537 if (IS_USER(s))
3538 return 1;
71b3c3de
JR
3539 /* TODO: VFP subarchitecture support.
3540 * For now, keep the EN bit only */
e4c1cfa5 3541 tmp = load_reg(s, rd);
71b3c3de 3542 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3543 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3544 gen_lookup_tb(s);
3545 break;
3546 case ARM_VFP_FPINST:
3547 case ARM_VFP_FPINST2:
23adb861
PM
3548 if (IS_USER(s)) {
3549 return 1;
3550 }
e4c1cfa5 3551 tmp = load_reg(s, rd);
4373f3ce 3552 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3553 break;
b7bcbe95
FB
3554 default:
3555 return 1;
3556 }
3557 } else {
e4c1cfa5 3558 tmp = load_reg(s, rd);
4373f3ce 3559 gen_vfp_msr(tmp);
b7bcbe95
FB
3560 gen_mov_vreg_F0(0, rn);
3561 }
3562 }
3563 }
3564 } else {
3565 /* data processing */
3566 /* The opcode is in bits 23, 21, 20 and 6. */
3567 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3568 if (dp) {
3569 if (op == 15) {
3570 /* rn is opcode */
3571 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3572 } else {
3573 /* rn is register number */
9ee6e8bb 3574 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3575 }
3576
239c20c7
WN
3577 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3578 ((rn & 0x1e) == 0x6))) {
3579 /* Integer or single/half precision destination. */
9ee6e8bb 3580 rd = VFP_SREG_D(insn);
b7bcbe95 3581 } else {
9ee6e8bb 3582 VFP_DREG_D(rd, insn);
b7bcbe95 3583 }
04595bf6 3584 if (op == 15 &&
239c20c7
WN
3585 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3586 ((rn & 0x1e) == 0x4))) {
3587 /* VCVT from int or half precision is always from S reg
3588 * regardless of dp bit. VCVT with immediate frac_bits
3589 * has same format as SREG_M.
04595bf6
PM
3590 */
3591 rm = VFP_SREG_M(insn);
b7bcbe95 3592 } else {
9ee6e8bb 3593 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3594 }
3595 } else {
9ee6e8bb 3596 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3597 if (op == 15 && rn == 15) {
3598 /* Double precision destination. */
9ee6e8bb
PB
3599 VFP_DREG_D(rd, insn);
3600 } else {
3601 rd = VFP_SREG_D(insn);
3602 }
04595bf6
PM
3603 /* NB that we implicitly rely on the encoding for the frac_bits
3604 * in VCVT of fixed to float being the same as that of an SREG_M
3605 */
9ee6e8bb 3606 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3607 }
3608
69d1fc22 3609 veclen = s->vec_len;
b7bcbe95
FB
3610 if (op == 15 && rn > 3)
3611 veclen = 0;
3612
3613 /* Shut up compiler warnings. */
3614 delta_m = 0;
3615 delta_d = 0;
3616 bank_mask = 0;
3b46e624 3617
b7bcbe95
FB
3618 if (veclen > 0) {
3619 if (dp)
3620 bank_mask = 0xc;
3621 else
3622 bank_mask = 0x18;
3623
3624 /* Figure out what type of vector operation this is. */
3625 if ((rd & bank_mask) == 0) {
3626 /* scalar */
3627 veclen = 0;
3628 } else {
3629 if (dp)
69d1fc22 3630 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3631 else
69d1fc22 3632 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3633
3634 if ((rm & bank_mask) == 0) {
3635 /* mixed scalar/vector */
3636 delta_m = 0;
3637 } else {
3638 /* vector */
3639 delta_m = delta_d;
3640 }
3641 }
3642 }
3643
3644 /* Load the initial operands. */
3645 if (op == 15) {
3646 switch (rn) {
3647 case 16:
3648 case 17:
3649 /* Integer source */
3650 gen_mov_F0_vreg(0, rm);
3651 break;
3652 case 8:
3653 case 9:
3654 /* Compare */
3655 gen_mov_F0_vreg(dp, rd);
3656 gen_mov_F1_vreg(dp, rm);
3657 break;
3658 case 10:
3659 case 11:
3660 /* Compare with zero */
3661 gen_mov_F0_vreg(dp, rd);
3662 gen_vfp_F1_ld0(dp);
3663 break;
9ee6e8bb
PB
3664 case 20:
3665 case 21:
3666 case 22:
3667 case 23:
644ad806
PB
3668 case 28:
3669 case 29:
3670 case 30:
3671 case 31:
9ee6e8bb
PB
3672 /* Source and destination the same. */
3673 gen_mov_F0_vreg(dp, rd);
3674 break;
6e0c0ed1
PM
3675 case 4:
3676 case 5:
3677 case 6:
3678 case 7:
239c20c7
WN
3679 /* VCVTB, VCVTT: only present with the halfprec extension
3680 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3681 * (we choose to UNDEF)
6e0c0ed1 3682 */
d614a513
PM
3683 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3684 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3685 return 1;
3686 }
239c20c7
WN
3687 if (!extract32(rn, 1, 1)) {
3688 /* Half precision source. */
3689 gen_mov_F0_vreg(0, rm);
3690 break;
3691 }
6e0c0ed1 3692 /* Otherwise fall through */
b7bcbe95
FB
3693 default:
3694 /* One source operand. */
3695 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3696 break;
b7bcbe95
FB
3697 }
3698 } else {
3699 /* Two source operands. */
3700 gen_mov_F0_vreg(dp, rn);
3701 gen_mov_F1_vreg(dp, rm);
3702 }
3703
3704 for (;;) {
3705 /* Perform the calculation. */
3706 switch (op) {
605a6aed
PM
3707 case 0: /* VMLA: fd + (fn * fm) */
3708 /* Note that order of inputs to the add matters for NaNs */
3709 gen_vfp_F1_mul(dp);
3710 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3711 gen_vfp_add(dp);
3712 break;
605a6aed 3713 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3714 gen_vfp_mul(dp);
605a6aed
PM
3715 gen_vfp_F1_neg(dp);
3716 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3717 gen_vfp_add(dp);
3718 break;
605a6aed
PM
3719 case 2: /* VNMLS: -fd + (fn * fm) */
3720 /* Note that it isn't valid to replace (-A + B) with (B - A)
3721 * or similar plausible looking simplifications
3722 * because this will give wrong results for NaNs.
3723 */
3724 gen_vfp_F1_mul(dp);
3725 gen_mov_F0_vreg(dp, rd);
3726 gen_vfp_neg(dp);
3727 gen_vfp_add(dp);
b7bcbe95 3728 break;
605a6aed 3729 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3730 gen_vfp_mul(dp);
605a6aed
PM
3731 gen_vfp_F1_neg(dp);
3732 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3733 gen_vfp_neg(dp);
605a6aed 3734 gen_vfp_add(dp);
b7bcbe95
FB
3735 break;
3736 case 4: /* mul: fn * fm */
3737 gen_vfp_mul(dp);
3738 break;
3739 case 5: /* nmul: -(fn * fm) */
3740 gen_vfp_mul(dp);
3741 gen_vfp_neg(dp);
3742 break;
3743 case 6: /* add: fn + fm */
3744 gen_vfp_add(dp);
3745 break;
3746 case 7: /* sub: fn - fm */
3747 gen_vfp_sub(dp);
3748 break;
3749 case 8: /* div: fn / fm */
3750 gen_vfp_div(dp);
3751 break;
da97f52c
PM
3752 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3753 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3754 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3755 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3756 /* These are fused multiply-add, and must be done as one
3757 * floating point operation with no rounding between the
3758 * multiplication and addition steps.
3759 * NB that doing the negations here as separate steps is
3760 * correct : an input NaN should come out with its sign bit
3761 * flipped if it is a negated-input.
3762 */
d614a513 3763 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3764 return 1;
3765 }
3766 if (dp) {
3767 TCGv_ptr fpst;
3768 TCGv_i64 frd;
3769 if (op & 1) {
3770 /* VFNMS, VFMS */
3771 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3772 }
3773 frd = tcg_temp_new_i64();
3774 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3775 if (op & 2) {
3776 /* VFNMA, VFNMS */
3777 gen_helper_vfp_negd(frd, frd);
3778 }
3779 fpst = get_fpstatus_ptr(0);
3780 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3781 cpu_F1d, frd, fpst);
3782 tcg_temp_free_ptr(fpst);
3783 tcg_temp_free_i64(frd);
3784 } else {
3785 TCGv_ptr fpst;
3786 TCGv_i32 frd;
3787 if (op & 1) {
3788 /* VFNMS, VFMS */
3789 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3790 }
3791 frd = tcg_temp_new_i32();
3792 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3793 if (op & 2) {
3794 gen_helper_vfp_negs(frd, frd);
3795 }
3796 fpst = get_fpstatus_ptr(0);
3797 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3798 cpu_F1s, frd, fpst);
3799 tcg_temp_free_ptr(fpst);
3800 tcg_temp_free_i32(frd);
3801 }
3802 break;
9ee6e8bb 3803 case 14: /* fconst */
d614a513
PM
3804 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3805 return 1;
3806 }
9ee6e8bb
PB
3807
3808 n = (insn << 12) & 0x80000000;
3809 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3810 if (dp) {
3811 if (i & 0x40)
3812 i |= 0x3f80;
3813 else
3814 i |= 0x4000;
3815 n |= i << 16;
4373f3ce 3816 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3817 } else {
3818 if (i & 0x40)
3819 i |= 0x780;
3820 else
3821 i |= 0x800;
3822 n |= i << 19;
5b340b51 3823 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3824 }
9ee6e8bb 3825 break;
b7bcbe95
FB
3826 case 15: /* extension space */
3827 switch (rn) {
3828 case 0: /* cpy */
3829 /* no-op */
3830 break;
3831 case 1: /* abs */
3832 gen_vfp_abs(dp);
3833 break;
3834 case 2: /* neg */
3835 gen_vfp_neg(dp);
3836 break;
3837 case 3: /* sqrt */
3838 gen_vfp_sqrt(dp);
3839 break;
239c20c7 3840 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
486624fc
AB
3841 {
3842 TCGv_ptr fpst = get_fpstatus_ptr(false);
3843 TCGv_i32 ahp_mode = get_ahp_flag();
60011498
PB
3844 tmp = gen_vfp_mrs();
3845 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3846 if (dp) {
3847 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3848 fpst, ahp_mode);
239c20c7
WN
3849 } else {
3850 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3851 fpst, ahp_mode);
239c20c7 3852 }
486624fc
AB
3853 tcg_temp_free_i32(ahp_mode);
3854 tcg_temp_free_ptr(fpst);
7d1b0095 3855 tcg_temp_free_i32(tmp);
60011498 3856 break;
486624fc 3857 }
239c20c7 3858 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
486624fc
AB
3859 {
3860 TCGv_ptr fpst = get_fpstatus_ptr(false);
3861 TCGv_i32 ahp = get_ahp_flag();
60011498
PB
3862 tmp = gen_vfp_mrs();
3863 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3864 if (dp) {
3865 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3866 fpst, ahp);
239c20c7
WN
3867 } else {
3868 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3869 fpst, ahp);
239c20c7 3870 }
7d1b0095 3871 tcg_temp_free_i32(tmp);
486624fc
AB
3872 tcg_temp_free_i32(ahp);
3873 tcg_temp_free_ptr(fpst);
60011498 3874 break;
486624fc 3875 }
239c20c7 3876 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
486624fc
AB
3877 {
3878 TCGv_ptr fpst = get_fpstatus_ptr(false);
3879 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3880 tmp = tcg_temp_new_i32();
486624fc 3881
239c20c7
WN
3882 if (dp) {
3883 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3884 fpst, ahp);
239c20c7
WN
3885 } else {
3886 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3887 fpst, ahp);
239c20c7 3888 }
486624fc
AB
3889 tcg_temp_free_i32(ahp);
3890 tcg_temp_free_ptr(fpst);
60011498
PB
3891 gen_mov_F0_vreg(0, rd);
3892 tmp2 = gen_vfp_mrs();
3893 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3894 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3895 tcg_temp_free_i32(tmp2);
60011498
PB
3896 gen_vfp_msr(tmp);
3897 break;
486624fc 3898 }
239c20c7 3899 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
486624fc
AB
3900 {
3901 TCGv_ptr fpst = get_fpstatus_ptr(false);
3902 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3903 tmp = tcg_temp_new_i32();
239c20c7
WN
3904 if (dp) {
3905 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3906 fpst, ahp);
239c20c7
WN
3907 } else {
3908 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3909 fpst, ahp);
239c20c7 3910 }
486624fc
AB
3911 tcg_temp_free_i32(ahp);
3912 tcg_temp_free_ptr(fpst);
60011498
PB
3913 tcg_gen_shli_i32(tmp, tmp, 16);
3914 gen_mov_F0_vreg(0, rd);
3915 tmp2 = gen_vfp_mrs();
3916 tcg_gen_ext16u_i32(tmp2, tmp2);
3917 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3918 tcg_temp_free_i32(tmp2);
60011498
PB
3919 gen_vfp_msr(tmp);
3920 break;
486624fc 3921 }
b7bcbe95
FB
3922 case 8: /* cmp */
3923 gen_vfp_cmp(dp);
3924 break;
3925 case 9: /* cmpe */
3926 gen_vfp_cmpe(dp);
3927 break;
3928 case 10: /* cmpz */
3929 gen_vfp_cmp(dp);
3930 break;
3931 case 11: /* cmpez */
3932 gen_vfp_F1_ld0(dp);
3933 gen_vfp_cmpe(dp);
3934 break;
664c6733
WN
3935 case 12: /* vrintr */
3936 {
3937 TCGv_ptr fpst = get_fpstatus_ptr(0);
3938 if (dp) {
3939 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3940 } else {
3941 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3942 }
3943 tcg_temp_free_ptr(fpst);
3944 break;
3945 }
a290c62a
WN
3946 case 13: /* vrintz */
3947 {
3948 TCGv_ptr fpst = get_fpstatus_ptr(0);
3949 TCGv_i32 tcg_rmode;
3950 tcg_rmode = tcg_const_i32(float_round_to_zero);
9b049916 3951 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3952 if (dp) {
3953 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3954 } else {
3955 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3956 }
9b049916 3957 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3958 tcg_temp_free_i32(tcg_rmode);
3959 tcg_temp_free_ptr(fpst);
3960 break;
3961 }
4e82bc01
WN
3962 case 14: /* vrintx */
3963 {
3964 TCGv_ptr fpst = get_fpstatus_ptr(0);
3965 if (dp) {
3966 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3967 } else {
3968 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3969 }
3970 tcg_temp_free_ptr(fpst);
3971 break;
3972 }
b7bcbe95
FB
3973 case 15: /* single<->double conversion */
3974 if (dp)
4373f3ce 3975 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3976 else
4373f3ce 3977 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3978 break;
3979 case 16: /* fuito */
5500b06c 3980 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3981 break;
3982 case 17: /* fsito */
5500b06c 3983 gen_vfp_sito(dp, 0);
b7bcbe95 3984 break;
9ee6e8bb 3985 case 20: /* fshto */
d614a513
PM
3986 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3987 return 1;
3988 }
5500b06c 3989 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3990 break;
3991 case 21: /* fslto */
d614a513
PM
3992 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3993 return 1;
3994 }
5500b06c 3995 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3996 break;
3997 case 22: /* fuhto */
d614a513
PM
3998 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3999 return 1;
4000 }
5500b06c 4001 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
4002 break;
4003 case 23: /* fulto */
d614a513
PM
4004 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4005 return 1;
4006 }
5500b06c 4007 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 4008 break;
b7bcbe95 4009 case 24: /* ftoui */
5500b06c 4010 gen_vfp_toui(dp, 0);
b7bcbe95
FB
4011 break;
4012 case 25: /* ftouiz */
5500b06c 4013 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
4014 break;
4015 case 26: /* ftosi */
5500b06c 4016 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
4017 break;
4018 case 27: /* ftosiz */
5500b06c 4019 gen_vfp_tosiz(dp, 0);
b7bcbe95 4020 break;
9ee6e8bb 4021 case 28: /* ftosh */
d614a513
PM
4022 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4023 return 1;
4024 }
5500b06c 4025 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
4026 break;
4027 case 29: /* ftosl */
d614a513
PM
4028 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4029 return 1;
4030 }
5500b06c 4031 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
4032 break;
4033 case 30: /* ftouh */
d614a513
PM
4034 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4035 return 1;
4036 }
5500b06c 4037 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
4038 break;
4039 case 31: /* ftoul */
d614a513
PM
4040 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4041 return 1;
4042 }
5500b06c 4043 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 4044 break;
b7bcbe95 4045 default: /* undefined */
b7bcbe95
FB
4046 return 1;
4047 }
4048 break;
4049 default: /* undefined */
b7bcbe95
FB
4050 return 1;
4051 }
4052
4053 /* Write back the result. */
239c20c7
WN
4054 if (op == 15 && (rn >= 8 && rn <= 11)) {
4055 /* Comparison, do nothing. */
4056 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4057 (rn & 0x1e) == 0x6)) {
4058 /* VCVT double to int: always integer result.
4059 * VCVT double to half precision is always a single
4060 * precision result.
4061 */
b7bcbe95 4062 gen_mov_vreg_F0(0, rd);
239c20c7 4063 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
4064 /* conversion */
4065 gen_mov_vreg_F0(!dp, rd);
239c20c7 4066 } else {
b7bcbe95 4067 gen_mov_vreg_F0(dp, rd);
239c20c7 4068 }
b7bcbe95
FB
4069
4070 /* break out of the loop if we have finished */
4071 if (veclen == 0)
4072 break;
4073
4074 if (op == 15 && delta_m == 0) {
4075 /* single source one-many */
4076 while (veclen--) {
4077 rd = ((rd + delta_d) & (bank_mask - 1))
4078 | (rd & bank_mask);
4079 gen_mov_vreg_F0(dp, rd);
4080 }
4081 break;
4082 }
4083 /* Setup the next operands. */
4084 veclen--;
4085 rd = ((rd + delta_d) & (bank_mask - 1))
4086 | (rd & bank_mask);
4087
4088 if (op == 15) {
4089 /* One source operand. */
4090 rm = ((rm + delta_m) & (bank_mask - 1))
4091 | (rm & bank_mask);
4092 gen_mov_F0_vreg(dp, rm);
4093 } else {
4094 /* Two source operands. */
4095 rn = ((rn + delta_d) & (bank_mask - 1))
4096 | (rn & bank_mask);
4097 gen_mov_F0_vreg(dp, rn);
4098 if (delta_m) {
4099 rm = ((rm + delta_m) & (bank_mask - 1))
4100 | (rm & bank_mask);
4101 gen_mov_F1_vreg(dp, rm);
4102 }
4103 }
4104 }
4105 }
4106 break;
4107 case 0xc:
4108 case 0xd:
8387da81 4109 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4110 /* two-register transfer */
4111 rn = (insn >> 16) & 0xf;
4112 rd = (insn >> 12) & 0xf;
4113 if (dp) {
9ee6e8bb
PB
4114 VFP_DREG_M(rm, insn);
4115 } else {
4116 rm = VFP_SREG_M(insn);
4117 }
b7bcbe95 4118
18c9b560 4119 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4120 /* vfp->arm */
4121 if (dp) {
4373f3ce
PB
4122 gen_mov_F0_vreg(0, rm * 2);
4123 tmp = gen_vfp_mrs();
4124 store_reg(s, rd, tmp);
4125 gen_mov_F0_vreg(0, rm * 2 + 1);
4126 tmp = gen_vfp_mrs();
4127 store_reg(s, rn, tmp);
b7bcbe95
FB
4128 } else {
4129 gen_mov_F0_vreg(0, rm);
4373f3ce 4130 tmp = gen_vfp_mrs();
8387da81 4131 store_reg(s, rd, tmp);
b7bcbe95 4132 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4133 tmp = gen_vfp_mrs();
8387da81 4134 store_reg(s, rn, tmp);
b7bcbe95
FB
4135 }
4136 } else {
4137 /* arm->vfp */
4138 if (dp) {
4373f3ce
PB
4139 tmp = load_reg(s, rd);
4140 gen_vfp_msr(tmp);
4141 gen_mov_vreg_F0(0, rm * 2);
4142 tmp = load_reg(s, rn);
4143 gen_vfp_msr(tmp);
4144 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4145 } else {
8387da81 4146 tmp = load_reg(s, rd);
4373f3ce 4147 gen_vfp_msr(tmp);
b7bcbe95 4148 gen_mov_vreg_F0(0, rm);
8387da81 4149 tmp = load_reg(s, rn);
4373f3ce 4150 gen_vfp_msr(tmp);
b7bcbe95
FB
4151 gen_mov_vreg_F0(0, rm + 1);
4152 }
4153 }
4154 } else {
4155 /* Load/store */
4156 rn = (insn >> 16) & 0xf;
4157 if (dp)
9ee6e8bb 4158 VFP_DREG_D(rd, insn);
b7bcbe95 4159 else
9ee6e8bb 4160 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4161 if ((insn & 0x01200000) == 0x01000000) {
4162 /* Single load/store */
4163 offset = (insn & 0xff) << 2;
4164 if ((insn & (1 << 23)) == 0)
4165 offset = -offset;
934814f1
PM
4166 if (s->thumb && rn == 15) {
4167 /* This is actually UNPREDICTABLE */
4168 addr = tcg_temp_new_i32();
4169 tcg_gen_movi_i32(addr, s->pc & ~2);
4170 } else {
4171 addr = load_reg(s, rn);
4172 }
312eea9f 4173 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4174 if (insn & (1 << 20)) {
312eea9f 4175 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4176 gen_mov_vreg_F0(dp, rd);
4177 } else {
4178 gen_mov_F0_vreg(dp, rd);
312eea9f 4179 gen_vfp_st(s, dp, addr);
b7bcbe95 4180 }
7d1b0095 4181 tcg_temp_free_i32(addr);
b7bcbe95
FB
4182 } else {
4183 /* load/store multiple */
934814f1 4184 int w = insn & (1 << 21);
b7bcbe95
FB
4185 if (dp)
4186 n = (insn >> 1) & 0x7f;
4187 else
4188 n = insn & 0xff;
4189
934814f1
PM
4190 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4191 /* P == U , W == 1 => UNDEF */
4192 return 1;
4193 }
4194 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4195 /* UNPREDICTABLE cases for bad immediates: we choose to
4196 * UNDEF to avoid generating huge numbers of TCG ops
4197 */
4198 return 1;
4199 }
4200 if (rn == 15 && w) {
4201 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4202 return 1;
4203 }
4204
4205 if (s->thumb && rn == 15) {
4206 /* This is actually UNPREDICTABLE */
4207 addr = tcg_temp_new_i32();
4208 tcg_gen_movi_i32(addr, s->pc & ~2);
4209 } else {
4210 addr = load_reg(s, rn);
4211 }
b7bcbe95 4212 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4213 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4214
4215 if (dp)
4216 offset = 8;
4217 else
4218 offset = 4;
4219 for (i = 0; i < n; i++) {
18c9b560 4220 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4221 /* load */
312eea9f 4222 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4223 gen_mov_vreg_F0(dp, rd + i);
4224 } else {
4225 /* store */
4226 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4227 gen_vfp_st(s, dp, addr);
b7bcbe95 4228 }
312eea9f 4229 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4230 }
934814f1 4231 if (w) {
b7bcbe95
FB
4232 /* writeback */
4233 if (insn & (1 << 24))
4234 offset = -offset * n;
4235 else if (dp && (insn & 1))
4236 offset = 4;
4237 else
4238 offset = 0;
4239
4240 if (offset != 0)
312eea9f
FN
4241 tcg_gen_addi_i32(addr, addr, offset);
4242 store_reg(s, rn, addr);
4243 } else {
7d1b0095 4244 tcg_temp_free_i32(addr);
b7bcbe95
FB
4245 }
4246 }
4247 }
4248 break;
4249 default:
4250 /* Should never happen. */
4251 return 1;
4252 }
4253 return 0;
4254}
4255
90aa39a1 4256static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4257{
90aa39a1 4258#ifndef CONFIG_USER_ONLY
dcba3a8d 4259 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4260 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4261#else
4262 return true;
4263#endif
4264}
6e256c93 4265
8a6b28c7
EC
4266static void gen_goto_ptr(void)
4267{
7f11636d 4268 tcg_gen_lookup_and_goto_ptr();
8a6b28c7
EC
4269}
4270
4cae8f56
AB
4271/* This will end the TB but doesn't guarantee we'll return to
4272 * cpu_loop_exec. Any live exit_requests will be processed as we
4273 * enter the next TB.
4274 */
8a6b28c7 4275static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
4276{
4277 if (use_goto_tb(s, dest)) {
57fec1fe 4278 tcg_gen_goto_tb(n);
eaed129d 4279 gen_set_pc_im(s, dest);
07ea28b4 4280 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 4281 } else {
eaed129d 4282 gen_set_pc_im(s, dest);
8a6b28c7 4283 gen_goto_ptr();
6e256c93 4284 }
dcba3a8d 4285 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
4286}
4287
8aaca4c0
FB
4288static inline void gen_jmp (DisasContext *s, uint32_t dest)
4289{
b636649f 4290 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4291 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4292 if (s->thumb)
d9ba4830
PB
4293 dest |= 1;
4294 gen_bx_im(s, dest);
8aaca4c0 4295 } else {
6e256c93 4296 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4297 }
4298}
4299
39d5492a 4300static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4301{
ee097184 4302 if (x)
d9ba4830 4303 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4304 else
d9ba4830 4305 gen_sxth(t0);
ee097184 4306 if (y)
d9ba4830 4307 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4308 else
d9ba4830
PB
4309 gen_sxth(t1);
4310 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4311}
4312
4313/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4314static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4315{
b5ff1b31
FB
4316 uint32_t mask;
4317
4318 mask = 0;
4319 if (flags & (1 << 0))
4320 mask |= 0xff;
4321 if (flags & (1 << 1))
4322 mask |= 0xff00;
4323 if (flags & (1 << 2))
4324 mask |= 0xff0000;
4325 if (flags & (1 << 3))
4326 mask |= 0xff000000;
9ee6e8bb 4327
2ae23e75 4328 /* Mask out undefined bits. */
9ee6e8bb 4329 mask &= ~CPSR_RESERVED;
d614a513 4330 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4331 mask &= ~CPSR_T;
d614a513
PM
4332 }
4333 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4334 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4335 }
4336 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4337 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4338 }
4339 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4340 mask &= ~CPSR_IT;
d614a513 4341 }
4051e12c
PM
4342 /* Mask out execution state and reserved bits. */
4343 if (!spsr) {
4344 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4345 }
b5ff1b31
FB
4346 /* Mask out privileged bits. */
4347 if (IS_USER(s))
9ee6e8bb 4348 mask &= CPSR_USER;
b5ff1b31
FB
4349 return mask;
4350}
4351
2fbac54b 4352/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4353static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4354{
39d5492a 4355 TCGv_i32 tmp;
b5ff1b31
FB
4356 if (spsr) {
4357 /* ??? This is also undefined in system mode. */
4358 if (IS_USER(s))
4359 return 1;
d9ba4830
PB
4360
4361 tmp = load_cpu_field(spsr);
4362 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4363 tcg_gen_andi_i32(t0, t0, mask);
4364 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4365 store_cpu_field(tmp, spsr);
b5ff1b31 4366 } else {
2fbac54b 4367 gen_set_cpsr(t0, mask);
b5ff1b31 4368 }
7d1b0095 4369 tcg_temp_free_i32(t0);
b5ff1b31
FB
4370 gen_lookup_tb(s);
4371 return 0;
4372}
4373
2fbac54b
FN
4374/* Returns nonzero if access to the PSR is not permitted. */
4375static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4376{
39d5492a 4377 TCGv_i32 tmp;
7d1b0095 4378 tmp = tcg_temp_new_i32();
2fbac54b
FN
4379 tcg_gen_movi_i32(tmp, val);
4380 return gen_set_psr(s, mask, spsr, tmp);
4381}
4382
8bfd0550
PM
4383static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4384 int *tgtmode, int *regno)
4385{
4386 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4387 * the target mode and register number, and identify the various
4388 * unpredictable cases.
4389 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4390 * + executed in user mode
4391 * + using R15 as the src/dest register
4392 * + accessing an unimplemented register
4393 * + accessing a register that's inaccessible at current PL/security state*
4394 * + accessing a register that you could access with a different insn
4395 * We choose to UNDEF in all these cases.
4396 * Since we don't know which of the various AArch32 modes we are in
4397 * we have to defer some checks to runtime.
4398 * Accesses to Monitor mode registers from Secure EL1 (which implies
4399 * that EL3 is AArch64) must trap to EL3.
4400 *
4401 * If the access checks fail this function will emit code to take
4402 * an exception and return false. Otherwise it will return true,
4403 * and set *tgtmode and *regno appropriately.
4404 */
4405 int exc_target = default_exception_el(s);
4406
4407 /* These instructions are present only in ARMv8, or in ARMv7 with the
4408 * Virtualization Extensions.
4409 */
4410 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4411 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4412 goto undef;
4413 }
4414
4415 if (IS_USER(s) || rn == 15) {
4416 goto undef;
4417 }
4418
4419 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4420 * of registers into (r, sysm).
4421 */
4422 if (r) {
4423 /* SPSRs for other modes */
4424 switch (sysm) {
4425 case 0xe: /* SPSR_fiq */
4426 *tgtmode = ARM_CPU_MODE_FIQ;
4427 break;
4428 case 0x10: /* SPSR_irq */
4429 *tgtmode = ARM_CPU_MODE_IRQ;
4430 break;
4431 case 0x12: /* SPSR_svc */
4432 *tgtmode = ARM_CPU_MODE_SVC;
4433 break;
4434 case 0x14: /* SPSR_abt */
4435 *tgtmode = ARM_CPU_MODE_ABT;
4436 break;
4437 case 0x16: /* SPSR_und */
4438 *tgtmode = ARM_CPU_MODE_UND;
4439 break;
4440 case 0x1c: /* SPSR_mon */
4441 *tgtmode = ARM_CPU_MODE_MON;
4442 break;
4443 case 0x1e: /* SPSR_hyp */
4444 *tgtmode = ARM_CPU_MODE_HYP;
4445 break;
4446 default: /* unallocated */
4447 goto undef;
4448 }
4449 /* We arbitrarily assign SPSR a register number of 16. */
4450 *regno = 16;
4451 } else {
4452 /* general purpose registers for other modes */
4453 switch (sysm) {
4454 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4455 *tgtmode = ARM_CPU_MODE_USR;
4456 *regno = sysm + 8;
4457 break;
4458 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4459 *tgtmode = ARM_CPU_MODE_FIQ;
4460 *regno = sysm;
4461 break;
4462 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4463 *tgtmode = ARM_CPU_MODE_IRQ;
4464 *regno = sysm & 1 ? 13 : 14;
4465 break;
4466 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4467 *tgtmode = ARM_CPU_MODE_SVC;
4468 *regno = sysm & 1 ? 13 : 14;
4469 break;
4470 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4471 *tgtmode = ARM_CPU_MODE_ABT;
4472 *regno = sysm & 1 ? 13 : 14;
4473 break;
4474 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4475 *tgtmode = ARM_CPU_MODE_UND;
4476 *regno = sysm & 1 ? 13 : 14;
4477 break;
4478 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4479 *tgtmode = ARM_CPU_MODE_MON;
4480 *regno = sysm & 1 ? 13 : 14;
4481 break;
4482 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4483 *tgtmode = ARM_CPU_MODE_HYP;
4484 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4485 *regno = sysm & 1 ? 13 : 17;
4486 break;
4487 default: /* unallocated */
4488 goto undef;
4489 }
4490 }
4491
4492 /* Catch the 'accessing inaccessible register' cases we can detect
4493 * at translate time.
4494 */
4495 switch (*tgtmode) {
4496 case ARM_CPU_MODE_MON:
4497 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4498 goto undef;
4499 }
4500 if (s->current_el == 1) {
4501 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4502 * then accesses to Mon registers trap to EL3
4503 */
4504 exc_target = 3;
4505 goto undef;
4506 }
4507 break;
4508 case ARM_CPU_MODE_HYP:
aec4dd09
PM
4509 /*
4510 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
4511 * (and so we can forbid accesses from EL2 or below). elr_hyp
4512 * can be accessed also from Hyp mode, so forbid accesses from
4513 * EL0 or EL1.
8bfd0550 4514 */
aec4dd09
PM
4515 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
4516 (s->current_el < 3 && *regno != 17)) {
8bfd0550
PM
4517 goto undef;
4518 }
4519 break;
4520 default:
4521 break;
4522 }
4523
4524 return true;
4525
4526undef:
4527 /* If we get here then some access check did not pass */
4528 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4529 return false;
4530}
4531
4532static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4533{
4534 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4535 int tgtmode = 0, regno = 0;
4536
4537 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4538 return;
4539 }
4540
4541 /* Sync state because msr_banked() can raise exceptions */
4542 gen_set_condexec(s);
4543 gen_set_pc_im(s, s->pc - 4);
4544 tcg_reg = load_reg(s, rn);
4545 tcg_tgtmode = tcg_const_i32(tgtmode);
4546 tcg_regno = tcg_const_i32(regno);
4547 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4548 tcg_temp_free_i32(tcg_tgtmode);
4549 tcg_temp_free_i32(tcg_regno);
4550 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4551 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4552}
4553
4554static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4555{
4556 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4557 int tgtmode = 0, regno = 0;
4558
4559 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4560 return;
4561 }
4562
4563 /* Sync state because mrs_banked() can raise exceptions */
4564 gen_set_condexec(s);
4565 gen_set_pc_im(s, s->pc - 4);
4566 tcg_reg = tcg_temp_new_i32();
4567 tcg_tgtmode = tcg_const_i32(tgtmode);
4568 tcg_regno = tcg_const_i32(regno);
4569 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4570 tcg_temp_free_i32(tcg_tgtmode);
4571 tcg_temp_free_i32(tcg_regno);
4572 store_reg(s, rn, tcg_reg);
dcba3a8d 4573 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4574}
4575
fb0e8e79
PM
4576/* Store value to PC as for an exception return (ie don't
4577 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4578 * will do the masking based on the new value of the Thumb bit.
4579 */
4580static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4581{
fb0e8e79
PM
4582 tcg_gen_mov_i32(cpu_R[15], pc);
4583 tcg_temp_free_i32(pc);
b5ff1b31
FB
4584}
4585
b0109805 4586/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 4587static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 4588{
fb0e8e79
PM
4589 store_pc_exc_ret(s, pc);
4590 /* The cpsr_write_eret helper will mask the low bits of PC
4591 * appropriately depending on the new Thumb bit, so it must
4592 * be called after storing the new PC.
4593 */
e69ad9df
AL
4594 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4595 gen_io_start();
4596 }
235ea1f5 4597 gen_helper_cpsr_write_eret(cpu_env, cpsr);
e69ad9df
AL
4598 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4599 gen_io_end();
4600 }
7d1b0095 4601 tcg_temp_free_i32(cpsr);
b29fd33d 4602 /* Must exit loop to check un-masked IRQs */
dcba3a8d 4603 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb 4604}
3b46e624 4605
fb0e8e79
PM
4606/* Generate an old-style exception return. Marks pc as dead. */
4607static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4608{
4609 gen_rfe(s, pc, load_cpu_field(spsr));
4610}
4611
c22edfeb
AB
4612/*
4613 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4614 * only call the helper when running single threaded TCG code to ensure
4615 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4616 * just skip this instruction. Currently the SEV/SEVL instructions
4617 * which are *one* of many ways to wake the CPU from WFE are not
4618 * implemented so we can't sleep like WFI does.
4619 */
9ee6e8bb
PB
4620static void gen_nop_hint(DisasContext *s, int val)
4621{
4622 switch (val) {
2399d4e7
EC
4623 /* When running in MTTCG we don't generate jumps to the yield and
4624 * WFE helpers as it won't affect the scheduling of other vCPUs.
4625 * If we wanted to more completely model WFE/SEV so we don't busy
4626 * spin unnecessarily we would need to do something more involved.
4627 */
c87e5a61 4628 case 1: /* yield */
2399d4e7 4629 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4630 gen_set_pc_im(s, s->pc);
dcba3a8d 4631 s->base.is_jmp = DISAS_YIELD;
c22edfeb 4632 }
c87e5a61 4633 break;
9ee6e8bb 4634 case 3: /* wfi */
eaed129d 4635 gen_set_pc_im(s, s->pc);
dcba3a8d 4636 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
4637 break;
4638 case 2: /* wfe */
2399d4e7 4639 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4640 gen_set_pc_im(s, s->pc);
dcba3a8d 4641 s->base.is_jmp = DISAS_WFE;
c22edfeb 4642 }
72c1d3af 4643 break;
9ee6e8bb 4644 case 4: /* sev */
12b10571
MR
4645 case 5: /* sevl */
4646 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4647 default: /* nop */
4648 break;
4649 }
4650}
99c475ab 4651
ad69471c 4652#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4653
39d5492a 4654static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4655{
4656 switch (size) {
dd8fbd78
FN
4657 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4658 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4659 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4660 default: abort();
9ee6e8bb 4661 }
9ee6e8bb
PB
4662}
4663
39d5492a 4664static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4665{
4666 switch (size) {
dd8fbd78
FN
4667 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4668 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4669 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4670 default: return;
4671 }
4672}
4673
4674/* 32-bit pairwise ops end up the same as the elementwise versions. */
4675#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4676#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4677#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4678#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4679
ad69471c
PB
4680#define GEN_NEON_INTEGER_OP_ENV(name) do { \
4681 switch ((size << 1) | u) { \
4682 case 0: \
dd8fbd78 4683 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4684 break; \
4685 case 1: \
dd8fbd78 4686 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4687 break; \
4688 case 2: \
dd8fbd78 4689 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4690 break; \
4691 case 3: \
dd8fbd78 4692 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4693 break; \
4694 case 4: \
dd8fbd78 4695 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4696 break; \
4697 case 5: \
dd8fbd78 4698 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4699 break; \
4700 default: return 1; \
4701 }} while (0)
9ee6e8bb
PB
4702
4703#define GEN_NEON_INTEGER_OP(name) do { \
4704 switch ((size << 1) | u) { \
ad69471c 4705 case 0: \
dd8fbd78 4706 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
4707 break; \
4708 case 1: \
dd8fbd78 4709 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
4710 break; \
4711 case 2: \
dd8fbd78 4712 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
4713 break; \
4714 case 3: \
dd8fbd78 4715 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
4716 break; \
4717 case 4: \
dd8fbd78 4718 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
4719 break; \
4720 case 5: \
dd8fbd78 4721 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 4722 break; \
9ee6e8bb
PB
4723 default: return 1; \
4724 }} while (0)
4725
39d5492a 4726static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4727{
39d5492a 4728 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4729 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4730 return tmp;
9ee6e8bb
PB
4731}
4732
39d5492a 4733static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4734{
dd8fbd78 4735 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4736 tcg_temp_free_i32(var);
9ee6e8bb
PB
4737}
4738
39d5492a 4739static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4740{
39d5492a 4741 TCGv_i32 tmp;
9ee6e8bb 4742 if (size == 1) {
0fad6efc
PM
4743 tmp = neon_load_reg(reg & 7, reg >> 4);
4744 if (reg & 8) {
dd8fbd78 4745 gen_neon_dup_high16(tmp);
0fad6efc
PM
4746 } else {
4747 gen_neon_dup_low16(tmp);
dd8fbd78 4748 }
0fad6efc
PM
4749 } else {
4750 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4751 }
dd8fbd78 4752 return tmp;
9ee6e8bb
PB
4753}
4754
02acedf9 4755static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4756{
b13708bb
RH
4757 TCGv_ptr pd, pm;
4758
600b828c 4759 if (!q && size == 2) {
02acedf9
PM
4760 return 1;
4761 }
b13708bb
RH
4762 pd = vfp_reg_ptr(true, rd);
4763 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
4764 if (q) {
4765 switch (size) {
4766 case 0:
b13708bb 4767 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
4768 break;
4769 case 1:
b13708bb 4770 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
4771 break;
4772 case 2:
b13708bb 4773 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
4774 break;
4775 default:
4776 abort();
4777 }
4778 } else {
4779 switch (size) {
4780 case 0:
b13708bb 4781 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
4782 break;
4783 case 1:
b13708bb 4784 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
4785 break;
4786 default:
4787 abort();
4788 }
4789 }
b13708bb
RH
4790 tcg_temp_free_ptr(pd);
4791 tcg_temp_free_ptr(pm);
02acedf9 4792 return 0;
19457615
FN
4793}
4794
d68a6f3a 4795static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4796{
b13708bb
RH
4797 TCGv_ptr pd, pm;
4798
600b828c 4799 if (!q && size == 2) {
d68a6f3a
PM
4800 return 1;
4801 }
b13708bb
RH
4802 pd = vfp_reg_ptr(true, rd);
4803 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4804 if (q) {
4805 switch (size) {
4806 case 0:
b13708bb 4807 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4808 break;
4809 case 1:
b13708bb 4810 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4811 break;
4812 case 2:
b13708bb 4813 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4814 break;
4815 default:
4816 abort();
4817 }
4818 } else {
4819 switch (size) {
4820 case 0:
b13708bb 4821 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4822 break;
4823 case 1:
b13708bb 4824 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4825 break;
4826 default:
4827 abort();
4828 }
4829 }
b13708bb
RH
4830 tcg_temp_free_ptr(pd);
4831 tcg_temp_free_ptr(pm);
d68a6f3a 4832 return 0;
19457615
FN
4833}
4834
39d5492a 4835static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4836{
39d5492a 4837 TCGv_i32 rd, tmp;
19457615 4838
7d1b0095
PM
4839 rd = tcg_temp_new_i32();
4840 tmp = tcg_temp_new_i32();
19457615
FN
4841
4842 tcg_gen_shli_i32(rd, t0, 8);
4843 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4844 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4845 tcg_gen_or_i32(rd, rd, tmp);
4846
4847 tcg_gen_shri_i32(t1, t1, 8);
4848 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4849 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4850 tcg_gen_or_i32(t1, t1, tmp);
4851 tcg_gen_mov_i32(t0, rd);
4852
7d1b0095
PM
4853 tcg_temp_free_i32(tmp);
4854 tcg_temp_free_i32(rd);
19457615
FN
4855}
4856
39d5492a 4857static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4858{
39d5492a 4859 TCGv_i32 rd, tmp;
19457615 4860
7d1b0095
PM
4861 rd = tcg_temp_new_i32();
4862 tmp = tcg_temp_new_i32();
19457615
FN
4863
4864 tcg_gen_shli_i32(rd, t0, 16);
4865 tcg_gen_andi_i32(tmp, t1, 0xffff);
4866 tcg_gen_or_i32(rd, rd, tmp);
4867 tcg_gen_shri_i32(t1, t1, 16);
4868 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4869 tcg_gen_or_i32(t1, t1, tmp);
4870 tcg_gen_mov_i32(t0, rd);
4871
7d1b0095
PM
4872 tcg_temp_free_i32(tmp);
4873 tcg_temp_free_i32(rd);
19457615
FN
4874}
4875
4876
9ee6e8bb
PB
4877static struct {
4878 int nregs;
4879 int interleave;
4880 int spacing;
4881} neon_ls_element_type[11] = {
4882 {4, 4, 1},
4883 {4, 4, 2},
4884 {4, 1, 1},
4885 {4, 2, 1},
4886 {3, 3, 1},
4887 {3, 3, 2},
4888 {3, 1, 1},
4889 {1, 1, 1},
4890 {2, 2, 1},
4891 {2, 2, 2},
4892 {2, 1, 1}
4893};
4894
4895/* Translate a NEON load/store element instruction. Return nonzero if the
4896 instruction is invalid. */
7dcc1f89 4897static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4898{
4899 int rd, rn, rm;
4900 int op;
4901 int nregs;
4902 int interleave;
84496233 4903 int spacing;
9ee6e8bb
PB
4904 int stride;
4905 int size;
4906 int reg;
4907 int pass;
4908 int load;
4909 int shift;
9ee6e8bb 4910 int n;
39d5492a
PM
4911 TCGv_i32 addr;
4912 TCGv_i32 tmp;
4913 TCGv_i32 tmp2;
84496233 4914 TCGv_i64 tmp64;
9ee6e8bb 4915
2c7ffc41
PM
4916 /* FIXME: this access check should not take precedence over UNDEF
4917 * for invalid encodings; we will generate incorrect syndrome information
4918 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4919 */
9dbbc748 4920 if (s->fp_excp_el) {
2c7ffc41 4921 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4922 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4923 return 0;
4924 }
4925
5df8bac1 4926 if (!s->vfp_enabled)
9ee6e8bb
PB
4927 return 1;
4928 VFP_DREG_D(rd, insn);
4929 rn = (insn >> 16) & 0xf;
4930 rm = insn & 0xf;
4931 load = (insn & (1 << 21)) != 0;
4932 if ((insn & (1 << 23)) == 0) {
4933 /* Load store all elements. */
4934 op = (insn >> 8) & 0xf;
4935 size = (insn >> 6) & 3;
84496233 4936 if (op > 10)
9ee6e8bb 4937 return 1;
f2dd89d0
PM
4938 /* Catch UNDEF cases for bad values of align field */
4939 switch (op & 0xc) {
4940 case 4:
4941 if (((insn >> 5) & 1) == 1) {
4942 return 1;
4943 }
4944 break;
4945 case 8:
4946 if (((insn >> 4) & 3) == 3) {
4947 return 1;
4948 }
4949 break;
4950 default:
4951 break;
4952 }
9ee6e8bb
PB
4953 nregs = neon_ls_element_type[op].nregs;
4954 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4955 spacing = neon_ls_element_type[op].spacing;
4956 if (size == 3 && (interleave | spacing) != 1)
4957 return 1;
e318a60b 4958 addr = tcg_temp_new_i32();
dcc65026 4959 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4960 stride = (1 << size) * interleave;
4961 for (reg = 0; reg < nregs; reg++) {
4962 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4963 load_reg_var(s, addr, rn);
4964 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4965 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4966 load_reg_var(s, addr, rn);
4967 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4968 }
84496233 4969 if (size == 3) {
8ed1237d 4970 tmp64 = tcg_temp_new_i64();
84496233 4971 if (load) {
12dcc321 4972 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 4973 neon_store_reg64(tmp64, rd);
84496233 4974 } else {
84496233 4975 neon_load_reg64(tmp64, rd);
12dcc321 4976 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 4977 }
8ed1237d 4978 tcg_temp_free_i64(tmp64);
84496233
JR
4979 tcg_gen_addi_i32(addr, addr, stride);
4980 } else {
4981 for (pass = 0; pass < 2; pass++) {
4982 if (size == 2) {
4983 if (load) {
58ab8e96 4984 tmp = tcg_temp_new_i32();
12dcc321 4985 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
4986 neon_store_reg(rd, pass, tmp);
4987 } else {
4988 tmp = neon_load_reg(rd, pass);
12dcc321 4989 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 4990 tcg_temp_free_i32(tmp);
84496233 4991 }
1b2b1e54 4992 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4993 } else if (size == 1) {
4994 if (load) {
58ab8e96 4995 tmp = tcg_temp_new_i32();
12dcc321 4996 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 4997 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 4998 tmp2 = tcg_temp_new_i32();
12dcc321 4999 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 5000 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
5001 tcg_gen_shli_i32(tmp2, tmp2, 16);
5002 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5003 tcg_temp_free_i32(tmp2);
84496233
JR
5004 neon_store_reg(rd, pass, tmp);
5005 } else {
5006 tmp = neon_load_reg(rd, pass);
7d1b0095 5007 tmp2 = tcg_temp_new_i32();
84496233 5008 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 5009 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 5010 tcg_temp_free_i32(tmp);
84496233 5011 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 5012 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 5013 tcg_temp_free_i32(tmp2);
1b2b1e54 5014 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 5015 }
84496233
JR
5016 } else /* size == 0 */ {
5017 if (load) {
f764718d 5018 tmp2 = NULL;
84496233 5019 for (n = 0; n < 4; n++) {
58ab8e96 5020 tmp = tcg_temp_new_i32();
12dcc321 5021 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
5022 tcg_gen_addi_i32(addr, addr, stride);
5023 if (n == 0) {
5024 tmp2 = tmp;
5025 } else {
41ba8341
PB
5026 tcg_gen_shli_i32(tmp, tmp, n * 8);
5027 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 5028 tcg_temp_free_i32(tmp);
84496233 5029 }
9ee6e8bb 5030 }
84496233
JR
5031 neon_store_reg(rd, pass, tmp2);
5032 } else {
5033 tmp2 = neon_load_reg(rd, pass);
5034 for (n = 0; n < 4; n++) {
7d1b0095 5035 tmp = tcg_temp_new_i32();
84496233
JR
5036 if (n == 0) {
5037 tcg_gen_mov_i32(tmp, tmp2);
5038 } else {
5039 tcg_gen_shri_i32(tmp, tmp2, n * 8);
5040 }
12dcc321 5041 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 5042 tcg_temp_free_i32(tmp);
84496233
JR
5043 tcg_gen_addi_i32(addr, addr, stride);
5044 }
7d1b0095 5045 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5046 }
5047 }
5048 }
5049 }
84496233 5050 rd += spacing;
9ee6e8bb 5051 }
e318a60b 5052 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5053 stride = nregs * 8;
5054 } else {
5055 size = (insn >> 10) & 3;
5056 if (size == 3) {
5057 /* Load single element to all lanes. */
8e18cde3
PM
5058 int a = (insn >> 4) & 1;
5059 if (!load) {
9ee6e8bb 5060 return 1;
8e18cde3 5061 }
9ee6e8bb
PB
5062 size = (insn >> 6) & 3;
5063 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5064
5065 if (size == 3) {
5066 if (nregs != 4 || a == 0) {
9ee6e8bb 5067 return 1;
99c475ab 5068 }
8e18cde3
PM
5069 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5070 size = 2;
5071 }
5072 if (nregs == 1 && a == 1 && size == 0) {
5073 return 1;
5074 }
5075 if (nregs == 3 && a == 1) {
5076 return 1;
5077 }
e318a60b 5078 addr = tcg_temp_new_i32();
8e18cde3
PM
5079 load_reg_var(s, addr, rn);
5080 if (nregs == 1) {
5081 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5082 tmp = gen_load_and_replicate(s, addr, size);
5083 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5084 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5085 if (insn & (1 << 5)) {
5086 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5087 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5088 }
5089 tcg_temp_free_i32(tmp);
5090 } else {
5091 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5092 stride = (insn & (1 << 5)) ? 2 : 1;
5093 for (reg = 0; reg < nregs; reg++) {
5094 tmp = gen_load_and_replicate(s, addr, size);
5095 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5096 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5097 tcg_temp_free_i32(tmp);
5098 tcg_gen_addi_i32(addr, addr, 1 << size);
5099 rd += stride;
5100 }
9ee6e8bb 5101 }
e318a60b 5102 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5103 stride = (1 << size) * nregs;
5104 } else {
5105 /* Single element. */
93262b16 5106 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5107 pass = (insn >> 7) & 1;
5108 switch (size) {
5109 case 0:
5110 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5111 stride = 1;
5112 break;
5113 case 1:
5114 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5115 stride = (insn & (1 << 5)) ? 2 : 1;
5116 break;
5117 case 2:
5118 shift = 0;
9ee6e8bb
PB
5119 stride = (insn & (1 << 6)) ? 2 : 1;
5120 break;
5121 default:
5122 abort();
5123 }
5124 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5125 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5126 switch (nregs) {
5127 case 1:
5128 if (((idx & (1 << size)) != 0) ||
5129 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5130 return 1;
5131 }
5132 break;
5133 case 3:
5134 if ((idx & 1) != 0) {
5135 return 1;
5136 }
5137 /* fall through */
5138 case 2:
5139 if (size == 2 && (idx & 2) != 0) {
5140 return 1;
5141 }
5142 break;
5143 case 4:
5144 if ((size == 2) && ((idx & 3) == 3)) {
5145 return 1;
5146 }
5147 break;
5148 default:
5149 abort();
5150 }
5151 if ((rd + stride * (nregs - 1)) > 31) {
5152 /* Attempts to write off the end of the register file
5153 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5154 * the neon_load_reg() would write off the end of the array.
5155 */
5156 return 1;
5157 }
e318a60b 5158 addr = tcg_temp_new_i32();
dcc65026 5159 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5160 for (reg = 0; reg < nregs; reg++) {
5161 if (load) {
58ab8e96 5162 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5163 switch (size) {
5164 case 0:
12dcc321 5165 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5166 break;
5167 case 1:
12dcc321 5168 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5169 break;
5170 case 2:
12dcc321 5171 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5172 break;
a50f5b91
PB
5173 default: /* Avoid compiler warnings. */
5174 abort();
9ee6e8bb
PB
5175 }
5176 if (size != 2) {
8f8e3aa4 5177 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5178 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5179 shift, size ? 16 : 8);
7d1b0095 5180 tcg_temp_free_i32(tmp2);
9ee6e8bb 5181 }
8f8e3aa4 5182 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5183 } else { /* Store */
8f8e3aa4
PB
5184 tmp = neon_load_reg(rd, pass);
5185 if (shift)
5186 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5187 switch (size) {
5188 case 0:
12dcc321 5189 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5190 break;
5191 case 1:
12dcc321 5192 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5193 break;
5194 case 2:
12dcc321 5195 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5196 break;
99c475ab 5197 }
58ab8e96 5198 tcg_temp_free_i32(tmp);
99c475ab 5199 }
9ee6e8bb 5200 rd += stride;
1b2b1e54 5201 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5202 }
e318a60b 5203 tcg_temp_free_i32(addr);
9ee6e8bb 5204 stride = nregs * (1 << size);
99c475ab 5205 }
9ee6e8bb
PB
5206 }
5207 if (rm != 15) {
39d5492a 5208 TCGv_i32 base;
b26eefb6
PB
5209
5210 base = load_reg(s, rn);
9ee6e8bb 5211 if (rm == 13) {
b26eefb6 5212 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5213 } else {
39d5492a 5214 TCGv_i32 index;
b26eefb6
PB
5215 index = load_reg(s, rm);
5216 tcg_gen_add_i32(base, base, index);
7d1b0095 5217 tcg_temp_free_i32(index);
9ee6e8bb 5218 }
b26eefb6 5219 store_reg(s, rn, base);
9ee6e8bb
PB
5220 }
5221 return 0;
5222}
3b46e624 5223
/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* Keep bits of t where c is set ... */
    tcg_gen_and_i32(t, t, c);
    /* ... and bits of f where c is clear ... */
    tcg_gen_andc_i32(f, f, c);
    /* ... then merge. Note t and f are overwritten (see contract above). */
    tcg_gen_or_i32(dest, t, f);
}
5231
39d5492a 5232static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5233{
5234 switch (size) {
5235 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5236 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5237 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5238 default: abort();
5239 }
5240}
5241
39d5492a 5242static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5243{
5244 switch (size) {
02da0b2d
PM
5245 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5246 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5247 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5248 default: abort();
5249 }
5250}
5251
39d5492a 5252static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5253{
5254 switch (size) {
02da0b2d
PM
5255 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5256 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5257 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5258 default: abort();
5259 }
5260}
5261
39d5492a 5262static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5263{
5264 switch (size) {
02da0b2d
PM
5265 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5266 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5267 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5268 default: abort();
5269 }
5270}
5271
39d5492a 5272static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5273 int q, int u)
5274{
5275 if (q) {
5276 if (u) {
5277 switch (size) {
5278 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5279 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5280 default: abort();
5281 }
5282 } else {
5283 switch (size) {
5284 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5285 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5286 default: abort();
5287 }
5288 }
5289 } else {
5290 if (u) {
5291 switch (size) {
b408a9b0
CL
5292 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5293 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5294 default: abort();
5295 }
5296 } else {
5297 switch (size) {
5298 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5299 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5300 default: abort();
5301 }
5302 }
5303 }
5304}
5305
39d5492a 5306static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5307{
5308 if (u) {
5309 switch (size) {
5310 case 0: gen_helper_neon_widen_u8(dest, src); break;
5311 case 1: gen_helper_neon_widen_u16(dest, src); break;
5312 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5313 default: abort();
5314 }
5315 } else {
5316 switch (size) {
5317 case 0: gen_helper_neon_widen_s8(dest, src); break;
5318 case 1: gen_helper_neon_widen_s16(dest, src); break;
5319 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5320 default: abort();
5321 }
5322 }
7d1b0095 5323 tcg_temp_free_i32(src);
ad69471c
PB
5324}
5325
5326static inline void gen_neon_addl(int size)
5327{
5328 switch (size) {
5329 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5330 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5331 case 2: tcg_gen_add_i64(CPU_V001); break;
5332 default: abort();
5333 }
5334}
5335
5336static inline void gen_neon_subl(int size)
5337{
5338 switch (size) {
5339 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5340 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5341 case 2: tcg_gen_sub_i64(CPU_V001); break;
5342 default: abort();
5343 }
5344}
5345
a7812ae4 5346static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5347{
5348 switch (size) {
5349 case 0: gen_helper_neon_negl_u16(var, var); break;
5350 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5351 case 2:
5352 tcg_gen_neg_i64(var, var);
5353 break;
ad69471c
PB
5354 default: abort();
5355 }
5356}
5357
a7812ae4 5358static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5359{
5360 switch (size) {
02da0b2d
PM
5361 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5362 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5363 default: abort();
5364 }
5365}
5366
/* Widening multiply: dest = a * b, widening from 8/16/32-bit elements
 * (size 0/1/2) to double-width, signed or unsigned per u.
 * Consumes (frees) a and b in all cases: the 32-bit multiply paths free
 * them via gen_mul[su]_i64_i32, the helper paths free them explicitly below.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    /* Dispatch on (size, u) packed into one value. */
    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64 signed: helper returns a fresh temp we must free. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        /* 32x32->64 unsigned. */
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
5397
39d5492a
PM
5398static void gen_neon_narrow_op(int op, int u, int size,
5399 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5400{
5401 if (op) {
5402 if (u) {
5403 gen_neon_unarrow_sats(size, dest, src);
5404 } else {
5405 gen_neon_narrow(size, dest, src);
5406 }
5407 } else {
5408 if (u) {
5409 gen_neon_narrow_satu(size, dest, src);
5410 } else {
5411 gen_neon_narrow_sats(size, dest, src);
5412 }
5413 }
5414}
5415
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Bit n of an entry is set when the op accepts size value n; ops whose
 * size field is really an op-type selector simply allow every size (0xf).
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5487
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float.
     * The range checks rely on the numeric layout of the NEON_2RM_*
     * constants above.
     */
    if (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) {
        return 1;
    }
    if (op == NEON_2RM_VRINTM) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) {
        return 1;
    }
    return op >= NEON_2RM_VRECPE_F;
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up:
     * the VRINT* group (40-43, 45, 47) and the VCVT{A,N,P,M}{U,S}
     * group (48-55). VCVT_F16_F32 (44) and VCVT_F32_F16 (46) are
     * deliberately excluded.
     */
    if (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) {
        return true;
    }
    if (op == NEON_2RM_VRINTM) {
        return true;
    }
    return op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS;
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5657
36a71934
RH
5658
5659/* Expand v8.1 simd helper. */
5660static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5661 int q, int rd, int rn, int rm)
5662{
5663 if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
5664 int opr_sz = (1 + q) * 8;
5665 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5666 vfp_reg_offset(1, rn),
5667 vfp_reg_offset(1, rm), cpu_env,
5668 opr_sz, opr_sz, 0, fn);
5669 return 0;
5670 }
5671 return 1;
5672}
5673
9ee6e8bb
PB
5674/* Translate a NEON data processing instruction. Return nonzero if the
5675 instruction is invalid.
ad69471c
PB
5676 We process data in a mixture of 32-bit and 64-bit chunks.
5677 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5678
7dcc1f89 5679static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5680{
5681 int op;
5682 int q;
5683 int rd, rn, rm;
5684 int size;
5685 int shift;
5686 int pass;
5687 int count;
5688 int pairwise;
5689 int u;
ca9a32e4 5690 uint32_t imm, mask;
39d5492a 5691 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 5692 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 5693 TCGv_i64 tmp64;
9ee6e8bb 5694
2c7ffc41
PM
5695 /* FIXME: this access check should not take precedence over UNDEF
5696 * for invalid encodings; we will generate incorrect syndrome information
5697 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5698 */
9dbbc748 5699 if (s->fp_excp_el) {
2c7ffc41 5700 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5701 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5702 return 0;
5703 }
5704
5df8bac1 5705 if (!s->vfp_enabled)
9ee6e8bb
PB
5706 return 1;
5707 q = (insn & (1 << 6)) != 0;
5708 u = (insn >> 24) & 1;
5709 VFP_DREG_D(rd, insn);
5710 VFP_DREG_N(rn, insn);
5711 VFP_DREG_M(rm, insn);
5712 size = (insn >> 20) & 3;
5713 if ((insn & (1 << 23)) == 0) {
5714 /* Three register same length. */
5715 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5716 /* Catch invalid op and bad size combinations: UNDEF */
5717 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5718 return 1;
5719 }
25f84f79
PM
5720 /* All insns of this form UNDEF for either this condition or the
5721 * superset of cases "Q==1"; we catch the latter later.
5722 */
5723 if (q && ((rd | rn | rm) & 1)) {
5724 return 1;
5725 }
36a71934
RH
5726 switch (op) {
5727 case NEON_3R_SHA:
5728 /* The SHA-1/SHA-256 3-register instructions require special
5729 * treatment here, as their size field is overloaded as an
5730 * op type selector, and they all consume their input in a
5731 * single pass.
5732 */
f1ecb913
AB
5733 if (!q) {
5734 return 1;
5735 }
5736 if (!u) { /* SHA-1 */
d614a513 5737 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5738 return 1;
5739 }
1a66ac61
RH
5740 ptr1 = vfp_reg_ptr(true, rd);
5741 ptr2 = vfp_reg_ptr(true, rn);
5742 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 5743 tmp4 = tcg_const_i32(size);
1a66ac61 5744 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
5745 tcg_temp_free_i32(tmp4);
5746 } else { /* SHA-256 */
d614a513 5747 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5748 return 1;
5749 }
1a66ac61
RH
5750 ptr1 = vfp_reg_ptr(true, rd);
5751 ptr2 = vfp_reg_ptr(true, rn);
5752 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
5753 switch (size) {
5754 case 0:
1a66ac61 5755 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
5756 break;
5757 case 1:
1a66ac61 5758 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
5759 break;
5760 case 2:
1a66ac61 5761 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
5762 break;
5763 }
5764 }
1a66ac61
RH
5765 tcg_temp_free_ptr(ptr1);
5766 tcg_temp_free_ptr(ptr2);
5767 tcg_temp_free_ptr(ptr3);
f1ecb913 5768 return 0;
36a71934
RH
5769
5770 case NEON_3R_VPADD_VQRDMLAH:
5771 if (!u) {
5772 break; /* VPADD */
5773 }
5774 /* VQRDMLAH */
5775 switch (size) {
5776 case 1:
5777 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
5778 q, rd, rn, rm);
5779 case 2:
5780 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
5781 q, rd, rn, rm);
5782 }
5783 return 1;
5784
5785 case NEON_3R_VFM_VQRDMLSH:
5786 if (!u) {
5787 /* VFM, VFMS */
5788 if (size == 1) {
5789 return 1;
5790 }
5791 break;
5792 }
5793 /* VQRDMLSH */
5794 switch (size) {
5795 case 1:
5796 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
5797 q, rd, rn, rm);
5798 case 2:
5799 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
5800 q, rd, rn, rm);
5801 }
5802 return 1;
f1ecb913 5803 }
62698be3
PM
5804 if (size == 3 && op != NEON_3R_LOGIC) {
5805 /* 64-bit element instructions. */
9ee6e8bb 5806 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5807 neon_load_reg64(cpu_V0, rn + pass);
5808 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5809 switch (op) {
62698be3 5810 case NEON_3R_VQADD:
9ee6e8bb 5811 if (u) {
02da0b2d
PM
5812 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5813 cpu_V0, cpu_V1);
2c0262af 5814 } else {
02da0b2d
PM
5815 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5816 cpu_V0, cpu_V1);
2c0262af 5817 }
9ee6e8bb 5818 break;
62698be3 5819 case NEON_3R_VQSUB:
9ee6e8bb 5820 if (u) {
02da0b2d
PM
5821 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5822 cpu_V0, cpu_V1);
ad69471c 5823 } else {
02da0b2d
PM
5824 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5825 cpu_V0, cpu_V1);
ad69471c
PB
5826 }
5827 break;
62698be3 5828 case NEON_3R_VSHL:
ad69471c
PB
5829 if (u) {
5830 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5831 } else {
5832 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5833 }
5834 break;
62698be3 5835 case NEON_3R_VQSHL:
ad69471c 5836 if (u) {
02da0b2d
PM
5837 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5838 cpu_V1, cpu_V0);
ad69471c 5839 } else {
02da0b2d
PM
5840 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5841 cpu_V1, cpu_V0);
ad69471c
PB
5842 }
5843 break;
62698be3 5844 case NEON_3R_VRSHL:
ad69471c
PB
5845 if (u) {
5846 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5847 } else {
ad69471c
PB
5848 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5849 }
5850 break;
62698be3 5851 case NEON_3R_VQRSHL:
ad69471c 5852 if (u) {
02da0b2d
PM
5853 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5854 cpu_V1, cpu_V0);
ad69471c 5855 } else {
02da0b2d
PM
5856 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5857 cpu_V1, cpu_V0);
1e8d4eec 5858 }
9ee6e8bb 5859 break;
62698be3 5860 case NEON_3R_VADD_VSUB:
9ee6e8bb 5861 if (u) {
ad69471c 5862 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5863 } else {
ad69471c 5864 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5865 }
5866 break;
5867 default:
5868 abort();
2c0262af 5869 }
ad69471c 5870 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5871 }
9ee6e8bb 5872 return 0;
2c0262af 5873 }
25f84f79 5874 pairwise = 0;
9ee6e8bb 5875 switch (op) {
62698be3
PM
5876 case NEON_3R_VSHL:
5877 case NEON_3R_VQSHL:
5878 case NEON_3R_VRSHL:
5879 case NEON_3R_VQRSHL:
9ee6e8bb 5880 {
ad69471c
PB
5881 int rtmp;
5882 /* Shift instruction operands are reversed. */
5883 rtmp = rn;
9ee6e8bb 5884 rn = rm;
ad69471c 5885 rm = rtmp;
9ee6e8bb 5886 }
2c0262af 5887 break;
36a71934 5888 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5889 case NEON_3R_VPMAX:
5890 case NEON_3R_VPMIN:
9ee6e8bb 5891 pairwise = 1;
2c0262af 5892 break;
25f84f79
PM
5893 case NEON_3R_FLOAT_ARITH:
5894 pairwise = (u && size < 2); /* if VPADD (float) */
5895 break;
5896 case NEON_3R_FLOAT_MINMAX:
5897 pairwise = u; /* if VPMIN/VPMAX (float) */
5898 break;
5899 case NEON_3R_FLOAT_CMP:
5900 if (!u && size) {
5901 /* no encoding for U=0 C=1x */
5902 return 1;
5903 }
5904 break;
5905 case NEON_3R_FLOAT_ACMP:
5906 if (!u) {
5907 return 1;
5908 }
5909 break;
505935fc
WN
5910 case NEON_3R_FLOAT_MISC:
5911 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5912 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5913 return 1;
5914 }
2c0262af 5915 break;
25f84f79
PM
5916 case NEON_3R_VMUL:
5917 if (u && (size != 0)) {
5918 /* UNDEF on invalid size for polynomial subcase */
5919 return 1;
5920 }
2c0262af 5921 break;
36a71934
RH
5922 case NEON_3R_VFM_VQRDMLSH:
5923 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5924 return 1;
5925 }
5926 break;
9ee6e8bb 5927 default:
2c0262af 5928 break;
9ee6e8bb 5929 }
dd8fbd78 5930
25f84f79
PM
5931 if (pairwise && q) {
5932 /* All the pairwise insns UNDEF if Q is set */
5933 return 1;
5934 }
5935
9ee6e8bb
PB
5936 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5937
5938 if (pairwise) {
5939 /* Pairwise. */
a5a14945
JR
5940 if (pass < 1) {
5941 tmp = neon_load_reg(rn, 0);
5942 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5943 } else {
a5a14945
JR
5944 tmp = neon_load_reg(rm, 0);
5945 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5946 }
5947 } else {
5948 /* Elementwise. */
dd8fbd78
FN
5949 tmp = neon_load_reg(rn, pass);
5950 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5951 }
5952 switch (op) {
62698be3 5953 case NEON_3R_VHADD:
9ee6e8bb
PB
5954 GEN_NEON_INTEGER_OP(hadd);
5955 break;
62698be3 5956 case NEON_3R_VQADD:
02da0b2d 5957 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5958 break;
62698be3 5959 case NEON_3R_VRHADD:
9ee6e8bb 5960 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5961 break;
62698be3 5962 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5963 switch ((u << 2) | size) {
5964 case 0: /* VAND */
dd8fbd78 5965 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5966 break;
5967 case 1: /* BIC */
f669df27 5968 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5969 break;
5970 case 2: /* VORR */
dd8fbd78 5971 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5972 break;
5973 case 3: /* VORN */
f669df27 5974 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5975 break;
5976 case 4: /* VEOR */
dd8fbd78 5977 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5978 break;
5979 case 5: /* VBSL */
dd8fbd78
FN
5980 tmp3 = neon_load_reg(rd, pass);
5981 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5982 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5983 break;
5984 case 6: /* VBIT */
dd8fbd78
FN
5985 tmp3 = neon_load_reg(rd, pass);
5986 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5987 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5988 break;
5989 case 7: /* VBIF */
dd8fbd78
FN
5990 tmp3 = neon_load_reg(rd, pass);
5991 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5992 tcg_temp_free_i32(tmp3);
9ee6e8bb 5993 break;
2c0262af
FB
5994 }
5995 break;
62698be3 5996 case NEON_3R_VHSUB:
9ee6e8bb
PB
5997 GEN_NEON_INTEGER_OP(hsub);
5998 break;
62698be3 5999 case NEON_3R_VQSUB:
02da0b2d 6000 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 6001 break;
62698be3 6002 case NEON_3R_VCGT:
9ee6e8bb
PB
6003 GEN_NEON_INTEGER_OP(cgt);
6004 break;
62698be3 6005 case NEON_3R_VCGE:
9ee6e8bb
PB
6006 GEN_NEON_INTEGER_OP(cge);
6007 break;
62698be3 6008 case NEON_3R_VSHL:
ad69471c 6009 GEN_NEON_INTEGER_OP(shl);
2c0262af 6010 break;
62698be3 6011 case NEON_3R_VQSHL:
02da0b2d 6012 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 6013 break;
62698be3 6014 case NEON_3R_VRSHL:
ad69471c 6015 GEN_NEON_INTEGER_OP(rshl);
2c0262af 6016 break;
62698be3 6017 case NEON_3R_VQRSHL:
02da0b2d 6018 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 6019 break;
62698be3 6020 case NEON_3R_VMAX:
9ee6e8bb
PB
6021 GEN_NEON_INTEGER_OP(max);
6022 break;
62698be3 6023 case NEON_3R_VMIN:
9ee6e8bb
PB
6024 GEN_NEON_INTEGER_OP(min);
6025 break;
62698be3 6026 case NEON_3R_VABD:
9ee6e8bb
PB
6027 GEN_NEON_INTEGER_OP(abd);
6028 break;
62698be3 6029 case NEON_3R_VABA:
9ee6e8bb 6030 GEN_NEON_INTEGER_OP(abd);
7d1b0095 6031 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
6032 tmp2 = neon_load_reg(rd, pass);
6033 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 6034 break;
62698be3 6035 case NEON_3R_VADD_VSUB:
9ee6e8bb 6036 if (!u) { /* VADD */
62698be3 6037 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6038 } else { /* VSUB */
6039 switch (size) {
dd8fbd78
FN
6040 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
6041 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
6042 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 6043 default: abort();
9ee6e8bb
PB
6044 }
6045 }
6046 break;
62698be3 6047 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
6048 if (!u) { /* VTST */
6049 switch (size) {
dd8fbd78
FN
6050 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
6051 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
6052 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 6053 default: abort();
9ee6e8bb
PB
6054 }
6055 } else { /* VCEQ */
6056 switch (size) {
dd8fbd78
FN
6057 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6058 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6059 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 6060 default: abort();
9ee6e8bb
PB
6061 }
6062 }
6063 break;
62698be3 6064 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 6065 switch (size) {
dd8fbd78
FN
6066 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6067 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6068 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6069 default: abort();
9ee6e8bb 6070 }
7d1b0095 6071 tcg_temp_free_i32(tmp2);
dd8fbd78 6072 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6073 if (u) { /* VMLS */
dd8fbd78 6074 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 6075 } else { /* VMLA */
dd8fbd78 6076 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6077 }
6078 break;
62698be3 6079 case NEON_3R_VMUL:
9ee6e8bb 6080 if (u) { /* polynomial */
dd8fbd78 6081 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
6082 } else { /* Integer */
6083 switch (size) {
dd8fbd78
FN
6084 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6085 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6086 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6087 default: abort();
9ee6e8bb
PB
6088 }
6089 }
6090 break;
62698be3 6091 case NEON_3R_VPMAX:
9ee6e8bb
PB
6092 GEN_NEON_INTEGER_OP(pmax);
6093 break;
62698be3 6094 case NEON_3R_VPMIN:
9ee6e8bb
PB
6095 GEN_NEON_INTEGER_OP(pmin);
6096 break;
62698be3 6097 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
6098 if (!u) { /* VQDMULH */
6099 switch (size) {
02da0b2d
PM
6100 case 1:
6101 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6102 break;
6103 case 2:
6104 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6105 break;
62698be3 6106 default: abort();
9ee6e8bb 6107 }
62698be3 6108 } else { /* VQRDMULH */
9ee6e8bb 6109 switch (size) {
02da0b2d
PM
6110 case 1:
6111 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6112 break;
6113 case 2:
6114 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6115 break;
62698be3 6116 default: abort();
9ee6e8bb
PB
6117 }
6118 }
6119 break;
36a71934 6120 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 6121 switch (size) {
dd8fbd78
FN
6122 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6123 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6124 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 6125 default: abort();
9ee6e8bb
PB
6126 }
6127 break;
62698be3 6128 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6129 {
6130 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6131 switch ((u << 2) | size) {
6132 case 0: /* VADD */
aa47cfdd
PM
6133 case 4: /* VPADD */
6134 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6135 break;
6136 case 2: /* VSUB */
aa47cfdd 6137 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6138 break;
6139 case 6: /* VABD */
aa47cfdd 6140 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6141 break;
6142 default:
62698be3 6143 abort();
9ee6e8bb 6144 }
aa47cfdd 6145 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6146 break;
aa47cfdd 6147 }
62698be3 6148 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6149 {
6150 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6151 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6152 if (!u) {
7d1b0095 6153 tcg_temp_free_i32(tmp2);
dd8fbd78 6154 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6155 if (size == 0) {
aa47cfdd 6156 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6157 } else {
aa47cfdd 6158 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6159 }
6160 }
aa47cfdd 6161 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6162 break;
aa47cfdd 6163 }
62698be3 6164 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6165 {
6166 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6167 if (!u) {
aa47cfdd 6168 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6169 } else {
aa47cfdd
PM
6170 if (size == 0) {
6171 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6172 } else {
6173 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6174 }
b5ff1b31 6175 }
aa47cfdd 6176 tcg_temp_free_ptr(fpstatus);
2c0262af 6177 break;
aa47cfdd 6178 }
62698be3 6179 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6180 {
6181 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6182 if (size == 0) {
6183 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6184 } else {
6185 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6186 }
6187 tcg_temp_free_ptr(fpstatus);
2c0262af 6188 break;
aa47cfdd 6189 }
62698be3 6190 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6191 {
6192 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6193 if (size == 0) {
f71a2ae5 6194 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6195 } else {
f71a2ae5 6196 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6197 }
6198 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6199 break;
aa47cfdd 6200 }
505935fc
WN
6201 case NEON_3R_FLOAT_MISC:
6202 if (u) {
6203 /* VMAXNM/VMINNM */
6204 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6205 if (size == 0) {
f71a2ae5 6206 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6207 } else {
f71a2ae5 6208 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6209 }
6210 tcg_temp_free_ptr(fpstatus);
6211 } else {
6212 if (size == 0) {
6213 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6214 } else {
6215 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6216 }
6217 }
2c0262af 6218 break;
36a71934 6219 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
6220 {
6221 /* VFMA, VFMS: fused multiply-add */
6222 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6223 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6224 if (size) {
6225 /* VFMS */
6226 gen_helper_vfp_negs(tmp, tmp);
6227 }
6228 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6229 tcg_temp_free_i32(tmp3);
6230 tcg_temp_free_ptr(fpstatus);
6231 break;
6232 }
9ee6e8bb
PB
6233 default:
6234 abort();
2c0262af 6235 }
7d1b0095 6236 tcg_temp_free_i32(tmp2);
dd8fbd78 6237
9ee6e8bb
PB
6238 /* Save the result. For elementwise operations we can put it
6239 straight into the destination register. For pairwise operations
6240 we have to be careful to avoid clobbering the source operands. */
6241 if (pairwise && rd == rm) {
dd8fbd78 6242 neon_store_scratch(pass, tmp);
9ee6e8bb 6243 } else {
dd8fbd78 6244 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6245 }
6246
6247 } /* for pass */
6248 if (pairwise && rd == rm) {
6249 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6250 tmp = neon_load_scratch(pass);
6251 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6252 }
6253 }
ad69471c 6254 /* End of 3 register same size operations. */
9ee6e8bb
PB
6255 } else if (insn & (1 << 4)) {
6256 if ((insn & 0x00380080) != 0) {
6257 /* Two registers and shift. */
6258 op = (insn >> 8) & 0xf;
6259 if (insn & (1 << 7)) {
cc13115b
PM
6260 /* 64-bit shift. */
6261 if (op > 7) {
6262 return 1;
6263 }
9ee6e8bb
PB
6264 size = 3;
6265 } else {
6266 size = 2;
6267 while ((insn & (1 << (size + 19))) == 0)
6268 size--;
6269 }
6270 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6271 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6272 by immediate using the variable shift operations. */
6273 if (op < 8) {
6274 /* Shift by immediate:
6275 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6276 if (q && ((rd | rm) & 1)) {
6277 return 1;
6278 }
6279 if (!u && (op == 4 || op == 6)) {
6280 return 1;
6281 }
9ee6e8bb
PB
6282 /* Right shifts are encoded as N - shift, where N is the
6283 element size in bits. */
6284 if (op <= 4)
6285 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6286 if (size == 3) {
6287 count = q + 1;
6288 } else {
6289 count = q ? 4: 2;
6290 }
6291 switch (size) {
6292 case 0:
6293 imm = (uint8_t) shift;
6294 imm |= imm << 8;
6295 imm |= imm << 16;
6296 break;
6297 case 1:
6298 imm = (uint16_t) shift;
6299 imm |= imm << 16;
6300 break;
6301 case 2:
6302 case 3:
6303 imm = shift;
6304 break;
6305 default:
6306 abort();
6307 }
6308
6309 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6310 if (size == 3) {
6311 neon_load_reg64(cpu_V0, rm + pass);
6312 tcg_gen_movi_i64(cpu_V1, imm);
6313 switch (op) {
6314 case 0: /* VSHR */
6315 case 1: /* VSRA */
6316 if (u)
6317 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6318 else
ad69471c 6319 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6320 break;
ad69471c
PB
6321 case 2: /* VRSHR */
6322 case 3: /* VRSRA */
6323 if (u)
6324 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6325 else
ad69471c 6326 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6327 break;
ad69471c 6328 case 4: /* VSRI */
ad69471c
PB
6329 case 5: /* VSHL, VSLI */
6330 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6331 break;
0322b26e 6332 case 6: /* VQSHLU */
02da0b2d
PM
6333 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6334 cpu_V0, cpu_V1);
ad69471c 6335 break;
0322b26e
PM
6336 case 7: /* VQSHL */
6337 if (u) {
02da0b2d 6338 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6339 cpu_V0, cpu_V1);
6340 } else {
02da0b2d 6341 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6342 cpu_V0, cpu_V1);
6343 }
9ee6e8bb 6344 break;
9ee6e8bb 6345 }
ad69471c
PB
6346 if (op == 1 || op == 3) {
6347 /* Accumulate. */
5371cb81 6348 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6349 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6350 } else if (op == 4 || (op == 5 && u)) {
6351 /* Insert */
923e6509
CL
6352 neon_load_reg64(cpu_V1, rd + pass);
6353 uint64_t mask;
6354 if (shift < -63 || shift > 63) {
6355 mask = 0;
6356 } else {
6357 if (op == 4) {
6358 mask = 0xffffffffffffffffull >> -shift;
6359 } else {
6360 mask = 0xffffffffffffffffull << shift;
6361 }
6362 }
6363 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6364 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6365 }
6366 neon_store_reg64(cpu_V0, rd + pass);
6367 } else { /* size < 3 */
6368 /* Operands in T0 and T1. */
dd8fbd78 6369 tmp = neon_load_reg(rm, pass);
7d1b0095 6370 tmp2 = tcg_temp_new_i32();
dd8fbd78 6371 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6372 switch (op) {
6373 case 0: /* VSHR */
6374 case 1: /* VSRA */
6375 GEN_NEON_INTEGER_OP(shl);
6376 break;
6377 case 2: /* VRSHR */
6378 case 3: /* VRSRA */
6379 GEN_NEON_INTEGER_OP(rshl);
6380 break;
6381 case 4: /* VSRI */
ad69471c
PB
6382 case 5: /* VSHL, VSLI */
6383 switch (size) {
dd8fbd78
FN
6384 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6385 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6386 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6387 default: abort();
ad69471c
PB
6388 }
6389 break;
0322b26e 6390 case 6: /* VQSHLU */
ad69471c 6391 switch (size) {
0322b26e 6392 case 0:
02da0b2d
PM
6393 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6394 tmp, tmp2);
0322b26e
PM
6395 break;
6396 case 1:
02da0b2d
PM
6397 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6398 tmp, tmp2);
0322b26e
PM
6399 break;
6400 case 2:
02da0b2d
PM
6401 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6402 tmp, tmp2);
0322b26e
PM
6403 break;
6404 default:
cc13115b 6405 abort();
ad69471c
PB
6406 }
6407 break;
0322b26e 6408 case 7: /* VQSHL */
02da0b2d 6409 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6410 break;
ad69471c 6411 }
7d1b0095 6412 tcg_temp_free_i32(tmp2);
ad69471c
PB
6413
6414 if (op == 1 || op == 3) {
6415 /* Accumulate. */
dd8fbd78 6416 tmp2 = neon_load_reg(rd, pass);
5371cb81 6417 gen_neon_add(size, tmp, tmp2);
7d1b0095 6418 tcg_temp_free_i32(tmp2);
ad69471c
PB
6419 } else if (op == 4 || (op == 5 && u)) {
6420 /* Insert */
6421 switch (size) {
6422 case 0:
6423 if (op == 4)
ca9a32e4 6424 mask = 0xff >> -shift;
ad69471c 6425 else
ca9a32e4
JR
6426 mask = (uint8_t)(0xff << shift);
6427 mask |= mask << 8;
6428 mask |= mask << 16;
ad69471c
PB
6429 break;
6430 case 1:
6431 if (op == 4)
ca9a32e4 6432 mask = 0xffff >> -shift;
ad69471c 6433 else
ca9a32e4
JR
6434 mask = (uint16_t)(0xffff << shift);
6435 mask |= mask << 16;
ad69471c
PB
6436 break;
6437 case 2:
ca9a32e4
JR
6438 if (shift < -31 || shift > 31) {
6439 mask = 0;
6440 } else {
6441 if (op == 4)
6442 mask = 0xffffffffu >> -shift;
6443 else
6444 mask = 0xffffffffu << shift;
6445 }
ad69471c
PB
6446 break;
6447 default:
6448 abort();
6449 }
dd8fbd78 6450 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6451 tcg_gen_andi_i32(tmp, tmp, mask);
6452 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6453 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6454 tcg_temp_free_i32(tmp2);
ad69471c 6455 }
dd8fbd78 6456 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6457 }
6458 } /* for pass */
6459 } else if (op < 10) {
ad69471c 6460 /* Shift by immediate and narrow:
9ee6e8bb 6461 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6462 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6463 if (rm & 1) {
6464 return 1;
6465 }
9ee6e8bb
PB
6466 shift = shift - (1 << (size + 3));
6467 size++;
92cdfaeb 6468 if (size == 3) {
a7812ae4 6469 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6470 neon_load_reg64(cpu_V0, rm);
6471 neon_load_reg64(cpu_V1, rm + 1);
6472 for (pass = 0; pass < 2; pass++) {
6473 TCGv_i64 in;
6474 if (pass == 0) {
6475 in = cpu_V0;
6476 } else {
6477 in = cpu_V1;
6478 }
ad69471c 6479 if (q) {
0b36f4cd 6480 if (input_unsigned) {
92cdfaeb 6481 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6482 } else {
92cdfaeb 6483 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6484 }
ad69471c 6485 } else {
0b36f4cd 6486 if (input_unsigned) {
92cdfaeb 6487 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6488 } else {
92cdfaeb 6489 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6490 }
ad69471c 6491 }
7d1b0095 6492 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6493 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6494 neon_store_reg(rd, pass, tmp);
6495 } /* for pass */
6496 tcg_temp_free_i64(tmp64);
6497 } else {
6498 if (size == 1) {
6499 imm = (uint16_t)shift;
6500 imm |= imm << 16;
2c0262af 6501 } else {
92cdfaeb
PM
6502 /* size == 2 */
6503 imm = (uint32_t)shift;
6504 }
6505 tmp2 = tcg_const_i32(imm);
6506 tmp4 = neon_load_reg(rm + 1, 0);
6507 tmp5 = neon_load_reg(rm + 1, 1);
6508 for (pass = 0; pass < 2; pass++) {
6509 if (pass == 0) {
6510 tmp = neon_load_reg(rm, 0);
6511 } else {
6512 tmp = tmp4;
6513 }
0b36f4cd
CL
6514 gen_neon_shift_narrow(size, tmp, tmp2, q,
6515 input_unsigned);
92cdfaeb
PM
6516 if (pass == 0) {
6517 tmp3 = neon_load_reg(rm, 1);
6518 } else {
6519 tmp3 = tmp5;
6520 }
0b36f4cd
CL
6521 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6522 input_unsigned);
36aa55dc 6523 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6524 tcg_temp_free_i32(tmp);
6525 tcg_temp_free_i32(tmp3);
6526 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6527 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6528 neon_store_reg(rd, pass, tmp);
6529 } /* for pass */
c6067f04 6530 tcg_temp_free_i32(tmp2);
b75263d6 6531 }
9ee6e8bb 6532 } else if (op == 10) {
cc13115b
PM
6533 /* VSHLL, VMOVL */
6534 if (q || (rd & 1)) {
9ee6e8bb 6535 return 1;
cc13115b 6536 }
ad69471c
PB
6537 tmp = neon_load_reg(rm, 0);
6538 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6539 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6540 if (pass == 1)
6541 tmp = tmp2;
6542
6543 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6544
9ee6e8bb
PB
6545 if (shift != 0) {
6546 /* The shift is less than the width of the source
ad69471c
PB
6547 type, so we can just shift the whole register. */
6548 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6549 /* Widen the result of shift: we need to clear
6550 * the potential overflow bits resulting from
6551 * left bits of the narrow input appearing as
6552 * right bits of left the neighbour narrow
6553 * input. */
ad69471c
PB
6554 if (size < 2 || !u) {
6555 uint64_t imm64;
6556 if (size == 0) {
6557 imm = (0xffu >> (8 - shift));
6558 imm |= imm << 16;
acdf01ef 6559 } else if (size == 1) {
ad69471c 6560 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6561 } else {
6562 /* size == 2 */
6563 imm = 0xffffffff >> (32 - shift);
6564 }
6565 if (size < 2) {
6566 imm64 = imm | (((uint64_t)imm) << 32);
6567 } else {
6568 imm64 = imm;
9ee6e8bb 6569 }
acdf01ef 6570 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6571 }
6572 }
ad69471c 6573 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6574 }
f73534a5 6575 } else if (op >= 14) {
9ee6e8bb 6576 /* VCVT fixed-point. */
cc13115b
PM
6577 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6578 return 1;
6579 }
f73534a5
PM
6580 /* We have already masked out the must-be-1 top bit of imm6,
6581 * hence this 32-shift where the ARM ARM has 64-imm6.
6582 */
6583 shift = 32 - shift;
9ee6e8bb 6584 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6585 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6586 if (!(op & 1)) {
9ee6e8bb 6587 if (u)
5500b06c 6588 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6589 else
5500b06c 6590 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6591 } else {
6592 if (u)
5500b06c 6593 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6594 else
5500b06c 6595 gen_vfp_tosl(0, shift, 1);
2c0262af 6596 }
4373f3ce 6597 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6598 }
6599 } else {
9ee6e8bb
PB
6600 return 1;
6601 }
6602 } else { /* (insn & 0x00380080) == 0 */
6603 int invert;
7d80fee5
PM
6604 if (q && (rd & 1)) {
6605 return 1;
6606 }
9ee6e8bb
PB
6607
6608 op = (insn >> 8) & 0xf;
6609 /* One register and immediate. */
6610 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6611 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6612 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6613 * We choose to not special-case this and will behave as if a
6614 * valid constant encoding of 0 had been given.
6615 */
9ee6e8bb
PB
6616 switch (op) {
6617 case 0: case 1:
6618 /* no-op */
6619 break;
6620 case 2: case 3:
6621 imm <<= 8;
6622 break;
6623 case 4: case 5:
6624 imm <<= 16;
6625 break;
6626 case 6: case 7:
6627 imm <<= 24;
6628 break;
6629 case 8: case 9:
6630 imm |= imm << 16;
6631 break;
6632 case 10: case 11:
6633 imm = (imm << 8) | (imm << 24);
6634 break;
6635 case 12:
8e31209e 6636 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6637 break;
6638 case 13:
6639 imm = (imm << 16) | 0xffff;
6640 break;
6641 case 14:
6642 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6643 if (invert)
6644 imm = ~imm;
6645 break;
6646 case 15:
7d80fee5
PM
6647 if (invert) {
6648 return 1;
6649 }
9ee6e8bb
PB
6650 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6651 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6652 break;
6653 }
6654 if (invert)
6655 imm = ~imm;
6656
9ee6e8bb
PB
6657 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6658 if (op & 1 && op < 12) {
ad69471c 6659 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6660 if (invert) {
6661 /* The immediate value has already been inverted, so
6662 BIC becomes AND. */
ad69471c 6663 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6664 } else {
ad69471c 6665 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6666 }
9ee6e8bb 6667 } else {
ad69471c 6668 /* VMOV, VMVN. */
7d1b0095 6669 tmp = tcg_temp_new_i32();
9ee6e8bb 6670 if (op == 14 && invert) {
a5a14945 6671 int n;
ad69471c
PB
6672 uint32_t val;
6673 val = 0;
9ee6e8bb
PB
6674 for (n = 0; n < 4; n++) {
6675 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6676 val |= 0xff << (n * 8);
9ee6e8bb 6677 }
ad69471c
PB
6678 tcg_gen_movi_i32(tmp, val);
6679 } else {
6680 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6681 }
9ee6e8bb 6682 }
ad69471c 6683 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6684 }
6685 }
e4b3861d 6686 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6687 if (size != 3) {
6688 op = (insn >> 8) & 0xf;
6689 if ((insn & (1 << 6)) == 0) {
6690 /* Three registers of different lengths. */
6691 int src1_wide;
6692 int src2_wide;
6693 int prewiden;
526d0096
PM
6694 /* undefreq: bit 0 : UNDEF if size == 0
6695 * bit 1 : UNDEF if size == 1
6696 * bit 2 : UNDEF if size == 2
6697 * bit 3 : UNDEF if U == 1
6698 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6699 */
6700 int undefreq;
6701 /* prewiden, src1_wide, src2_wide, undefreq */
6702 static const int neon_3reg_wide[16][4] = {
6703 {1, 0, 0, 0}, /* VADDL */
6704 {1, 1, 0, 0}, /* VADDW */
6705 {1, 0, 0, 0}, /* VSUBL */
6706 {1, 1, 0, 0}, /* VSUBW */
6707 {0, 1, 1, 0}, /* VADDHN */
6708 {0, 0, 0, 0}, /* VABAL */
6709 {0, 1, 1, 0}, /* VSUBHN */
6710 {0, 0, 0, 0}, /* VABDL */
6711 {0, 0, 0, 0}, /* VMLAL */
526d0096 6712 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6713 {0, 0, 0, 0}, /* VMLSL */
526d0096 6714 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6715 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6716 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6717 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6718 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6719 };
6720
6721 prewiden = neon_3reg_wide[op][0];
6722 src1_wide = neon_3reg_wide[op][1];
6723 src2_wide = neon_3reg_wide[op][2];
695272dc 6724 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6725
526d0096
PM
6726 if ((undefreq & (1 << size)) ||
6727 ((undefreq & 8) && u)) {
695272dc
PM
6728 return 1;
6729 }
6730 if ((src1_wide && (rn & 1)) ||
6731 (src2_wide && (rm & 1)) ||
6732 (!src2_wide && (rd & 1))) {
ad69471c 6733 return 1;
695272dc 6734 }
ad69471c 6735
4e624eda
PM
6736 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6737 * outside the loop below as it only performs a single pass.
6738 */
6739 if (op == 14 && size == 2) {
6740 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6741
d614a513 6742 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6743 return 1;
6744 }
6745 tcg_rn = tcg_temp_new_i64();
6746 tcg_rm = tcg_temp_new_i64();
6747 tcg_rd = tcg_temp_new_i64();
6748 neon_load_reg64(tcg_rn, rn);
6749 neon_load_reg64(tcg_rm, rm);
6750 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6751 neon_store_reg64(tcg_rd, rd);
6752 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6753 neon_store_reg64(tcg_rd, rd + 1);
6754 tcg_temp_free_i64(tcg_rn);
6755 tcg_temp_free_i64(tcg_rm);
6756 tcg_temp_free_i64(tcg_rd);
6757 return 0;
6758 }
6759
9ee6e8bb
PB
6760 /* Avoid overlapping operands. Wide source operands are
6761 always aligned so will never overlap with wide
6762 destinations in problematic ways. */
8f8e3aa4 6763 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6764 tmp = neon_load_reg(rm, 1);
6765 neon_store_scratch(2, tmp);
8f8e3aa4 6766 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6767 tmp = neon_load_reg(rn, 1);
6768 neon_store_scratch(2, tmp);
9ee6e8bb 6769 }
f764718d 6770 tmp3 = NULL;
9ee6e8bb 6771 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6772 if (src1_wide) {
6773 neon_load_reg64(cpu_V0, rn + pass);
f764718d 6774 tmp = NULL;
9ee6e8bb 6775 } else {
ad69471c 6776 if (pass == 1 && rd == rn) {
dd8fbd78 6777 tmp = neon_load_scratch(2);
9ee6e8bb 6778 } else {
ad69471c
PB
6779 tmp = neon_load_reg(rn, pass);
6780 }
6781 if (prewiden) {
6782 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6783 }
6784 }
ad69471c
PB
6785 if (src2_wide) {
6786 neon_load_reg64(cpu_V1, rm + pass);
f764718d 6787 tmp2 = NULL;
9ee6e8bb 6788 } else {
ad69471c 6789 if (pass == 1 && rd == rm) {
dd8fbd78 6790 tmp2 = neon_load_scratch(2);
9ee6e8bb 6791 } else {
ad69471c
PB
6792 tmp2 = neon_load_reg(rm, pass);
6793 }
6794 if (prewiden) {
6795 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6796 }
9ee6e8bb
PB
6797 }
6798 switch (op) {
6799 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6800 gen_neon_addl(size);
9ee6e8bb 6801 break;
79b0e534 6802 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6803 gen_neon_subl(size);
9ee6e8bb
PB
6804 break;
6805 case 5: case 7: /* VABAL, VABDL */
6806 switch ((size << 1) | u) {
ad69471c
PB
6807 case 0:
6808 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6809 break;
6810 case 1:
6811 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6812 break;
6813 case 2:
6814 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6815 break;
6816 case 3:
6817 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6818 break;
6819 case 4:
6820 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6821 break;
6822 case 5:
6823 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6824 break;
9ee6e8bb
PB
6825 default: abort();
6826 }
7d1b0095
PM
6827 tcg_temp_free_i32(tmp2);
6828 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6829 break;
6830 case 8: case 9: case 10: case 11: case 12: case 13:
6831 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6832 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6833 break;
6834 case 14: /* Polynomial VMULL */
e5ca24cb 6835 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6836 tcg_temp_free_i32(tmp2);
6837 tcg_temp_free_i32(tmp);
e5ca24cb 6838 break;
695272dc
PM
6839 default: /* 15 is RESERVED: caught earlier */
6840 abort();
9ee6e8bb 6841 }
ebcd88ce
PM
6842 if (op == 13) {
6843 /* VQDMULL */
6844 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6845 neon_store_reg64(cpu_V0, rd + pass);
6846 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6847 /* Accumulate. */
ebcd88ce 6848 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6849 switch (op) {
4dc064e6
PM
6850 case 10: /* VMLSL */
6851 gen_neon_negl(cpu_V0, size);
6852 /* Fall through */
6853 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6854 gen_neon_addl(size);
9ee6e8bb
PB
6855 break;
6856 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6857 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6858 if (op == 11) {
6859 gen_neon_negl(cpu_V0, size);
6860 }
ad69471c
PB
6861 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6862 break;
9ee6e8bb
PB
6863 default:
6864 abort();
6865 }
ad69471c 6866 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6867 } else if (op == 4 || op == 6) {
6868 /* Narrowing operation. */
7d1b0095 6869 tmp = tcg_temp_new_i32();
79b0e534 6870 if (!u) {
9ee6e8bb 6871 switch (size) {
ad69471c
PB
6872 case 0:
6873 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6874 break;
6875 case 1:
6876 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6877 break;
6878 case 2:
6879 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6880 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6881 break;
9ee6e8bb
PB
6882 default: abort();
6883 }
6884 } else {
6885 switch (size) {
ad69471c
PB
6886 case 0:
6887 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6888 break;
6889 case 1:
6890 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6891 break;
6892 case 2:
6893 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6894 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6895 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6896 break;
9ee6e8bb
PB
6897 default: abort();
6898 }
6899 }
ad69471c
PB
6900 if (pass == 0) {
6901 tmp3 = tmp;
6902 } else {
6903 neon_store_reg(rd, 0, tmp3);
6904 neon_store_reg(rd, 1, tmp);
6905 }
9ee6e8bb
PB
6906 } else {
6907 /* Write back the result. */
ad69471c 6908 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6909 }
6910 }
6911 } else {
3e3326df
PM
6912 /* Two registers and a scalar. NB that for ops of this form
6913 * the ARM ARM labels bit 24 as Q, but it is in our variable
6914 * 'u', not 'q'.
6915 */
6916 if (size == 0) {
6917 return 1;
6918 }
9ee6e8bb 6919 switch (op) {
9ee6e8bb 6920 case 1: /* Float VMLA scalar */
9ee6e8bb 6921 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6922 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6923 if (size == 1) {
6924 return 1;
6925 }
6926 /* fall through */
6927 case 0: /* Integer VMLA scalar */
6928 case 4: /* Integer VMLS scalar */
6929 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6930 case 12: /* VQDMULH scalar */
6931 case 13: /* VQRDMULH scalar */
3e3326df
PM
6932 if (u && ((rd | rn) & 1)) {
6933 return 1;
6934 }
dd8fbd78
FN
6935 tmp = neon_get_scalar(size, rm);
6936 neon_store_scratch(0, tmp);
9ee6e8bb 6937 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6938 tmp = neon_load_scratch(0);
6939 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6940 if (op == 12) {
6941 if (size == 1) {
02da0b2d 6942 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6943 } else {
02da0b2d 6944 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6945 }
6946 } else if (op == 13) {
6947 if (size == 1) {
02da0b2d 6948 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6949 } else {
02da0b2d 6950 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6951 }
6952 } else if (op & 1) {
aa47cfdd
PM
6953 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6954 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6955 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6956 } else {
6957 switch (size) {
dd8fbd78
FN
6958 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6959 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6960 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6961 default: abort();
9ee6e8bb
PB
6962 }
6963 }
7d1b0095 6964 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6965 if (op < 8) {
6966 /* Accumulate. */
dd8fbd78 6967 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6968 switch (op) {
6969 case 0:
dd8fbd78 6970 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6971 break;
6972 case 1:
aa47cfdd
PM
6973 {
6974 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6975 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6976 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6977 break;
aa47cfdd 6978 }
9ee6e8bb 6979 case 4:
dd8fbd78 6980 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6981 break;
6982 case 5:
aa47cfdd
PM
6983 {
6984 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6985 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6986 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6987 break;
aa47cfdd 6988 }
9ee6e8bb
PB
6989 default:
6990 abort();
6991 }
7d1b0095 6992 tcg_temp_free_i32(tmp2);
9ee6e8bb 6993 }
dd8fbd78 6994 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6995 }
6996 break;
9ee6e8bb 6997 case 3: /* VQDMLAL scalar */
9ee6e8bb 6998 case 7: /* VQDMLSL scalar */
9ee6e8bb 6999 case 11: /* VQDMULL scalar */
3e3326df 7000 if (u == 1) {
ad69471c 7001 return 1;
3e3326df
PM
7002 }
7003 /* fall through */
7004 case 2: /* VMLAL sclar */
7005 case 6: /* VMLSL scalar */
7006 case 10: /* VMULL scalar */
7007 if (rd & 1) {
7008 return 1;
7009 }
dd8fbd78 7010 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7011 /* We need a copy of tmp2 because gen_neon_mull
7012 * deletes it during pass 0. */
7d1b0095 7013 tmp4 = tcg_temp_new_i32();
c6067f04 7014 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7015 tmp3 = neon_load_reg(rn, 1);
ad69471c 7016
9ee6e8bb 7017 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7018 if (pass == 0) {
7019 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7020 } else {
dd8fbd78 7021 tmp = tmp3;
c6067f04 7022 tmp2 = tmp4;
9ee6e8bb 7023 }
ad69471c 7024 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7025 if (op != 11) {
7026 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7027 }
9ee6e8bb 7028 switch (op) {
4dc064e6
PM
7029 case 6:
7030 gen_neon_negl(cpu_V0, size);
7031 /* Fall through */
7032 case 2:
ad69471c 7033 gen_neon_addl(size);
9ee6e8bb
PB
7034 break;
7035 case 3: case 7:
ad69471c 7036 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7037 if (op == 7) {
7038 gen_neon_negl(cpu_V0, size);
7039 }
ad69471c 7040 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7041 break;
7042 case 10:
7043 /* no-op */
7044 break;
7045 case 11:
ad69471c 7046 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7047 break;
7048 default:
7049 abort();
7050 }
ad69471c 7051 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7052 }
61adacc8
RH
7053 break;
7054 case 14: /* VQRDMLAH scalar */
7055 case 15: /* VQRDMLSH scalar */
7056 {
7057 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7058
61adacc8
RH
7059 if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
7060 return 1;
7061 }
7062 if (u && ((rd | rn) & 1)) {
7063 return 1;
7064 }
7065 if (op == 14) {
7066 if (size == 1) {
7067 fn = gen_helper_neon_qrdmlah_s16;
7068 } else {
7069 fn = gen_helper_neon_qrdmlah_s32;
7070 }
7071 } else {
7072 if (size == 1) {
7073 fn = gen_helper_neon_qrdmlsh_s16;
7074 } else {
7075 fn = gen_helper_neon_qrdmlsh_s32;
7076 }
7077 }
dd8fbd78 7078
61adacc8
RH
7079 tmp2 = neon_get_scalar(size, rm);
7080 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7081 tmp = neon_load_reg(rn, pass);
7082 tmp3 = neon_load_reg(rd, pass);
7083 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7084 tcg_temp_free_i32(tmp3);
7085 neon_store_reg(rd, pass, tmp);
7086 }
7087 tcg_temp_free_i32(tmp2);
7088 }
9ee6e8bb 7089 break;
61adacc8
RH
7090 default:
7091 g_assert_not_reached();
9ee6e8bb
PB
7092 }
7093 }
7094 } else { /* size == 3 */
7095 if (!u) {
7096 /* Extract. */
9ee6e8bb 7097 imm = (insn >> 8) & 0xf;
ad69471c
PB
7098
7099 if (imm > 7 && !q)
7100 return 1;
7101
52579ea1
PM
7102 if (q && ((rd | rn | rm) & 1)) {
7103 return 1;
7104 }
7105
ad69471c
PB
7106 if (imm == 0) {
7107 neon_load_reg64(cpu_V0, rn);
7108 if (q) {
7109 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7110 }
ad69471c
PB
7111 } else if (imm == 8) {
7112 neon_load_reg64(cpu_V0, rn + 1);
7113 if (q) {
7114 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7115 }
ad69471c 7116 } else if (q) {
a7812ae4 7117 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7118 if (imm < 8) {
7119 neon_load_reg64(cpu_V0, rn);
a7812ae4 7120 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7121 } else {
7122 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7123 neon_load_reg64(tmp64, rm);
ad69471c
PB
7124 }
7125 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7126 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7127 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7128 if (imm < 8) {
7129 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7130 } else {
ad69471c
PB
7131 neon_load_reg64(cpu_V1, rm + 1);
7132 imm -= 8;
9ee6e8bb 7133 }
ad69471c 7134 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7135 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7136 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7137 tcg_temp_free_i64(tmp64);
ad69471c 7138 } else {
a7812ae4 7139 /* BUGFIX */
ad69471c 7140 neon_load_reg64(cpu_V0, rn);
a7812ae4 7141 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7142 neon_load_reg64(cpu_V1, rm);
a7812ae4 7143 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7144 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7145 }
7146 neon_store_reg64(cpu_V0, rd);
7147 if (q) {
7148 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7149 }
7150 } else if ((insn & (1 << 11)) == 0) {
7151 /* Two register misc. */
7152 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7153 size = (insn >> 18) & 3;
600b828c
PM
7154 /* UNDEF for unknown op values and bad op-size combinations */
7155 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7156 return 1;
7157 }
fe8fcf3d
PM
7158 if (neon_2rm_is_v8_op(op) &&
7159 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7160 return 1;
7161 }
fc2a9b37
PM
7162 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7163 q && ((rm | rd) & 1)) {
7164 return 1;
7165 }
9ee6e8bb 7166 switch (op) {
600b828c 7167 case NEON_2RM_VREV64:
9ee6e8bb 7168 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7169 tmp = neon_load_reg(rm, pass * 2);
7170 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7171 switch (size) {
dd8fbd78
FN
7172 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7173 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7174 case 2: /* no-op */ break;
7175 default: abort();
7176 }
dd8fbd78 7177 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7178 if (size == 2) {
dd8fbd78 7179 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7180 } else {
9ee6e8bb 7181 switch (size) {
dd8fbd78
FN
7182 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7183 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7184 default: abort();
7185 }
dd8fbd78 7186 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7187 }
7188 }
7189 break;
600b828c
PM
7190 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7191 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7192 for (pass = 0; pass < q + 1; pass++) {
7193 tmp = neon_load_reg(rm, pass * 2);
7194 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7195 tmp = neon_load_reg(rm, pass * 2 + 1);
7196 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7197 switch (size) {
7198 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7199 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7200 case 2: tcg_gen_add_i64(CPU_V001); break;
7201 default: abort();
7202 }
600b828c 7203 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7204 /* Accumulate. */
ad69471c
PB
7205 neon_load_reg64(cpu_V1, rd + pass);
7206 gen_neon_addl(size);
9ee6e8bb 7207 }
ad69471c 7208 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7209 }
7210 break;
600b828c 7211 case NEON_2RM_VTRN:
9ee6e8bb 7212 if (size == 2) {
a5a14945 7213 int n;
9ee6e8bb 7214 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7215 tmp = neon_load_reg(rm, n);
7216 tmp2 = neon_load_reg(rd, n + 1);
7217 neon_store_reg(rm, n, tmp2);
7218 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7219 }
7220 } else {
7221 goto elementwise;
7222 }
7223 break;
600b828c 7224 case NEON_2RM_VUZP:
02acedf9 7225 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7226 return 1;
9ee6e8bb
PB
7227 }
7228 break;
600b828c 7229 case NEON_2RM_VZIP:
d68a6f3a 7230 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7231 return 1;
9ee6e8bb
PB
7232 }
7233 break;
600b828c
PM
7234 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7235 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7236 if (rm & 1) {
7237 return 1;
7238 }
f764718d 7239 tmp2 = NULL;
9ee6e8bb 7240 for (pass = 0; pass < 2; pass++) {
ad69471c 7241 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7242 tmp = tcg_temp_new_i32();
600b828c
PM
7243 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7244 tmp, cpu_V0);
ad69471c
PB
7245 if (pass == 0) {
7246 tmp2 = tmp;
7247 } else {
7248 neon_store_reg(rd, 0, tmp2);
7249 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7250 }
9ee6e8bb
PB
7251 }
7252 break;
600b828c 7253 case NEON_2RM_VSHLL:
fc2a9b37 7254 if (q || (rd & 1)) {
9ee6e8bb 7255 return 1;
600b828c 7256 }
ad69471c
PB
7257 tmp = neon_load_reg(rm, 0);
7258 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7259 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7260 if (pass == 1)
7261 tmp = tmp2;
7262 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7263 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7264 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7265 }
7266 break;
600b828c 7267 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7268 {
7269 TCGv_ptr fpst;
7270 TCGv_i32 ahp;
7271
d614a513 7272 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7273 q || (rm & 1)) {
7274 return 1;
7275 }
7d1b0095
PM
7276 tmp = tcg_temp_new_i32();
7277 tmp2 = tcg_temp_new_i32();
486624fc
AB
7278 fpst = get_fpstatus_ptr(true);
7279 ahp = get_ahp_flag();
60011498 7280 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7281 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7282 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7283 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7284 tcg_gen_shli_i32(tmp2, tmp2, 16);
7285 tcg_gen_or_i32(tmp2, tmp2, tmp);
7286 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7287 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7288 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7289 neon_store_reg(rd, 0, tmp2);
7d1b0095 7290 tmp2 = tcg_temp_new_i32();
486624fc 7291 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7292 tcg_gen_shli_i32(tmp2, tmp2, 16);
7293 tcg_gen_or_i32(tmp2, tmp2, tmp);
7294 neon_store_reg(rd, 1, tmp2);
7d1b0095 7295 tcg_temp_free_i32(tmp);
486624fc
AB
7296 tcg_temp_free_i32(ahp);
7297 tcg_temp_free_ptr(fpst);
60011498 7298 break;
486624fc 7299 }
600b828c 7300 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7301 {
7302 TCGv_ptr fpst;
7303 TCGv_i32 ahp;
d614a513 7304 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7305 q || (rd & 1)) {
7306 return 1;
7307 }
486624fc
AB
7308 fpst = get_fpstatus_ptr(true);
7309 ahp = get_ahp_flag();
7d1b0095 7310 tmp3 = tcg_temp_new_i32();
60011498
PB
7311 tmp = neon_load_reg(rm, 0);
7312 tmp2 = neon_load_reg(rm, 1);
7313 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7314 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7315 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7316 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7317 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7318 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7319 tcg_temp_free_i32(tmp);
60011498 7320 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7321 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7322 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7323 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7324 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7325 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7326 tcg_temp_free_i32(tmp2);
7327 tcg_temp_free_i32(tmp3);
486624fc
AB
7328 tcg_temp_free_i32(ahp);
7329 tcg_temp_free_ptr(fpst);
60011498 7330 break;
486624fc 7331 }
9d935509 7332 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7333 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7334 || ((rm | rd) & 1)) {
7335 return 1;
7336 }
1a66ac61
RH
7337 ptr1 = vfp_reg_ptr(true, rd);
7338 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7339
7340 /* Bit 6 is the lowest opcode bit; it distinguishes between
7341 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7342 */
7343 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7344
7345 if (op == NEON_2RM_AESE) {
1a66ac61 7346 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7347 } else {
1a66ac61 7348 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7349 }
1a66ac61
RH
7350 tcg_temp_free_ptr(ptr1);
7351 tcg_temp_free_ptr(ptr2);
9d935509
AB
7352 tcg_temp_free_i32(tmp3);
7353 break;
f1ecb913 7354 case NEON_2RM_SHA1H:
d614a513 7355 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7356 || ((rm | rd) & 1)) {
7357 return 1;
7358 }
1a66ac61
RH
7359 ptr1 = vfp_reg_ptr(true, rd);
7360 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7361
1a66ac61 7362 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7363
1a66ac61
RH
7364 tcg_temp_free_ptr(ptr1);
7365 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7366 break;
7367 case NEON_2RM_SHA1SU1:
7368 if ((rm | rd) & 1) {
7369 return 1;
7370 }
7371 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7372 if (q) {
d614a513 7373 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7374 return 1;
7375 }
d614a513 7376 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7377 return 1;
7378 }
1a66ac61
RH
7379 ptr1 = vfp_reg_ptr(true, rd);
7380 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7381 if (q) {
1a66ac61 7382 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7383 } else {
1a66ac61 7384 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7385 }
1a66ac61
RH
7386 tcg_temp_free_ptr(ptr1);
7387 tcg_temp_free_ptr(ptr2);
f1ecb913 7388 break;
9ee6e8bb
PB
7389 default:
7390 elementwise:
7391 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7392 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7393 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7394 neon_reg_offset(rm, pass));
f764718d 7395 tmp = NULL;
9ee6e8bb 7396 } else {
dd8fbd78 7397 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7398 }
7399 switch (op) {
600b828c 7400 case NEON_2RM_VREV32:
9ee6e8bb 7401 switch (size) {
dd8fbd78
FN
7402 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7403 case 1: gen_swap_half(tmp); break;
600b828c 7404 default: abort();
9ee6e8bb
PB
7405 }
7406 break;
600b828c 7407 case NEON_2RM_VREV16:
dd8fbd78 7408 gen_rev16(tmp);
9ee6e8bb 7409 break;
600b828c 7410 case NEON_2RM_VCLS:
9ee6e8bb 7411 switch (size) {
dd8fbd78
FN
7412 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7413 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7414 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7415 default: abort();
9ee6e8bb
PB
7416 }
7417 break;
600b828c 7418 case NEON_2RM_VCLZ:
9ee6e8bb 7419 switch (size) {
dd8fbd78
FN
7420 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7421 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7422 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7423 default: abort();
9ee6e8bb
PB
7424 }
7425 break;
600b828c 7426 case NEON_2RM_VCNT:
dd8fbd78 7427 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7428 break;
600b828c 7429 case NEON_2RM_VMVN:
dd8fbd78 7430 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7431 break;
600b828c 7432 case NEON_2RM_VQABS:
9ee6e8bb 7433 switch (size) {
02da0b2d
PM
7434 case 0:
7435 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7436 break;
7437 case 1:
7438 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7439 break;
7440 case 2:
7441 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7442 break;
600b828c 7443 default: abort();
9ee6e8bb
PB
7444 }
7445 break;
600b828c 7446 case NEON_2RM_VQNEG:
9ee6e8bb 7447 switch (size) {
02da0b2d
PM
7448 case 0:
7449 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7450 break;
7451 case 1:
7452 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7453 break;
7454 case 2:
7455 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7456 break;
600b828c 7457 default: abort();
9ee6e8bb
PB
7458 }
7459 break;
600b828c 7460 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7461 tmp2 = tcg_const_i32(0);
9ee6e8bb 7462 switch(size) {
dd8fbd78
FN
7463 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7464 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7465 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7466 default: abort();
9ee6e8bb 7467 }
39d5492a 7468 tcg_temp_free_i32(tmp2);
600b828c 7469 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7470 tcg_gen_not_i32(tmp, tmp);
600b828c 7471 }
9ee6e8bb 7472 break;
600b828c 7473 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7474 tmp2 = tcg_const_i32(0);
9ee6e8bb 7475 switch(size) {
dd8fbd78
FN
7476 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7477 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7478 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7479 default: abort();
9ee6e8bb 7480 }
39d5492a 7481 tcg_temp_free_i32(tmp2);
600b828c 7482 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7483 tcg_gen_not_i32(tmp, tmp);
600b828c 7484 }
9ee6e8bb 7485 break;
600b828c 7486 case NEON_2RM_VCEQ0:
dd8fbd78 7487 tmp2 = tcg_const_i32(0);
9ee6e8bb 7488 switch(size) {
dd8fbd78
FN
7489 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7490 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7491 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7492 default: abort();
9ee6e8bb 7493 }
39d5492a 7494 tcg_temp_free_i32(tmp2);
9ee6e8bb 7495 break;
600b828c 7496 case NEON_2RM_VABS:
9ee6e8bb 7497 switch(size) {
dd8fbd78
FN
7498 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7499 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7500 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7501 default: abort();
9ee6e8bb
PB
7502 }
7503 break;
600b828c 7504 case NEON_2RM_VNEG:
dd8fbd78
FN
7505 tmp2 = tcg_const_i32(0);
7506 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7507 tcg_temp_free_i32(tmp2);
9ee6e8bb 7508 break;
600b828c 7509 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7510 {
7511 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7512 tmp2 = tcg_const_i32(0);
aa47cfdd 7513 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7514 tcg_temp_free_i32(tmp2);
aa47cfdd 7515 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7516 break;
aa47cfdd 7517 }
600b828c 7518 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7519 {
7520 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7521 tmp2 = tcg_const_i32(0);
aa47cfdd 7522 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7523 tcg_temp_free_i32(tmp2);
aa47cfdd 7524 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7525 break;
aa47cfdd 7526 }
600b828c 7527 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7528 {
7529 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7530 tmp2 = tcg_const_i32(0);
aa47cfdd 7531 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7532 tcg_temp_free_i32(tmp2);
aa47cfdd 7533 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7534 break;
aa47cfdd 7535 }
600b828c 7536 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7537 {
7538 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7539 tmp2 = tcg_const_i32(0);
aa47cfdd 7540 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7541 tcg_temp_free_i32(tmp2);
aa47cfdd 7542 tcg_temp_free_ptr(fpstatus);
0e326109 7543 break;
aa47cfdd 7544 }
600b828c 7545 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7546 {
7547 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7548 tmp2 = tcg_const_i32(0);
aa47cfdd 7549 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7550 tcg_temp_free_i32(tmp2);
aa47cfdd 7551 tcg_temp_free_ptr(fpstatus);
0e326109 7552 break;
aa47cfdd 7553 }
600b828c 7554 case NEON_2RM_VABS_F:
4373f3ce 7555 gen_vfp_abs(0);
9ee6e8bb 7556 break;
600b828c 7557 case NEON_2RM_VNEG_F:
4373f3ce 7558 gen_vfp_neg(0);
9ee6e8bb 7559 break;
600b828c 7560 case NEON_2RM_VSWP:
dd8fbd78
FN
7561 tmp2 = neon_load_reg(rd, pass);
7562 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7563 break;
600b828c 7564 case NEON_2RM_VTRN:
dd8fbd78 7565 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7566 switch (size) {
dd8fbd78
FN
7567 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7568 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7569 default: abort();
9ee6e8bb 7570 }
dd8fbd78 7571 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7572 break;
34f7b0a2
WN
7573 case NEON_2RM_VRINTN:
7574 case NEON_2RM_VRINTA:
7575 case NEON_2RM_VRINTM:
7576 case NEON_2RM_VRINTP:
7577 case NEON_2RM_VRINTZ:
7578 {
7579 TCGv_i32 tcg_rmode;
7580 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7581 int rmode;
7582
7583 if (op == NEON_2RM_VRINTZ) {
7584 rmode = FPROUNDING_ZERO;
7585 } else {
7586 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7587 }
7588
7589 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7590 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7591 cpu_env);
7592 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7593 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7594 cpu_env);
7595 tcg_temp_free_ptr(fpstatus);
7596 tcg_temp_free_i32(tcg_rmode);
7597 break;
7598 }
2ce70625
WN
7599 case NEON_2RM_VRINTX:
7600 {
7601 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7602 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7603 tcg_temp_free_ptr(fpstatus);
7604 break;
7605 }
901ad525
WN
7606 case NEON_2RM_VCVTAU:
7607 case NEON_2RM_VCVTAS:
7608 case NEON_2RM_VCVTNU:
7609 case NEON_2RM_VCVTNS:
7610 case NEON_2RM_VCVTPU:
7611 case NEON_2RM_VCVTPS:
7612 case NEON_2RM_VCVTMU:
7613 case NEON_2RM_VCVTMS:
7614 {
7615 bool is_signed = !extract32(insn, 7, 1);
7616 TCGv_ptr fpst = get_fpstatus_ptr(1);
7617 TCGv_i32 tcg_rmode, tcg_shift;
7618 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7619
7620 tcg_shift = tcg_const_i32(0);
7621 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7622 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7623 cpu_env);
7624
7625 if (is_signed) {
7626 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7627 tcg_shift, fpst);
7628 } else {
7629 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7630 tcg_shift, fpst);
7631 }
7632
7633 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7634 cpu_env);
7635 tcg_temp_free_i32(tcg_rmode);
7636 tcg_temp_free_i32(tcg_shift);
7637 tcg_temp_free_ptr(fpst);
7638 break;
7639 }
600b828c 7640 case NEON_2RM_VRECPE:
b6d4443a
AB
7641 {
7642 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7643 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7644 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7645 break;
b6d4443a 7646 }
600b828c 7647 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7648 {
7649 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7650 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7651 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7652 break;
c2fb418e 7653 }
600b828c 7654 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7655 {
7656 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7657 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7658 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7659 break;
b6d4443a 7660 }
600b828c 7661 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7662 {
7663 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7664 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7665 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7666 break;
c2fb418e 7667 }
600b828c 7668 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7669 gen_vfp_sito(0, 1);
9ee6e8bb 7670 break;
600b828c 7671 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7672 gen_vfp_uito(0, 1);
9ee6e8bb 7673 break;
600b828c 7674 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7675 gen_vfp_tosiz(0, 1);
9ee6e8bb 7676 break;
600b828c 7677 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7678 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7679 break;
7680 default:
600b828c
PM
7681 /* Reserved op values were caught by the
7682 * neon_2rm_sizes[] check earlier.
7683 */
7684 abort();
9ee6e8bb 7685 }
600b828c 7686 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7687 tcg_gen_st_f32(cpu_F0s, cpu_env,
7688 neon_reg_offset(rd, pass));
9ee6e8bb 7689 } else {
dd8fbd78 7690 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7691 }
7692 }
7693 break;
7694 }
7695 } else if ((insn & (1 << 10)) == 0) {
7696 /* VTBL, VTBX. */
56907d77
PM
7697 int n = ((insn >> 8) & 3) + 1;
7698 if ((rn + n) > 32) {
7699 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7700 * helper function running off the end of the register file.
7701 */
7702 return 1;
7703 }
7704 n <<= 3;
9ee6e8bb 7705 if (insn & (1 << 6)) {
8f8e3aa4 7706 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7707 } else {
7d1b0095 7708 tmp = tcg_temp_new_i32();
8f8e3aa4 7709 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7710 }
8f8e3aa4 7711 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 7712 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 7713 tmp5 = tcg_const_i32(n);
e7c06c4e 7714 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 7715 tcg_temp_free_i32(tmp);
9ee6e8bb 7716 if (insn & (1 << 6)) {
8f8e3aa4 7717 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7718 } else {
7d1b0095 7719 tmp = tcg_temp_new_i32();
8f8e3aa4 7720 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7721 }
8f8e3aa4 7722 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 7723 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 7724 tcg_temp_free_i32(tmp5);
e7c06c4e 7725 tcg_temp_free_ptr(ptr1);
8f8e3aa4 7726 neon_store_reg(rd, 0, tmp2);
3018f259 7727 neon_store_reg(rd, 1, tmp3);
7d1b0095 7728 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7729 } else if ((insn & 0x380) == 0) {
7730 /* VDUP */
133da6aa
JR
7731 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7732 return 1;
7733 }
9ee6e8bb 7734 if (insn & (1 << 19)) {
dd8fbd78 7735 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7736 } else {
dd8fbd78 7737 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7738 }
7739 if (insn & (1 << 16)) {
dd8fbd78 7740 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7741 } else if (insn & (1 << 17)) {
7742 if ((insn >> 18) & 1)
dd8fbd78 7743 gen_neon_dup_high16(tmp);
9ee6e8bb 7744 else
dd8fbd78 7745 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7746 }
7747 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7748 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7749 tcg_gen_mov_i32(tmp2, tmp);
7750 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7751 }
7d1b0095 7752 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7753 } else {
7754 return 1;
7755 }
7756 }
7757 }
7758 return 0;
7759}
7760
8b7209fa
RH
7761/* Advanced SIMD three registers of the same length extension.
7762 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7763 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7764 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7765 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7766 */
7767static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7768{
26c470a7
RH
7769 gen_helper_gvec_3 *fn_gvec = NULL;
7770 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7771 int rd, rn, rm, opr_sz;
7772 int data = 0;
8b7209fa
RH
7773 bool q;
7774
7775 q = extract32(insn, 6, 1);
7776 VFP_DREG_D(rd, insn);
7777 VFP_DREG_N(rn, insn);
7778 VFP_DREG_M(rm, insn);
7779 if ((rd | rn | rm) & q) {
7780 return 1;
7781 }
7782
7783 if ((insn & 0xfe200f10) == 0xfc200800) {
7784 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
7785 int size = extract32(insn, 20, 1);
7786 data = extract32(insn, 23, 2); /* rot */
8b7209fa
RH
7787 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7788 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7789 return 1;
7790 }
7791 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7792 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7793 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
7794 int size = extract32(insn, 20, 1);
7795 data = extract32(insn, 24, 1); /* rot */
8b7209fa
RH
7796 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7797 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7798 return 1;
7799 }
7800 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
7801 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
7802 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
7803 bool u = extract32(insn, 4, 1);
7804 if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
7805 return 1;
7806 }
7807 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8b7209fa
RH
7808 } else {
7809 return 1;
7810 }
7811
7812 if (s->fp_excp_el) {
7813 gen_exception_insn(s, 4, EXCP_UDEF,
7814 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7815 return 0;
7816 }
7817 if (!s->vfp_enabled) {
7818 return 1;
7819 }
7820
7821 opr_sz = (1 + q) * 8;
26c470a7
RH
7822 if (fn_gvec_ptr) {
7823 TCGv_ptr fpst = get_fpstatus_ptr(1);
7824 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7825 vfp_reg_offset(1, rn),
7826 vfp_reg_offset(1, rm), fpst,
7827 opr_sz, opr_sz, data, fn_gvec_ptr);
7828 tcg_temp_free_ptr(fpst);
7829 } else {
7830 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7831 vfp_reg_offset(1, rn),
7832 vfp_reg_offset(1, rm),
7833 opr_sz, opr_sz, data, fn_gvec);
7834 }
8b7209fa
RH
7835 return 0;
7836}
7837
638808ff
RH
7838/* Advanced SIMD two registers and a scalar extension.
7839 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7840 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7841 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7842 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7843 *
7844 */
7845
7846static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7847{
26c470a7
RH
7848 gen_helper_gvec_3 *fn_gvec = NULL;
7849 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 7850 int rd, rn, rm, opr_sz, data;
638808ff
RH
7851 bool q;
7852
7853 q = extract32(insn, 6, 1);
7854 VFP_DREG_D(rd, insn);
7855 VFP_DREG_N(rn, insn);
638808ff
RH
7856 if ((rd | rn) & q) {
7857 return 1;
7858 }
7859
7860 if ((insn & 0xff000f10) == 0xfe000800) {
7861 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
7862 int rot = extract32(insn, 20, 2);
7863 int size = extract32(insn, 23, 1);
7864 int index;
7865
7866 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
638808ff
RH
7867 return 1;
7868 }
2cc99919
RH
7869 if (size == 0) {
7870 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7871 return 1;
7872 }
7873 /* For fp16, rm is just Vm, and index is M. */
7874 rm = extract32(insn, 0, 4);
7875 index = extract32(insn, 5, 1);
7876 } else {
7877 /* For fp32, rm is the usual M:Vm, and index is 0. */
7878 VFP_DREG_M(rm, insn);
7879 index = 0;
7880 }
7881 data = (index << 2) | rot;
7882 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7883 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
7884 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7885 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7886 int u = extract32(insn, 4, 1);
7887 if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
7888 return 1;
7889 }
7890 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7891 /* rm is just Vm, and index is M. */
7892 data = extract32(insn, 5, 1); /* index */
7893 rm = extract32(insn, 0, 4);
638808ff
RH
7894 } else {
7895 return 1;
7896 }
7897
7898 if (s->fp_excp_el) {
7899 gen_exception_insn(s, 4, EXCP_UDEF,
7900 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7901 return 0;
7902 }
7903 if (!s->vfp_enabled) {
7904 return 1;
7905 }
7906
7907 opr_sz = (1 + q) * 8;
26c470a7
RH
7908 if (fn_gvec_ptr) {
7909 TCGv_ptr fpst = get_fpstatus_ptr(1);
7910 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7911 vfp_reg_offset(1, rn),
7912 vfp_reg_offset(1, rm), fpst,
7913 opr_sz, opr_sz, data, fn_gvec_ptr);
7914 tcg_temp_free_ptr(fpst);
7915 } else {
7916 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7917 vfp_reg_offset(1, rn),
7918 vfp_reg_offset(1, rm),
7919 opr_sz, opr_sz, data, fn_gvec);
7920 }
638808ff
RH
7921 return 0;
7922}
7923
7dcc1f89 7924static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7925{
4b6a83fb
PM
7926 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7927 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7928
7929 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7930
7931 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7932 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7933 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7934 return 1;
7935 }
d614a513 7936 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7937 return disas_iwmmxt_insn(s, insn);
d614a513 7938 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7939 return disas_dsp_insn(s, insn);
c0f4af17
PM
7940 }
7941 return 1;
4b6a83fb
PM
7942 }
7943
7944 /* Otherwise treat as a generic register access */
7945 is64 = (insn & (1 << 25)) == 0;
7946 if (!is64 && ((insn & (1 << 4)) == 0)) {
7947 /* cdp */
7948 return 1;
7949 }
7950
7951 crm = insn & 0xf;
7952 if (is64) {
7953 crn = 0;
7954 opc1 = (insn >> 4) & 0xf;
7955 opc2 = 0;
7956 rt2 = (insn >> 16) & 0xf;
7957 } else {
7958 crn = (insn >> 16) & 0xf;
7959 opc1 = (insn >> 21) & 7;
7960 opc2 = (insn >> 5) & 7;
7961 rt2 = 0;
7962 }
7963 isread = (insn >> 20) & 1;
7964 rt = (insn >> 12) & 0xf;
7965
60322b39 7966 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7967 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7968 if (ri) {
7969 /* Check access permissions */
dcbff19b 7970 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7971 return 1;
7972 }
7973
c0f4af17 7974 if (ri->accessfn ||
d614a513 7975 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7976 /* Emit code to perform further access permissions checks at
7977 * runtime; this may result in an exception.
c0f4af17
PM
7978 * Note that on XScale all cp0..c13 registers do an access check
7979 * call in order to handle c15_cpar.
f59df3f2
PM
7980 */
7981 TCGv_ptr tmpptr;
3f208fd7 7982 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7983 uint32_t syndrome;
7984
7985 /* Note that since we are an implementation which takes an
7986 * exception on a trapped conditional instruction only if the
7987 * instruction passes its condition code check, we can take
7988 * advantage of the clause in the ARM ARM that allows us to set
7989 * the COND field in the instruction to 0xE in all cases.
7990 * We could fish the actual condition out of the insn (ARM)
7991 * or the condexec bits (Thumb) but it isn't necessary.
7992 */
7993 switch (cpnum) {
7994 case 14:
7995 if (is64) {
7996 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7997 isread, false);
8bcbf37c
PM
7998 } else {
7999 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8000 rt, isread, false);
8bcbf37c
PM
8001 }
8002 break;
8003 case 15:
8004 if (is64) {
8005 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8006 isread, false);
8bcbf37c
PM
8007 } else {
8008 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8009 rt, isread, false);
8bcbf37c
PM
8010 }
8011 break;
8012 default:
8013 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8014 * so this can only happen if this is an ARMv7 or earlier CPU,
8015 * in which case the syndrome information won't actually be
8016 * guest visible.
8017 */
d614a513 8018 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8019 syndrome = syn_uncategorized();
8020 break;
8021 }
8022
43bfa4a1 8023 gen_set_condexec(s);
3977ee5d 8024 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8025 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8026 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8027 tcg_isread = tcg_const_i32(isread);
8028 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8029 tcg_isread);
f59df3f2 8030 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8031 tcg_temp_free_i32(tcg_syn);
3f208fd7 8032 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8033 }
8034
4b6a83fb
PM
8035 /* Handle special cases first */
8036 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8037 case ARM_CP_NOP:
8038 return 0;
8039 case ARM_CP_WFI:
8040 if (isread) {
8041 return 1;
8042 }
eaed129d 8043 gen_set_pc_im(s, s->pc);
dcba3a8d 8044 s->base.is_jmp = DISAS_WFI;
2bee5105 8045 return 0;
4b6a83fb
PM
8046 default:
8047 break;
8048 }
8049
c5a49c63 8050 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8051 gen_io_start();
8052 }
8053
4b6a83fb
PM
8054 if (isread) {
8055 /* Read */
8056 if (is64) {
8057 TCGv_i64 tmp64;
8058 TCGv_i32 tmp;
8059 if (ri->type & ARM_CP_CONST) {
8060 tmp64 = tcg_const_i64(ri->resetvalue);
8061 } else if (ri->readfn) {
8062 TCGv_ptr tmpptr;
4b6a83fb
PM
8063 tmp64 = tcg_temp_new_i64();
8064 tmpptr = tcg_const_ptr(ri);
8065 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8066 tcg_temp_free_ptr(tmpptr);
8067 } else {
8068 tmp64 = tcg_temp_new_i64();
8069 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8070 }
8071 tmp = tcg_temp_new_i32();
ecc7b3aa 8072 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8073 store_reg(s, rt, tmp);
8074 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8075 tmp = tcg_temp_new_i32();
ecc7b3aa 8076 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8077 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8078 store_reg(s, rt2, tmp);
8079 } else {
39d5492a 8080 TCGv_i32 tmp;
4b6a83fb
PM
8081 if (ri->type & ARM_CP_CONST) {
8082 tmp = tcg_const_i32(ri->resetvalue);
8083 } else if (ri->readfn) {
8084 TCGv_ptr tmpptr;
4b6a83fb
PM
8085 tmp = tcg_temp_new_i32();
8086 tmpptr = tcg_const_ptr(ri);
8087 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8088 tcg_temp_free_ptr(tmpptr);
8089 } else {
8090 tmp = load_cpu_offset(ri->fieldoffset);
8091 }
8092 if (rt == 15) {
8093 /* Destination register of r15 for 32 bit loads sets
8094 * the condition codes from the high 4 bits of the value
8095 */
8096 gen_set_nzcv(tmp);
8097 tcg_temp_free_i32(tmp);
8098 } else {
8099 store_reg(s, rt, tmp);
8100 }
8101 }
8102 } else {
8103 /* Write */
8104 if (ri->type & ARM_CP_CONST) {
8105 /* If not forbidden by access permissions, treat as WI */
8106 return 0;
8107 }
8108
8109 if (is64) {
39d5492a 8110 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8111 TCGv_i64 tmp64 = tcg_temp_new_i64();
8112 tmplo = load_reg(s, rt);
8113 tmphi = load_reg(s, rt2);
8114 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8115 tcg_temp_free_i32(tmplo);
8116 tcg_temp_free_i32(tmphi);
8117 if (ri->writefn) {
8118 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8119 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8120 tcg_temp_free_ptr(tmpptr);
8121 } else {
8122 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8123 }
8124 tcg_temp_free_i64(tmp64);
8125 } else {
8126 if (ri->writefn) {
39d5492a 8127 TCGv_i32 tmp;
4b6a83fb 8128 TCGv_ptr tmpptr;
4b6a83fb
PM
8129 tmp = load_reg(s, rt);
8130 tmpptr = tcg_const_ptr(ri);
8131 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8132 tcg_temp_free_ptr(tmpptr);
8133 tcg_temp_free_i32(tmp);
8134 } else {
39d5492a 8135 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8136 store_cpu_offset(tmp, ri->fieldoffset);
8137 }
8138 }
2452731c
PM
8139 }
8140
c5a49c63 8141 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8142 /* I/O operations must end the TB here (whether read or write) */
8143 gen_io_end();
8144 gen_lookup_tb(s);
8145 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8146 /* We default to ending the TB on a coprocessor register write,
8147 * but allow this to be suppressed by the register definition
8148 * (usually only necessary to work around guest bugs).
8149 */
2452731c 8150 gen_lookup_tb(s);
4b6a83fb 8151 }
2452731c 8152
4b6a83fb
PM
8153 return 0;
8154 }
8155
626187d8
PM
8156 /* Unknown register; this might be a guest error or a QEMU
8157 * unimplemented feature.
8158 */
8159 if (is64) {
8160 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8161 "64 bit system register cp:%d opc1: %d crm:%d "
8162 "(%s)\n",
8163 isread ? "read" : "write", cpnum, opc1, crm,
8164 s->ns ? "non-secure" : "secure");
626187d8
PM
8165 } else {
8166 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8167 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8168 "(%s)\n",
8169 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8170 s->ns ? "non-secure" : "secure");
626187d8
PM
8171 }
8172
4a9a539f 8173 return 1;
9ee6e8bb
PB
8174}
8175
5e3f878a
PB
8176
8177/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8178static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8179{
39d5492a 8180 TCGv_i32 tmp;
7d1b0095 8181 tmp = tcg_temp_new_i32();
ecc7b3aa 8182 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8183 store_reg(s, rlow, tmp);
7d1b0095 8184 tmp = tcg_temp_new_i32();
5e3f878a 8185 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8186 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8187 store_reg(s, rhigh, tmp);
8188}
8189
8190/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8191static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8192{
a7812ae4 8193 TCGv_i64 tmp;
39d5492a 8194 TCGv_i32 tmp2;
5e3f878a 8195
36aa55dc 8196 /* Load value and extend to 64 bits. */
a7812ae4 8197 tmp = tcg_temp_new_i64();
5e3f878a
PB
8198 tmp2 = load_reg(s, rlow);
8199 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8200 tcg_temp_free_i32(tmp2);
5e3f878a 8201 tcg_gen_add_i64(val, val, tmp);
b75263d6 8202 tcg_temp_free_i64(tmp);
5e3f878a
PB
8203}
8204
8205/* load and add a 64-bit value from a register pair. */
a7812ae4 8206static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8207{
a7812ae4 8208 TCGv_i64 tmp;
39d5492a
PM
8209 TCGv_i32 tmpl;
8210 TCGv_i32 tmph;
5e3f878a
PB
8211
8212 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8213 tmpl = load_reg(s, rlow);
8214 tmph = load_reg(s, rhigh);
a7812ae4 8215 tmp = tcg_temp_new_i64();
36aa55dc 8216 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8217 tcg_temp_free_i32(tmpl);
8218 tcg_temp_free_i32(tmph);
5e3f878a 8219 tcg_gen_add_i64(val, val, tmp);
b75263d6 8220 tcg_temp_free_i64(tmp);
5e3f878a
PB
8221}
8222
c9f10124 8223/* Set N and Z flags from hi|lo. */
39d5492a 8224static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8225{
c9f10124
RH
8226 tcg_gen_mov_i32(cpu_NF, hi);
8227 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8228}
8229
426f5abc
PB
8230/* Load/Store exclusive instructions are implemented by remembering
8231 the value/address loaded, and seeing if these are the same
354161b3 8232 when the store is performed. This should be sufficient to implement
426f5abc 8233 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8234 regular stores. The compare vs the remembered value is done during
8235 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 8236static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8237 TCGv_i32 addr, int size)
426f5abc 8238{
94ee24e7 8239 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8240 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8241
50225ad0
PM
8242 s->is_ldex = true;
8243
426f5abc 8244 if (size == 3) {
39d5492a 8245 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8246 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8247
3448d47b
PM
8248 /* For AArch32, architecturally the 32-bit word at the lowest
8249 * address is always Rt and the one at addr+4 is Rt2, even if
8250 * the CPU is big-endian. That means we don't want to do a
8251 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8252 * for an architecturally 64-bit access, but instead do a
8253 * 64-bit access using MO_BE if appropriate and then split
8254 * the two halves.
8255 * This only makes a difference for BE32 user-mode, where
8256 * frob64() must not flip the two halves of the 64-bit data
8257 * but this code must treat BE32 user-mode like BE32 system.
8258 */
8259 TCGv taddr = gen_aa32_addr(s, addr, opc);
8260
8261 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8262 tcg_temp_free(taddr);
354161b3 8263 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8264 if (s->be_data == MO_BE) {
8265 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8266 } else {
8267 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8268 }
354161b3
EC
8269 tcg_temp_free_i64(t64);
8270
8271 store_reg(s, rt2, tmp2);
03d05e2d 8272 } else {
354161b3 8273 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8274 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8275 }
03d05e2d
PM
8276
8277 store_reg(s, rt, tmp);
8278 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8279}
8280
8281static void gen_clrex(DisasContext *s)
8282{
03d05e2d 8283 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
8284}
8285
426f5abc 8286static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8287 TCGv_i32 addr, int size)
426f5abc 8288{
354161b3
EC
8289 TCGv_i32 t0, t1, t2;
8290 TCGv_i64 extaddr;
8291 TCGv taddr;
42a268c2
RH
8292 TCGLabel *done_label;
8293 TCGLabel *fail_label;
354161b3 8294 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
8295
8296 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8297 [addr] = {Rt};
8298 {Rd} = 0;
8299 } else {
8300 {Rd} = 1;
8301 } */
8302 fail_label = gen_new_label();
8303 done_label = gen_new_label();
03d05e2d
PM
8304 extaddr = tcg_temp_new_i64();
8305 tcg_gen_extu_i32_i64(extaddr, addr);
8306 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8307 tcg_temp_free_i64(extaddr);
8308
354161b3
EC
8309 taddr = gen_aa32_addr(s, addr, opc);
8310 t0 = tcg_temp_new_i32();
8311 t1 = load_reg(s, rt);
426f5abc 8312 if (size == 3) {
354161b3
EC
8313 TCGv_i64 o64 = tcg_temp_new_i64();
8314 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 8315
354161b3 8316 t2 = load_reg(s, rt2);
3448d47b
PM
8317 /* For AArch32, architecturally the 32-bit word at the lowest
8318 * address is always Rt and the one at addr+4 is Rt2, even if
8319 * the CPU is big-endian. Since we're going to treat this as a
8320 * single 64-bit BE store, we need to put the two halves in the
8321 * opposite order for BE to LE, so that they end up in the right
8322 * places.
8323 * We don't want gen_aa32_frob64() because that does the wrong
8324 * thing for BE32 usermode.
8325 */
8326 if (s->be_data == MO_BE) {
8327 tcg_gen_concat_i32_i64(n64, t2, t1);
8328 } else {
8329 tcg_gen_concat_i32_i64(n64, t1, t2);
8330 }
354161b3 8331 tcg_temp_free_i32(t2);
03d05e2d 8332
354161b3
EC
8333 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8334 get_mem_index(s), opc);
8335 tcg_temp_free_i64(n64);
8336
354161b3
EC
8337 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8338 tcg_gen_extrl_i64_i32(t0, o64);
8339
8340 tcg_temp_free_i64(o64);
8341 } else {
8342 t2 = tcg_temp_new_i32();
8343 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8344 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8345 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8346 tcg_temp_free_i32(t2);
426f5abc 8347 }
354161b3
EC
8348 tcg_temp_free_i32(t1);
8349 tcg_temp_free(taddr);
8350 tcg_gen_mov_i32(cpu_R[rd], t0);
8351 tcg_temp_free_i32(t0);
426f5abc 8352 tcg_gen_br(done_label);
354161b3 8353
426f5abc
PB
8354 gen_set_label(fail_label);
8355 tcg_gen_movi_i32(cpu_R[rd], 1);
8356 gen_set_label(done_label);
03d05e2d 8357 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 8358}
426f5abc 8359
81465888
PM
8360/* gen_srs:
8361 * @env: CPUARMState
8362 * @s: DisasContext
8363 * @mode: mode field from insn (which stack to store to)
8364 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8365 * @writeback: true if writeback bit set
8366 *
8367 * Generate code for the SRS (Store Return State) insn.
8368 */
8369static void gen_srs(DisasContext *s,
8370 uint32_t mode, uint32_t amode, bool writeback)
8371{
8372 int32_t offset;
cbc0326b
PM
8373 TCGv_i32 addr, tmp;
8374 bool undef = false;
8375
8376 /* SRS is:
8377 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 8378 * and specified mode is monitor mode
cbc0326b
PM
8379 * - UNDEFINED in Hyp mode
8380 * - UNPREDICTABLE in User or System mode
8381 * - UNPREDICTABLE if the specified mode is:
8382 * -- not implemented
8383 * -- not a valid mode number
8384 * -- a mode that's at a higher exception level
8385 * -- Monitor, if we are Non-secure
f01377f5 8386 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 8387 */
ba63cf47 8388 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
8389 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8390 return;
8391 }
8392
8393 if (s->current_el == 0 || s->current_el == 2) {
8394 undef = true;
8395 }
8396
8397 switch (mode) {
8398 case ARM_CPU_MODE_USR:
8399 case ARM_CPU_MODE_FIQ:
8400 case ARM_CPU_MODE_IRQ:
8401 case ARM_CPU_MODE_SVC:
8402 case ARM_CPU_MODE_ABT:
8403 case ARM_CPU_MODE_UND:
8404 case ARM_CPU_MODE_SYS:
8405 break;
8406 case ARM_CPU_MODE_HYP:
8407 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8408 undef = true;
8409 }
8410 break;
8411 case ARM_CPU_MODE_MON:
8412 /* No need to check specifically for "are we non-secure" because
8413 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8414 * so if this isn't EL3 then we must be non-secure.
8415 */
8416 if (s->current_el != 3) {
8417 undef = true;
8418 }
8419 break;
8420 default:
8421 undef = true;
8422 }
8423
8424 if (undef) {
8425 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8426 default_exception_el(s));
8427 return;
8428 }
8429
8430 addr = tcg_temp_new_i32();
8431 tmp = tcg_const_i32(mode);
f01377f5
PM
8432 /* get_r13_banked() will raise an exception if called from System mode */
8433 gen_set_condexec(s);
8434 gen_set_pc_im(s, s->pc - 4);
81465888
PM
8435 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8436 tcg_temp_free_i32(tmp);
8437 switch (amode) {
8438 case 0: /* DA */
8439 offset = -4;
8440 break;
8441 case 1: /* IA */
8442 offset = 0;
8443 break;
8444 case 2: /* DB */
8445 offset = -8;
8446 break;
8447 case 3: /* IB */
8448 offset = 4;
8449 break;
8450 default:
8451 abort();
8452 }
8453 tcg_gen_addi_i32(addr, addr, offset);
8454 tmp = load_reg(s, 14);
12dcc321 8455 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8456 tcg_temp_free_i32(tmp);
81465888
PM
8457 tmp = load_cpu_field(spsr);
8458 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 8459 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8460 tcg_temp_free_i32(tmp);
81465888
PM
8461 if (writeback) {
8462 switch (amode) {
8463 case 0:
8464 offset = -8;
8465 break;
8466 case 1:
8467 offset = 4;
8468 break;
8469 case 2:
8470 offset = -4;
8471 break;
8472 case 3:
8473 offset = 0;
8474 break;
8475 default:
8476 abort();
8477 }
8478 tcg_gen_addi_i32(addr, addr, offset);
8479 tmp = tcg_const_i32(mode);
8480 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8481 tcg_temp_free_i32(tmp);
8482 }
8483 tcg_temp_free_i32(addr);
dcba3a8d 8484 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
8485}
8486
c2d9644e
RK
8487/* Generate a label used for skipping this instruction */
8488static void arm_gen_condlabel(DisasContext *s)
8489{
8490 if (!s->condjmp) {
8491 s->condlabel = gen_new_label();
8492 s->condjmp = 1;
8493 }
8494}
8495
8496/* Skip this instruction if the ARM condition is false */
8497static void arm_skip_unless(DisasContext *s, uint32_t cond)
8498{
8499 arm_gen_condlabel(s);
8500 arm_gen_test_cc(cond ^ 1, s->condlabel);
8501}
8502
f4df2210 8503static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8504{
f4df2210 8505 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8506 TCGv_i32 tmp;
8507 TCGv_i32 tmp2;
8508 TCGv_i32 tmp3;
8509 TCGv_i32 addr;
a7812ae4 8510 TCGv_i64 tmp64;
9ee6e8bb 8511
e13886e3
PM
8512 /* M variants do not implement ARM mode; this must raise the INVSTATE
8513 * UsageFault exception.
8514 */
b53d8923 8515 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8516 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8517 default_exception_el(s));
8518 return;
b53d8923 8519 }
9ee6e8bb
PB
8520 cond = insn >> 28;
8521 if (cond == 0xf){
be5e7a76
DES
8522 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8523 * choose to UNDEF. In ARMv5 and above the space is used
8524 * for miscellaneous unconditional instructions.
8525 */
8526 ARCH(5);
8527
9ee6e8bb
PB
8528 /* Unconditional instructions. */
8529 if (((insn >> 25) & 7) == 1) {
8530 /* NEON Data processing. */
d614a513 8531 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8532 goto illegal_op;
d614a513 8533 }
9ee6e8bb 8534
7dcc1f89 8535 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8536 goto illegal_op;
7dcc1f89 8537 }
9ee6e8bb
PB
8538 return;
8539 }
8540 if ((insn & 0x0f100000) == 0x04000000) {
8541 /* NEON load/store. */
d614a513 8542 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8543 goto illegal_op;
d614a513 8544 }
9ee6e8bb 8545
7dcc1f89 8546 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8547 goto illegal_op;
7dcc1f89 8548 }
9ee6e8bb
PB
8549 return;
8550 }
6a57f3eb
WN
8551 if ((insn & 0x0f000e10) == 0x0e000a00) {
8552 /* VFP. */
7dcc1f89 8553 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8554 goto illegal_op;
8555 }
8556 return;
8557 }
3d185e5d
PM
8558 if (((insn & 0x0f30f000) == 0x0510f000) ||
8559 ((insn & 0x0f30f010) == 0x0710f000)) {
8560 if ((insn & (1 << 22)) == 0) {
8561 /* PLDW; v7MP */
d614a513 8562 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8563 goto illegal_op;
8564 }
8565 }
8566 /* Otherwise PLD; v5TE+ */
be5e7a76 8567 ARCH(5TE);
3d185e5d
PM
8568 return;
8569 }
8570 if (((insn & 0x0f70f000) == 0x0450f000) ||
8571 ((insn & 0x0f70f010) == 0x0650f000)) {
8572 ARCH(7);
8573 return; /* PLI; V7 */
8574 }
8575 if (((insn & 0x0f700000) == 0x04100000) ||
8576 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8577 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8578 goto illegal_op;
8579 }
8580 return; /* v7MP: Unallocated memory hint: must NOP */
8581 }
8582
8583 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8584 ARCH(6);
8585 /* setend */
9886ecdf
PB
8586 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8587 gen_helper_setend(cpu_env);
dcba3a8d 8588 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8589 }
8590 return;
8591 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8592 switch ((insn >> 4) & 0xf) {
8593 case 1: /* clrex */
8594 ARCH(6K);
426f5abc 8595 gen_clrex(s);
9ee6e8bb
PB
8596 return;
8597 case 4: /* dsb */
8598 case 5: /* dmb */
9ee6e8bb 8599 ARCH(7);
61e4c432 8600 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8601 return;
6df99dec
SS
8602 case 6: /* isb */
8603 /* We need to break the TB after this insn to execute
8604 * self-modifying code correctly and also to take
8605 * any pending interrupts immediately.
8606 */
0b609cc1 8607 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8608 return;
9ee6e8bb
PB
8609 default:
8610 goto illegal_op;
8611 }
8612 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8613 /* srs */
81465888
PM
8614 ARCH(6);
8615 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8616 return;
ea825eee 8617 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8618 /* rfe */
c67b6b71 8619 int32_t offset;
9ee6e8bb
PB
8620 if (IS_USER(s))
8621 goto illegal_op;
8622 ARCH(6);
8623 rn = (insn >> 16) & 0xf;
b0109805 8624 addr = load_reg(s, rn);
9ee6e8bb
PB
8625 i = (insn >> 23) & 3;
8626 switch (i) {
b0109805 8627 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8628 case 1: offset = 0; break; /* IA */
8629 case 2: offset = -8; break; /* DB */
b0109805 8630 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8631 default: abort();
8632 }
8633 if (offset)
b0109805
PB
8634 tcg_gen_addi_i32(addr, addr, offset);
8635 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8636 tmp = tcg_temp_new_i32();
12dcc321 8637 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8638 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8639 tmp2 = tcg_temp_new_i32();
12dcc321 8640 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8641 if (insn & (1 << 21)) {
8642 /* Base writeback. */
8643 switch (i) {
b0109805 8644 case 0: offset = -8; break;
c67b6b71
FN
8645 case 1: offset = 4; break;
8646 case 2: offset = -4; break;
b0109805 8647 case 3: offset = 0; break;
9ee6e8bb
PB
8648 default: abort();
8649 }
8650 if (offset)
b0109805
PB
8651 tcg_gen_addi_i32(addr, addr, offset);
8652 store_reg(s, rn, addr);
8653 } else {
7d1b0095 8654 tcg_temp_free_i32(addr);
9ee6e8bb 8655 }
b0109805 8656 gen_rfe(s, tmp, tmp2);
c67b6b71 8657 return;
9ee6e8bb
PB
8658 } else if ((insn & 0x0e000000) == 0x0a000000) {
8659 /* branch link and change to thumb (blx <offset>) */
8660 int32_t offset;
8661
8662 val = (uint32_t)s->pc;
7d1b0095 8663 tmp = tcg_temp_new_i32();
d9ba4830
PB
8664 tcg_gen_movi_i32(tmp, val);
8665 store_reg(s, 14, tmp);
9ee6e8bb
PB
8666 /* Sign-extend the 24-bit offset */
8667 offset = (((int32_t)insn) << 8) >> 8;
8668 /* offset * 4 + bit24 * 2 + (thumb bit) */
8669 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8670 /* pipeline offset */
8671 val += 4;
be5e7a76 8672 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8673 gen_bx_im(s, val);
9ee6e8bb
PB
8674 return;
8675 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8676 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8677 /* iWMMXt register transfer. */
c0f4af17 8678 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8679 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8680 return;
c0f4af17
PM
8681 }
8682 }
9ee6e8bb 8683 }
8b7209fa
RH
8684 } else if ((insn & 0x0e000a00) == 0x0c000800
8685 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8686 if (disas_neon_insn_3same_ext(s, insn)) {
8687 goto illegal_op;
8688 }
8689 return;
638808ff
RH
8690 } else if ((insn & 0x0f000a00) == 0x0e000800
8691 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8692 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8693 goto illegal_op;
8694 }
8695 return;
9ee6e8bb
PB
8696 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8697 /* Coprocessor double register transfer. */
be5e7a76 8698 ARCH(5TE);
9ee6e8bb
PB
8699 } else if ((insn & 0x0f000010) == 0x0e000010) {
8700 /* Additional coprocessor register transfer. */
7997d92f 8701 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8702 uint32_t mask;
8703 uint32_t val;
8704 /* cps (privileged) */
8705 if (IS_USER(s))
8706 return;
8707 mask = val = 0;
8708 if (insn & (1 << 19)) {
8709 if (insn & (1 << 8))
8710 mask |= CPSR_A;
8711 if (insn & (1 << 7))
8712 mask |= CPSR_I;
8713 if (insn & (1 << 6))
8714 mask |= CPSR_F;
8715 if (insn & (1 << 18))
8716 val |= mask;
8717 }
7997d92f 8718 if (insn & (1 << 17)) {
9ee6e8bb
PB
8719 mask |= CPSR_M;
8720 val |= (insn & 0x1f);
8721 }
8722 if (mask) {
2fbac54b 8723 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8724 }
8725 return;
8726 }
8727 goto illegal_op;
8728 }
8729 if (cond != 0xe) {
8730 /* if not always execute, we generate a conditional jump to
8731 next instruction */
c2d9644e 8732 arm_skip_unless(s, cond);
9ee6e8bb
PB
8733 }
8734 if ((insn & 0x0f900000) == 0x03000000) {
8735 if ((insn & (1 << 21)) == 0) {
8736 ARCH(6T2);
8737 rd = (insn >> 12) & 0xf;
8738 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8739 if ((insn & (1 << 22)) == 0) {
8740 /* MOVW */
7d1b0095 8741 tmp = tcg_temp_new_i32();
5e3f878a 8742 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8743 } else {
8744 /* MOVT */
5e3f878a 8745 tmp = load_reg(s, rd);
86831435 8746 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8747 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8748 }
5e3f878a 8749 store_reg(s, rd, tmp);
9ee6e8bb
PB
8750 } else {
8751 if (((insn >> 12) & 0xf) != 0xf)
8752 goto illegal_op;
8753 if (((insn >> 16) & 0xf) == 0) {
8754 gen_nop_hint(s, insn & 0xff);
8755 } else {
8756 /* CPSR = immediate */
8757 val = insn & 0xff;
8758 shift = ((insn >> 8) & 0xf) * 2;
8759 if (shift)
8760 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8761 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8762 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8763 i, val)) {
9ee6e8bb 8764 goto illegal_op;
7dcc1f89 8765 }
9ee6e8bb
PB
8766 }
8767 }
8768 } else if ((insn & 0x0f900000) == 0x01000000
8769 && (insn & 0x00000090) != 0x00000090) {
8770 /* miscellaneous instructions */
8771 op1 = (insn >> 21) & 3;
8772 sh = (insn >> 4) & 0xf;
8773 rm = insn & 0xf;
8774 switch (sh) {
8bfd0550
PM
8775 case 0x0: /* MSR, MRS */
8776 if (insn & (1 << 9)) {
8777 /* MSR (banked) and MRS (banked) */
8778 int sysm = extract32(insn, 16, 4) |
8779 (extract32(insn, 8, 1) << 4);
8780 int r = extract32(insn, 22, 1);
8781
8782 if (op1 & 1) {
8783 /* MSR (banked) */
8784 gen_msr_banked(s, r, sysm, rm);
8785 } else {
8786 /* MRS (banked) */
8787 int rd = extract32(insn, 12, 4);
8788
8789 gen_mrs_banked(s, r, sysm, rd);
8790 }
8791 break;
8792 }
8793
8794 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8795 if (op1 & 1) {
8796 /* PSR = reg */
2fbac54b 8797 tmp = load_reg(s, rm);
9ee6e8bb 8798 i = ((op1 & 2) != 0);
7dcc1f89 8799 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8800 goto illegal_op;
8801 } else {
8802 /* reg = PSR */
8803 rd = (insn >> 12) & 0xf;
8804 if (op1 & 2) {
8805 if (IS_USER(s))
8806 goto illegal_op;
d9ba4830 8807 tmp = load_cpu_field(spsr);
9ee6e8bb 8808 } else {
7d1b0095 8809 tmp = tcg_temp_new_i32();
9ef39277 8810 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8811 }
d9ba4830 8812 store_reg(s, rd, tmp);
9ee6e8bb
PB
8813 }
8814 break;
8815 case 0x1:
8816 if (op1 == 1) {
8817 /* branch/exchange thumb (bx). */
be5e7a76 8818 ARCH(4T);
d9ba4830
PB
8819 tmp = load_reg(s, rm);
8820 gen_bx(s, tmp);
9ee6e8bb
PB
8821 } else if (op1 == 3) {
8822 /* clz */
be5e7a76 8823 ARCH(5);
9ee6e8bb 8824 rd = (insn >> 12) & 0xf;
1497c961 8825 tmp = load_reg(s, rm);
7539a012 8826 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8827 store_reg(s, rd, tmp);
9ee6e8bb
PB
8828 } else {
8829 goto illegal_op;
8830 }
8831 break;
8832 case 0x2:
8833 if (op1 == 1) {
8834 ARCH(5J); /* bxj */
8835 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8836 tmp = load_reg(s, rm);
8837 gen_bx(s, tmp);
9ee6e8bb
PB
8838 } else {
8839 goto illegal_op;
8840 }
8841 break;
8842 case 0x3:
8843 if (op1 != 1)
8844 goto illegal_op;
8845
be5e7a76 8846 ARCH(5);
9ee6e8bb 8847 /* branch link/exchange thumb (blx) */
d9ba4830 8848 tmp = load_reg(s, rm);
7d1b0095 8849 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8850 tcg_gen_movi_i32(tmp2, s->pc);
8851 store_reg(s, 14, tmp2);
8852 gen_bx(s, tmp);
9ee6e8bb 8853 break;
eb0ecd5a
WN
8854 case 0x4:
8855 {
8856 /* crc32/crc32c */
8857 uint32_t c = extract32(insn, 8, 4);
8858
8859 /* Check this CPU supports ARMv8 CRC instructions.
8860 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8861 * Bits 8, 10 and 11 should be zero.
8862 */
d614a513 8863 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8864 (c & 0xd) != 0) {
8865 goto illegal_op;
8866 }
8867
8868 rn = extract32(insn, 16, 4);
8869 rd = extract32(insn, 12, 4);
8870
8871 tmp = load_reg(s, rn);
8872 tmp2 = load_reg(s, rm);
aa633469
PM
8873 if (op1 == 0) {
8874 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8875 } else if (op1 == 1) {
8876 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8877 }
eb0ecd5a
WN
8878 tmp3 = tcg_const_i32(1 << op1);
8879 if (c & 0x2) {
8880 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8881 } else {
8882 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8883 }
8884 tcg_temp_free_i32(tmp2);
8885 tcg_temp_free_i32(tmp3);
8886 store_reg(s, rd, tmp);
8887 break;
8888 }
9ee6e8bb 8889 case 0x5: /* saturating add/subtract */
be5e7a76 8890 ARCH(5TE);
9ee6e8bb
PB
8891 rd = (insn >> 12) & 0xf;
8892 rn = (insn >> 16) & 0xf;
b40d0353 8893 tmp = load_reg(s, rm);
5e3f878a 8894 tmp2 = load_reg(s, rn);
9ee6e8bb 8895 if (op1 & 2)
9ef39277 8896 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8897 if (op1 & 1)
9ef39277 8898 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8899 else
9ef39277 8900 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8901 tcg_temp_free_i32(tmp2);
5e3f878a 8902 store_reg(s, rd, tmp);
9ee6e8bb 8903 break;
49e14940 8904 case 7:
d4a2dc67
PM
8905 {
8906 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8907 switch (op1) {
19a6e31c
PM
8908 case 0:
8909 /* HLT */
8910 gen_hlt(s, imm16);
8911 break;
37e6456e
PM
8912 case 1:
8913 /* bkpt */
8914 ARCH(5);
c900a2e6 8915 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
8916 break;
8917 case 2:
8918 /* Hypervisor call (v7) */
8919 ARCH(7);
8920 if (IS_USER(s)) {
8921 goto illegal_op;
8922 }
8923 gen_hvc(s, imm16);
8924 break;
8925 case 3:
8926 /* Secure monitor call (v6+) */
8927 ARCH(6K);
8928 if (IS_USER(s)) {
8929 goto illegal_op;
8930 }
8931 gen_smc(s);
8932 break;
8933 default:
19a6e31c 8934 g_assert_not_reached();
49e14940 8935 }
9ee6e8bb 8936 break;
d4a2dc67 8937 }
9ee6e8bb
PB
8938 case 0x8: /* signed multiply */
8939 case 0xa:
8940 case 0xc:
8941 case 0xe:
be5e7a76 8942 ARCH(5TE);
9ee6e8bb
PB
8943 rs = (insn >> 8) & 0xf;
8944 rn = (insn >> 12) & 0xf;
8945 rd = (insn >> 16) & 0xf;
8946 if (op1 == 1) {
8947 /* (32 * 16) >> 16 */
5e3f878a
PB
8948 tmp = load_reg(s, rm);
8949 tmp2 = load_reg(s, rs);
9ee6e8bb 8950 if (sh & 4)
5e3f878a 8951 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8952 else
5e3f878a 8953 gen_sxth(tmp2);
a7812ae4
PB
8954 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8955 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8956 tmp = tcg_temp_new_i32();
ecc7b3aa 8957 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8958 tcg_temp_free_i64(tmp64);
9ee6e8bb 8959 if ((sh & 2) == 0) {
5e3f878a 8960 tmp2 = load_reg(s, rn);
9ef39277 8961 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8962 tcg_temp_free_i32(tmp2);
9ee6e8bb 8963 }
5e3f878a 8964 store_reg(s, rd, tmp);
9ee6e8bb
PB
8965 } else {
8966 /* 16 * 16 */
5e3f878a
PB
8967 tmp = load_reg(s, rm);
8968 tmp2 = load_reg(s, rs);
8969 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8970 tcg_temp_free_i32(tmp2);
9ee6e8bb 8971 if (op1 == 2) {
a7812ae4
PB
8972 tmp64 = tcg_temp_new_i64();
8973 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8974 tcg_temp_free_i32(tmp);
a7812ae4
PB
8975 gen_addq(s, tmp64, rn, rd);
8976 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8977 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8978 } else {
8979 if (op1 == 0) {
5e3f878a 8980 tmp2 = load_reg(s, rn);
9ef39277 8981 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8982 tcg_temp_free_i32(tmp2);
9ee6e8bb 8983 }
5e3f878a 8984 store_reg(s, rd, tmp);
9ee6e8bb
PB
8985 }
8986 }
8987 break;
8988 default:
8989 goto illegal_op;
8990 }
8991 } else if (((insn & 0x0e000000) == 0 &&
8992 (insn & 0x00000090) != 0x90) ||
8993 ((insn & 0x0e000000) == (1 << 25))) {
8994 int set_cc, logic_cc, shiftop;
8995
8996 op1 = (insn >> 21) & 0xf;
8997 set_cc = (insn >> 20) & 1;
8998 logic_cc = table_logic_cc[op1] & set_cc;
8999
9000 /* data processing instruction */
9001 if (insn & (1 << 25)) {
9002 /* immediate operand */
9003 val = insn & 0xff;
9004 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 9005 if (shift) {
9ee6e8bb 9006 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 9007 }
7d1b0095 9008 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
9009 tcg_gen_movi_i32(tmp2, val);
9010 if (logic_cc && shift) {
9011 gen_set_CF_bit31(tmp2);
9012 }
9ee6e8bb
PB
9013 } else {
9014 /* register */
9015 rm = (insn) & 0xf;
e9bb4aa9 9016 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9017 shiftop = (insn >> 5) & 3;
9018 if (!(insn & (1 << 4))) {
9019 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9020 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9021 } else {
9022 rs = (insn >> 8) & 0xf;
8984bd2e 9023 tmp = load_reg(s, rs);
e9bb4aa9 9024 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9025 }
9026 }
9027 if (op1 != 0x0f && op1 != 0x0d) {
9028 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9029 tmp = load_reg(s, rn);
9030 } else {
f764718d 9031 tmp = NULL;
9ee6e8bb
PB
9032 }
9033 rd = (insn >> 12) & 0xf;
9034 switch(op1) {
9035 case 0x00:
e9bb4aa9
JR
9036 tcg_gen_and_i32(tmp, tmp, tmp2);
9037 if (logic_cc) {
9038 gen_logic_CC(tmp);
9039 }
7dcc1f89 9040 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9041 break;
9042 case 0x01:
e9bb4aa9
JR
9043 tcg_gen_xor_i32(tmp, tmp, tmp2);
9044 if (logic_cc) {
9045 gen_logic_CC(tmp);
9046 }
7dcc1f89 9047 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9048 break;
9049 case 0x02:
9050 if (set_cc && rd == 15) {
9051 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9052 if (IS_USER(s)) {
9ee6e8bb 9053 goto illegal_op;
e9bb4aa9 9054 }
72485ec4 9055 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9056 gen_exception_return(s, tmp);
9ee6e8bb 9057 } else {
e9bb4aa9 9058 if (set_cc) {
72485ec4 9059 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9060 } else {
9061 tcg_gen_sub_i32(tmp, tmp, tmp2);
9062 }
7dcc1f89 9063 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9064 }
9065 break;
9066 case 0x03:
e9bb4aa9 9067 if (set_cc) {
72485ec4 9068 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9069 } else {
9070 tcg_gen_sub_i32(tmp, tmp2, tmp);
9071 }
7dcc1f89 9072 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9073 break;
9074 case 0x04:
e9bb4aa9 9075 if (set_cc) {
72485ec4 9076 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9077 } else {
9078 tcg_gen_add_i32(tmp, tmp, tmp2);
9079 }
7dcc1f89 9080 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9081 break;
9082 case 0x05:
e9bb4aa9 9083 if (set_cc) {
49b4c31e 9084 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9085 } else {
9086 gen_add_carry(tmp, tmp, tmp2);
9087 }
7dcc1f89 9088 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9089 break;
9090 case 0x06:
e9bb4aa9 9091 if (set_cc) {
2de68a49 9092 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9093 } else {
9094 gen_sub_carry(tmp, tmp, tmp2);
9095 }
7dcc1f89 9096 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9097 break;
9098 case 0x07:
e9bb4aa9 9099 if (set_cc) {
2de68a49 9100 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9101 } else {
9102 gen_sub_carry(tmp, tmp2, tmp);
9103 }
7dcc1f89 9104 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9105 break;
9106 case 0x08:
9107 if (set_cc) {
e9bb4aa9
JR
9108 tcg_gen_and_i32(tmp, tmp, tmp2);
9109 gen_logic_CC(tmp);
9ee6e8bb 9110 }
7d1b0095 9111 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9112 break;
9113 case 0x09:
9114 if (set_cc) {
e9bb4aa9
JR
9115 tcg_gen_xor_i32(tmp, tmp, tmp2);
9116 gen_logic_CC(tmp);
9ee6e8bb 9117 }
7d1b0095 9118 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9119 break;
9120 case 0x0a:
9121 if (set_cc) {
72485ec4 9122 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9123 }
7d1b0095 9124 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9125 break;
9126 case 0x0b:
9127 if (set_cc) {
72485ec4 9128 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9129 }
7d1b0095 9130 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9131 break;
9132 case 0x0c:
e9bb4aa9
JR
9133 tcg_gen_or_i32(tmp, tmp, tmp2);
9134 if (logic_cc) {
9135 gen_logic_CC(tmp);
9136 }
7dcc1f89 9137 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9138 break;
9139 case 0x0d:
9140 if (logic_cc && rd == 15) {
9141 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9142 if (IS_USER(s)) {
9ee6e8bb 9143 goto illegal_op;
e9bb4aa9
JR
9144 }
9145 gen_exception_return(s, tmp2);
9ee6e8bb 9146 } else {
e9bb4aa9
JR
9147 if (logic_cc) {
9148 gen_logic_CC(tmp2);
9149 }
7dcc1f89 9150 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9151 }
9152 break;
9153 case 0x0e:
f669df27 9154 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9155 if (logic_cc) {
9156 gen_logic_CC(tmp);
9157 }
7dcc1f89 9158 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9159 break;
9160 default:
9161 case 0x0f:
e9bb4aa9
JR
9162 tcg_gen_not_i32(tmp2, tmp2);
9163 if (logic_cc) {
9164 gen_logic_CC(tmp2);
9165 }
7dcc1f89 9166 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9167 break;
9168 }
e9bb4aa9 9169 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9170 tcg_temp_free_i32(tmp2);
e9bb4aa9 9171 }
9ee6e8bb
PB
9172 } else {
9173 /* other instructions */
9174 op1 = (insn >> 24) & 0xf;
9175 switch(op1) {
9176 case 0x0:
9177 case 0x1:
9178 /* multiplies, extra load/stores */
9179 sh = (insn >> 5) & 3;
9180 if (sh == 0) {
9181 if (op1 == 0x0) {
9182 rd = (insn >> 16) & 0xf;
9183 rn = (insn >> 12) & 0xf;
9184 rs = (insn >> 8) & 0xf;
9185 rm = (insn) & 0xf;
9186 op1 = (insn >> 20) & 0xf;
9187 switch (op1) {
9188 case 0: case 1: case 2: case 3: case 6:
9189 /* 32 bit mul */
5e3f878a
PB
9190 tmp = load_reg(s, rs);
9191 tmp2 = load_reg(s, rm);
9192 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9193 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9194 if (insn & (1 << 22)) {
9195 /* Subtract (mls) */
9196 ARCH(6T2);
5e3f878a
PB
9197 tmp2 = load_reg(s, rn);
9198 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9199 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9200 } else if (insn & (1 << 21)) {
9201 /* Add */
5e3f878a
PB
9202 tmp2 = load_reg(s, rn);
9203 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9204 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9205 }
9206 if (insn & (1 << 20))
5e3f878a
PB
9207 gen_logic_CC(tmp);
9208 store_reg(s, rd, tmp);
9ee6e8bb 9209 break;
8aac08b1
AJ
9210 case 4:
9211 /* 64 bit mul double accumulate (UMAAL) */
9212 ARCH(6);
9213 tmp = load_reg(s, rs);
9214 tmp2 = load_reg(s, rm);
9215 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9216 gen_addq_lo(s, tmp64, rn);
9217 gen_addq_lo(s, tmp64, rd);
9218 gen_storeq_reg(s, rn, rd, tmp64);
9219 tcg_temp_free_i64(tmp64);
9220 break;
9221 case 8: case 9: case 10: case 11:
9222 case 12: case 13: case 14: case 15:
9223 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9224 tmp = load_reg(s, rs);
9225 tmp2 = load_reg(s, rm);
8aac08b1 9226 if (insn & (1 << 22)) {
c9f10124 9227 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9228 } else {
c9f10124 9229 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9230 }
9231 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9232 TCGv_i32 al = load_reg(s, rn);
9233 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9234 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9235 tcg_temp_free_i32(al);
9236 tcg_temp_free_i32(ah);
9ee6e8bb 9237 }
8aac08b1 9238 if (insn & (1 << 20)) {
c9f10124 9239 gen_logicq_cc(tmp, tmp2);
8aac08b1 9240 }
c9f10124
RH
9241 store_reg(s, rn, tmp);
9242 store_reg(s, rd, tmp2);
9ee6e8bb 9243 break;
8aac08b1
AJ
9244 default:
9245 goto illegal_op;
9ee6e8bb
PB
9246 }
9247 } else {
9248 rn = (insn >> 16) & 0xf;
9249 rd = (insn >> 12) & 0xf;
9250 if (insn & (1 << 23)) {
9251 /* load/store exclusive */
2359bf80 9252 int op2 = (insn >> 8) & 3;
86753403 9253 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9254
9255 switch (op2) {
9256 case 0: /* lda/stl */
9257 if (op1 == 1) {
9258 goto illegal_op;
9259 }
9260 ARCH(8);
9261 break;
9262 case 1: /* reserved */
9263 goto illegal_op;
9264 case 2: /* ldaex/stlex */
9265 ARCH(8);
9266 break;
9267 case 3: /* ldrex/strex */
9268 if (op1) {
9269 ARCH(6K);
9270 } else {
9271 ARCH(6);
9272 }
9273 break;
9274 }
9275
3174f8e9 9276 addr = tcg_temp_local_new_i32();
98a46317 9277 load_reg_var(s, addr, rn);
2359bf80
MR
9278
9279 /* Since the emulation does not have barriers,
9280 the acquire/release semantics need no special
9281 handling */
9282 if (op2 == 0) {
9283 if (insn & (1 << 20)) {
9284 tmp = tcg_temp_new_i32();
9285 switch (op1) {
9286 case 0: /* lda */
9bb6558a
PM
9287 gen_aa32_ld32u_iss(s, tmp, addr,
9288 get_mem_index(s),
9289 rd | ISSIsAcqRel);
2359bf80
MR
9290 break;
9291 case 2: /* ldab */
9bb6558a
PM
9292 gen_aa32_ld8u_iss(s, tmp, addr,
9293 get_mem_index(s),
9294 rd | ISSIsAcqRel);
2359bf80
MR
9295 break;
9296 case 3: /* ldah */
9bb6558a
PM
9297 gen_aa32_ld16u_iss(s, tmp, addr,
9298 get_mem_index(s),
9299 rd | ISSIsAcqRel);
2359bf80
MR
9300 break;
9301 default:
9302 abort();
9303 }
9304 store_reg(s, rd, tmp);
9305 } else {
9306 rm = insn & 0xf;
9307 tmp = load_reg(s, rm);
9308 switch (op1) {
9309 case 0: /* stl */
9bb6558a
PM
9310 gen_aa32_st32_iss(s, tmp, addr,
9311 get_mem_index(s),
9312 rm | ISSIsAcqRel);
2359bf80
MR
9313 break;
9314 case 2: /* stlb */
9bb6558a
PM
9315 gen_aa32_st8_iss(s, tmp, addr,
9316 get_mem_index(s),
9317 rm | ISSIsAcqRel);
2359bf80
MR
9318 break;
9319 case 3: /* stlh */
9bb6558a
PM
9320 gen_aa32_st16_iss(s, tmp, addr,
9321 get_mem_index(s),
9322 rm | ISSIsAcqRel);
2359bf80
MR
9323 break;
9324 default:
9325 abort();
9326 }
9327 tcg_temp_free_i32(tmp);
9328 }
9329 } else if (insn & (1 << 20)) {
86753403
PB
9330 switch (op1) {
9331 case 0: /* ldrex */
426f5abc 9332 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
9333 break;
9334 case 1: /* ldrexd */
426f5abc 9335 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
9336 break;
9337 case 2: /* ldrexb */
426f5abc 9338 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
9339 break;
9340 case 3: /* ldrexh */
426f5abc 9341 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
9342 break;
9343 default:
9344 abort();
9345 }
9ee6e8bb
PB
9346 } else {
9347 rm = insn & 0xf;
86753403
PB
9348 switch (op1) {
9349 case 0: /* strex */
426f5abc 9350 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
9351 break;
9352 case 1: /* strexd */
502e64fe 9353 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9354 break;
9355 case 2: /* strexb */
426f5abc 9356 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9357 break;
9358 case 3: /* strexh */
426f5abc 9359 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9360 break;
9361 default:
9362 abort();
9363 }
9ee6e8bb 9364 }
39d5492a 9365 tcg_temp_free_i32(addr);
c4869ca6
OS
9366 } else if ((insn & 0x00300f00) == 0) {
9367 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9368 * - SWP, SWPB
9369 */
9370
cf12bce0
EC
9371 TCGv taddr;
9372 TCGMemOp opc = s->be_data;
9373
9ee6e8bb
PB
9374 rm = (insn) & 0xf;
9375
9ee6e8bb 9376 if (insn & (1 << 22)) {
cf12bce0 9377 opc |= MO_UB;
9ee6e8bb 9378 } else {
cf12bce0 9379 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9380 }
cf12bce0
EC
9381
9382 addr = load_reg(s, rn);
9383 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9384 tcg_temp_free_i32(addr);
cf12bce0
EC
9385
9386 tmp = load_reg(s, rm);
9387 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9388 get_mem_index(s), opc);
9389 tcg_temp_free(taddr);
9390 store_reg(s, rd, tmp);
c4869ca6
OS
9391 } else {
9392 goto illegal_op;
9ee6e8bb
PB
9393 }
9394 }
9395 } else {
9396 int address_offset;
3960c336 9397 bool load = insn & (1 << 20);
63f26fcf
PM
9398 bool wbit = insn & (1 << 21);
9399 bool pbit = insn & (1 << 24);
3960c336 9400 bool doubleword = false;
9bb6558a
PM
9401 ISSInfo issinfo;
9402
9ee6e8bb
PB
9403 /* Misc load/store */
9404 rn = (insn >> 16) & 0xf;
9405 rd = (insn >> 12) & 0xf;
3960c336 9406
9bb6558a
PM
9407 /* ISS not valid if writeback */
9408 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9409
3960c336
PM
9410 if (!load && (sh & 2)) {
9411 /* doubleword */
9412 ARCH(5TE);
9413 if (rd & 1) {
9414 /* UNPREDICTABLE; we choose to UNDEF */
9415 goto illegal_op;
9416 }
9417 load = (sh & 1) == 0;
9418 doubleword = true;
9419 }
9420
b0109805 9421 addr = load_reg(s, rn);
63f26fcf 9422 if (pbit) {
b0109805 9423 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9424 }
9ee6e8bb 9425 address_offset = 0;
3960c336
PM
9426
9427 if (doubleword) {
9428 if (!load) {
9ee6e8bb 9429 /* store */
b0109805 9430 tmp = load_reg(s, rd);
12dcc321 9431 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9432 tcg_temp_free_i32(tmp);
b0109805
PB
9433 tcg_gen_addi_i32(addr, addr, 4);
9434 tmp = load_reg(s, rd + 1);
12dcc321 9435 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9436 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9437 } else {
9438 /* load */
5a839c0d 9439 tmp = tcg_temp_new_i32();
12dcc321 9440 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9441 store_reg(s, rd, tmp);
9442 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9443 tmp = tcg_temp_new_i32();
12dcc321 9444 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9445 rd++;
9ee6e8bb
PB
9446 }
9447 address_offset = -4;
3960c336
PM
9448 } else if (load) {
9449 /* load */
9450 tmp = tcg_temp_new_i32();
9451 switch (sh) {
9452 case 1:
9bb6558a
PM
9453 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9454 issinfo);
3960c336
PM
9455 break;
9456 case 2:
9bb6558a
PM
9457 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9458 issinfo);
3960c336
PM
9459 break;
9460 default:
9461 case 3:
9bb6558a
PM
9462 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9463 issinfo);
3960c336
PM
9464 break;
9465 }
9ee6e8bb
PB
9466 } else {
9467 /* store */
b0109805 9468 tmp = load_reg(s, rd);
9bb6558a 9469 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9470 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9471 }
9472 /* Perform base writeback before the loaded value to
9473 ensure correct behavior with overlapping index registers.
b6af0975 9474 ldrd with base writeback is undefined if the
9ee6e8bb 9475 destination and index registers overlap. */
63f26fcf 9476 if (!pbit) {
b0109805
PB
9477 gen_add_datah_offset(s, insn, address_offset, addr);
9478 store_reg(s, rn, addr);
63f26fcf 9479 } else if (wbit) {
9ee6e8bb 9480 if (address_offset)
b0109805
PB
9481 tcg_gen_addi_i32(addr, addr, address_offset);
9482 store_reg(s, rn, addr);
9483 } else {
7d1b0095 9484 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9485 }
9486 if (load) {
9487 /* Complete the load. */
b0109805 9488 store_reg(s, rd, tmp);
9ee6e8bb
PB
9489 }
9490 }
9491 break;
9492 case 0x4:
9493 case 0x5:
9494 goto do_ldst;
9495 case 0x6:
9496 case 0x7:
9497 if (insn & (1 << 4)) {
9498 ARCH(6);
9499 /* Armv6 Media instructions. */
9500 rm = insn & 0xf;
9501 rn = (insn >> 16) & 0xf;
2c0262af 9502 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9503 rs = (insn >> 8) & 0xf;
9504 switch ((insn >> 23) & 3) {
9505 case 0: /* Parallel add/subtract. */
9506 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9507 tmp = load_reg(s, rn);
9508 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9509 sh = (insn >> 5) & 7;
9510 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9511 goto illegal_op;
6ddbc6e4 9512 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9513 tcg_temp_free_i32(tmp2);
6ddbc6e4 9514 store_reg(s, rd, tmp);
9ee6e8bb
PB
9515 break;
9516 case 1:
9517 if ((insn & 0x00700020) == 0) {
6c95676b 9518 /* Halfword pack. */
3670669c
PB
9519 tmp = load_reg(s, rn);
9520 tmp2 = load_reg(s, rm);
9ee6e8bb 9521 shift = (insn >> 7) & 0x1f;
3670669c
PB
9522 if (insn & (1 << 6)) {
9523 /* pkhtb */
22478e79
AZ
9524 if (shift == 0)
9525 shift = 31;
9526 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9527 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9528 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9529 } else {
9530 /* pkhbt */
22478e79
AZ
9531 if (shift)
9532 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9533 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9534 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9535 }
9536 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9537 tcg_temp_free_i32(tmp2);
3670669c 9538 store_reg(s, rd, tmp);
9ee6e8bb
PB
9539 } else if ((insn & 0x00200020) == 0x00200000) {
9540 /* [us]sat */
6ddbc6e4 9541 tmp = load_reg(s, rm);
9ee6e8bb
PB
9542 shift = (insn >> 7) & 0x1f;
9543 if (insn & (1 << 6)) {
9544 if (shift == 0)
9545 shift = 31;
6ddbc6e4 9546 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9547 } else {
6ddbc6e4 9548 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9549 }
9550 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9551 tmp2 = tcg_const_i32(sh);
9552 if (insn & (1 << 22))
9ef39277 9553 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9554 else
9ef39277 9555 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9556 tcg_temp_free_i32(tmp2);
6ddbc6e4 9557 store_reg(s, rd, tmp);
9ee6e8bb
PB
9558 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9559 /* [us]sat16 */
6ddbc6e4 9560 tmp = load_reg(s, rm);
9ee6e8bb 9561 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9562 tmp2 = tcg_const_i32(sh);
9563 if (insn & (1 << 22))
9ef39277 9564 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9565 else
9ef39277 9566 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9567 tcg_temp_free_i32(tmp2);
6ddbc6e4 9568 store_reg(s, rd, tmp);
9ee6e8bb
PB
9569 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9570 /* Select bytes. */
6ddbc6e4
PB
9571 tmp = load_reg(s, rn);
9572 tmp2 = load_reg(s, rm);
7d1b0095 9573 tmp3 = tcg_temp_new_i32();
0ecb72a5 9574 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9575 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9576 tcg_temp_free_i32(tmp3);
9577 tcg_temp_free_i32(tmp2);
6ddbc6e4 9578 store_reg(s, rd, tmp);
9ee6e8bb 9579 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9580 tmp = load_reg(s, rm);
9ee6e8bb 9581 shift = (insn >> 10) & 3;
1301f322 9582 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9583 rotate, a shift is sufficient. */
9584 if (shift != 0)
f669df27 9585 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9586 op1 = (insn >> 20) & 7;
9587 switch (op1) {
5e3f878a
PB
9588 case 0: gen_sxtb16(tmp); break;
9589 case 2: gen_sxtb(tmp); break;
9590 case 3: gen_sxth(tmp); break;
9591 case 4: gen_uxtb16(tmp); break;
9592 case 6: gen_uxtb(tmp); break;
9593 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9594 default: goto illegal_op;
9595 }
9596 if (rn != 15) {
5e3f878a 9597 tmp2 = load_reg(s, rn);
9ee6e8bb 9598 if ((op1 & 3) == 0) {
5e3f878a 9599 gen_add16(tmp, tmp2);
9ee6e8bb 9600 } else {
5e3f878a 9601 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9602 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9603 }
9604 }
6c95676b 9605 store_reg(s, rd, tmp);
9ee6e8bb
PB
9606 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9607 /* rev */
b0109805 9608 tmp = load_reg(s, rm);
9ee6e8bb
PB
9609 if (insn & (1 << 22)) {
9610 if (insn & (1 << 7)) {
b0109805 9611 gen_revsh(tmp);
9ee6e8bb
PB
9612 } else {
9613 ARCH(6T2);
b0109805 9614 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9615 }
9616 } else {
9617 if (insn & (1 << 7))
b0109805 9618 gen_rev16(tmp);
9ee6e8bb 9619 else
66896cb8 9620 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9621 }
b0109805 9622 store_reg(s, rd, tmp);
9ee6e8bb
PB
9623 } else {
9624 goto illegal_op;
9625 }
9626 break;
9627 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9628 switch ((insn >> 20) & 0x7) {
9629 case 5:
9630 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9631 /* op2 not 00x or 11x : UNDEF */
9632 goto illegal_op;
9633 }
838fa72d
AJ
9634 /* Signed multiply most significant [accumulate].
9635 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9636 tmp = load_reg(s, rm);
9637 tmp2 = load_reg(s, rs);
a7812ae4 9638 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9639
955a7dd5 9640 if (rd != 15) {
838fa72d 9641 tmp = load_reg(s, rd);
9ee6e8bb 9642 if (insn & (1 << 6)) {
838fa72d 9643 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9644 } else {
838fa72d 9645 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9646 }
9647 }
838fa72d
AJ
9648 if (insn & (1 << 5)) {
9649 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9650 }
9651 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9652 tmp = tcg_temp_new_i32();
ecc7b3aa 9653 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9654 tcg_temp_free_i64(tmp64);
955a7dd5 9655 store_reg(s, rn, tmp);
41e9564d
PM
9656 break;
9657 case 0:
9658 case 4:
9659 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9660 if (insn & (1 << 7)) {
9661 goto illegal_op;
9662 }
9663 tmp = load_reg(s, rm);
9664 tmp2 = load_reg(s, rs);
9ee6e8bb 9665 if (insn & (1 << 5))
5e3f878a
PB
9666 gen_swap_half(tmp2);
9667 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9668 if (insn & (1 << 22)) {
5e3f878a 9669 /* smlald, smlsld */
33bbd75a
PC
9670 TCGv_i64 tmp64_2;
9671
a7812ae4 9672 tmp64 = tcg_temp_new_i64();
33bbd75a 9673 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9674 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9675 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9676 tcg_temp_free_i32(tmp);
33bbd75a
PC
9677 tcg_temp_free_i32(tmp2);
9678 if (insn & (1 << 6)) {
9679 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9680 } else {
9681 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9682 }
9683 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9684 gen_addq(s, tmp64, rd, rn);
9685 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9686 tcg_temp_free_i64(tmp64);
9ee6e8bb 9687 } else {
5e3f878a 9688 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9689 if (insn & (1 << 6)) {
9690 /* This subtraction cannot overflow. */
9691 tcg_gen_sub_i32(tmp, tmp, tmp2);
9692 } else {
9693 /* This addition cannot overflow 32 bits;
9694 * however it may overflow considered as a
9695 * signed operation, in which case we must set
9696 * the Q flag.
9697 */
9698 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9699 }
9700 tcg_temp_free_i32(tmp2);
22478e79 9701 if (rd != 15)
9ee6e8bb 9702 {
22478e79 9703 tmp2 = load_reg(s, rd);
9ef39277 9704 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9705 tcg_temp_free_i32(tmp2);
9ee6e8bb 9706 }
22478e79 9707 store_reg(s, rn, tmp);
9ee6e8bb 9708 }
41e9564d 9709 break;
b8b8ea05
PM
9710 case 1:
9711 case 3:
9712 /* SDIV, UDIV */
d614a513 9713 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9714 goto illegal_op;
9715 }
9716 if (((insn >> 5) & 7) || (rd != 15)) {
9717 goto illegal_op;
9718 }
9719 tmp = load_reg(s, rm);
9720 tmp2 = load_reg(s, rs);
9721 if (insn & (1 << 21)) {
9722 gen_helper_udiv(tmp, tmp, tmp2);
9723 } else {
9724 gen_helper_sdiv(tmp, tmp, tmp2);
9725 }
9726 tcg_temp_free_i32(tmp2);
9727 store_reg(s, rn, tmp);
9728 break;
41e9564d
PM
9729 default:
9730 goto illegal_op;
9ee6e8bb
PB
9731 }
9732 break;
9733 case 3:
9734 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9735 switch (op1) {
9736 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9737 ARCH(6);
9738 tmp = load_reg(s, rm);
9739 tmp2 = load_reg(s, rs);
9740 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9741 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9742 if (rd != 15) {
9743 tmp2 = load_reg(s, rd);
6ddbc6e4 9744 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9745 tcg_temp_free_i32(tmp2);
9ee6e8bb 9746 }
ded9d295 9747 store_reg(s, rn, tmp);
9ee6e8bb
PB
9748 break;
9749 case 0x20: case 0x24: case 0x28: case 0x2c:
9750 /* Bitfield insert/clear. */
9751 ARCH(6T2);
9752 shift = (insn >> 7) & 0x1f;
9753 i = (insn >> 16) & 0x1f;
45140a57
KB
9754 if (i < shift) {
9755 /* UNPREDICTABLE; we choose to UNDEF */
9756 goto illegal_op;
9757 }
9ee6e8bb
PB
9758 i = i + 1 - shift;
9759 if (rm == 15) {
7d1b0095 9760 tmp = tcg_temp_new_i32();
5e3f878a 9761 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9762 } else {
5e3f878a 9763 tmp = load_reg(s, rm);
9ee6e8bb
PB
9764 }
9765 if (i != 32) {
5e3f878a 9766 tmp2 = load_reg(s, rd);
d593c48e 9767 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9768 tcg_temp_free_i32(tmp2);
9ee6e8bb 9769 }
5e3f878a 9770 store_reg(s, rd, tmp);
9ee6e8bb
PB
9771 break;
9772 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9773 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9774 ARCH(6T2);
5e3f878a 9775 tmp = load_reg(s, rm);
9ee6e8bb
PB
9776 shift = (insn >> 7) & 0x1f;
9777 i = ((insn >> 16) & 0x1f) + 1;
9778 if (shift + i > 32)
9779 goto illegal_op;
9780 if (i < 32) {
9781 if (op1 & 0x20) {
59a71b4c 9782 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9783 } else {
59a71b4c 9784 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9785 }
9786 }
5e3f878a 9787 store_reg(s, rd, tmp);
9ee6e8bb
PB
9788 break;
9789 default:
9790 goto illegal_op;
9791 }
9792 break;
9793 }
9794 break;
9795 }
9796 do_ldst:
9797 /* Check for undefined extension instructions
9798 * per the ARM Bible IE:
9799 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9800 */
9801 sh = (0xf << 20) | (0xf << 4);
9802 if (op1 == 0x7 && ((insn & sh) == sh))
9803 {
9804 goto illegal_op;
9805 }
9806 /* load/store byte/word */
9807 rn = (insn >> 16) & 0xf;
9808 rd = (insn >> 12) & 0xf;
b0109805 9809 tmp2 = load_reg(s, rn);
a99caa48
PM
9810 if ((insn & 0x01200000) == 0x00200000) {
9811 /* ldrt/strt */
579d21cc 9812 i = get_a32_user_mem_index(s);
a99caa48
PM
9813 } else {
9814 i = get_mem_index(s);
9815 }
9ee6e8bb 9816 if (insn & (1 << 24))
b0109805 9817 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9818 if (insn & (1 << 20)) {
9819 /* load */
5a839c0d 9820 tmp = tcg_temp_new_i32();
9ee6e8bb 9821 if (insn & (1 << 22)) {
9bb6558a 9822 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9823 } else {
9bb6558a 9824 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9825 }
9ee6e8bb
PB
9826 } else {
9827 /* store */
b0109805 9828 tmp = load_reg(s, rd);
5a839c0d 9829 if (insn & (1 << 22)) {
9bb6558a 9830 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9831 } else {
9bb6558a 9832 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9833 }
9834 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9835 }
9836 if (!(insn & (1 << 24))) {
b0109805
PB
9837 gen_add_data_offset(s, insn, tmp2);
9838 store_reg(s, rn, tmp2);
9839 } else if (insn & (1 << 21)) {
9840 store_reg(s, rn, tmp2);
9841 } else {
7d1b0095 9842 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9843 }
9844 if (insn & (1 << 20)) {
9845 /* Complete the load. */
7dcc1f89 9846 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9847 }
9848 break;
9849 case 0x08:
9850 case 0x09:
9851 {
da3e53dd
PM
9852 int j, n, loaded_base;
9853 bool exc_return = false;
9854 bool is_load = extract32(insn, 20, 1);
9855 bool user = false;
39d5492a 9856 TCGv_i32 loaded_var;
9ee6e8bb
PB
9857 /* load/store multiple words */
9858 /* XXX: store correct base if write back */
9ee6e8bb 9859 if (insn & (1 << 22)) {
da3e53dd 9860 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9861 if (IS_USER(s))
9862 goto illegal_op; /* only usable in supervisor mode */
9863
da3e53dd
PM
9864 if (is_load && extract32(insn, 15, 1)) {
9865 exc_return = true;
9866 } else {
9867 user = true;
9868 }
9ee6e8bb
PB
9869 }
9870 rn = (insn >> 16) & 0xf;
b0109805 9871 addr = load_reg(s, rn);
9ee6e8bb
PB
9872
9873 /* compute total size */
9874 loaded_base = 0;
f764718d 9875 loaded_var = NULL;
9ee6e8bb
PB
9876 n = 0;
9877 for(i=0;i<16;i++) {
9878 if (insn & (1 << i))
9879 n++;
9880 }
9881 /* XXX: test invalid n == 0 case ? */
9882 if (insn & (1 << 23)) {
9883 if (insn & (1 << 24)) {
9884 /* pre increment */
b0109805 9885 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9886 } else {
9887 /* post increment */
9888 }
9889 } else {
9890 if (insn & (1 << 24)) {
9891 /* pre decrement */
b0109805 9892 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9893 } else {
9894 /* post decrement */
9895 if (n != 1)
b0109805 9896 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9897 }
9898 }
9899 j = 0;
9900 for(i=0;i<16;i++) {
9901 if (insn & (1 << i)) {
da3e53dd 9902 if (is_load) {
9ee6e8bb 9903 /* load */
5a839c0d 9904 tmp = tcg_temp_new_i32();
12dcc321 9905 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9906 if (user) {
b75263d6 9907 tmp2 = tcg_const_i32(i);
1ce94f81 9908 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9909 tcg_temp_free_i32(tmp2);
7d1b0095 9910 tcg_temp_free_i32(tmp);
9ee6e8bb 9911 } else if (i == rn) {
b0109805 9912 loaded_var = tmp;
9ee6e8bb 9913 loaded_base = 1;
fb0e8e79
PM
9914 } else if (rn == 15 && exc_return) {
9915 store_pc_exc_ret(s, tmp);
9ee6e8bb 9916 } else {
7dcc1f89 9917 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9918 }
9919 } else {
9920 /* store */
9921 if (i == 15) {
9922 /* special case: r15 = PC + 8 */
9923 val = (long)s->pc + 4;
7d1b0095 9924 tmp = tcg_temp_new_i32();
b0109805 9925 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9926 } else if (user) {
7d1b0095 9927 tmp = tcg_temp_new_i32();
b75263d6 9928 tmp2 = tcg_const_i32(i);
9ef39277 9929 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9930 tcg_temp_free_i32(tmp2);
9ee6e8bb 9931 } else {
b0109805 9932 tmp = load_reg(s, i);
9ee6e8bb 9933 }
12dcc321 9934 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9935 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9936 }
9937 j++;
9938 /* no need to add after the last transfer */
9939 if (j != n)
b0109805 9940 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9941 }
9942 }
9943 if (insn & (1 << 21)) {
9944 /* write back */
9945 if (insn & (1 << 23)) {
9946 if (insn & (1 << 24)) {
9947 /* pre increment */
9948 } else {
9949 /* post increment */
b0109805 9950 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9951 }
9952 } else {
9953 if (insn & (1 << 24)) {
9954 /* pre decrement */
9955 if (n != 1)
b0109805 9956 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9957 } else {
9958 /* post decrement */
b0109805 9959 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9960 }
9961 }
b0109805
PB
9962 store_reg(s, rn, addr);
9963 } else {
7d1b0095 9964 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9965 }
9966 if (loaded_base) {
b0109805 9967 store_reg(s, rn, loaded_var);
9ee6e8bb 9968 }
da3e53dd 9969 if (exc_return) {
9ee6e8bb 9970 /* Restore CPSR from SPSR. */
d9ba4830 9971 tmp = load_cpu_field(spsr);
e69ad9df
AL
9972 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9973 gen_io_start();
9974 }
235ea1f5 9975 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
9976 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9977 gen_io_end();
9978 }
7d1b0095 9979 tcg_temp_free_i32(tmp);
b29fd33d 9980 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9981 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9982 }
9983 }
9984 break;
9985 case 0xa:
9986 case 0xb:
9987 {
9988 int32_t offset;
9989
9990 /* branch (and link) */
9991 val = (int32_t)s->pc;
9992 if (insn & (1 << 24)) {
7d1b0095 9993 tmp = tcg_temp_new_i32();
5e3f878a
PB
9994 tcg_gen_movi_i32(tmp, val);
9995 store_reg(s, 14, tmp);
9ee6e8bb 9996 }
534df156
PM
9997 offset = sextract32(insn << 2, 0, 26);
9998 val += offset + 4;
9ee6e8bb
PB
9999 gen_jmp(s, val);
10000 }
10001 break;
10002 case 0xc:
10003 case 0xd:
10004 case 0xe:
6a57f3eb
WN
10005 if (((insn >> 8) & 0xe) == 10) {
10006 /* VFP. */
7dcc1f89 10007 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10008 goto illegal_op;
10009 }
7dcc1f89 10010 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10011 /* Coprocessor. */
9ee6e8bb 10012 goto illegal_op;
6a57f3eb 10013 }
9ee6e8bb
PB
10014 break;
10015 case 0xf:
10016 /* swi */
eaed129d 10017 gen_set_pc_im(s, s->pc);
d4a2dc67 10018 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10019 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10020 break;
10021 default:
10022 illegal_op:
73710361
GB
10023 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10024 default_exception_el(s));
9ee6e8bb
PB
10025 break;
10026 }
10027 }
10028}
10029
296e5a0a
PM
10030static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10031{
10032 /* Return true if this is a 16 bit instruction. We must be precise
10033 * about this (matching the decode). We assume that s->pc still
10034 * points to the first 16 bits of the insn.
10035 */
10036 if ((insn >> 11) < 0x1d) {
10037 /* Definitely a 16-bit instruction */
10038 return true;
10039 }
10040
10041 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10042 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10043 * end up actually treating this as two 16-bit insns, though,
10044 * if it's half of a bl/blx pair that might span a page boundary.
10045 */
14120108
JS
10046 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10047 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10048 /* Thumb2 cores (including all M profile ones) always treat
10049 * 32-bit insns as 32-bit.
10050 */
10051 return false;
10052 }
10053
bfe7ad5b 10054 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10055 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10056 * is not on the next page; we merge this into a 32-bit
10057 * insn.
10058 */
10059 return false;
10060 }
10061 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10062 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10063 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10064 * -- handle as single 16 bit insn
10065 */
10066 return true;
10067}
10068
9ee6e8bb
PB
/* Return true if this is a Thumb-2 logical op.
 * The Thumb-2 data-processing opcodes 0..7 (and, bic, orr, orn, eor,
 * and the move/test forms sharing those encodings) are the logical
 * group; 8 and above are arithmetic.
 */
static int
thumb2_logic_op(int op)
{
    return op <= 7 ? 1 : 0;
}
10075
/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.

   The result is computed in place into T0; T1 is an input operand.
   Neither temporary is freed here -- the caller owns both.  Opcodes
   0..4 are the logical ops (flags, when requested, come from
   gen_logic_CC on the result plus optionally the shifter carry-out);
   opcodes 8/10/11/13/14 are arithmetic (flags come from the
   dedicated gen_*_CC helpers).  Opcodes 5, 6, 7, 9, 12 and 15 are
   not data-processing ops and make this function return 1.  */

static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    /* Nonzero when the op is logical AND the caller asked for flags;
       in that case NZ are set from the result after the switch.  */
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_add_CC(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_adc_CC(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds) {
            gen_sbc_CC(t0, t0, t1);
        } else {
            gen_sub_carry(t0, t0, t1);
        }
        break;
    case 13: /* sub */
        if (conds)
            gen_sub_CC(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        /* Reverse subtract: note the swapped operand order, t1 - t0.  */
        if (conds)
            gen_sub_CC(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        /* Logical ops: NZ from the result; C optionally from the
           shifter carry-out held in bit 31 of t1.  */
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}
10151
2eea841c
PM
10152/* Translate a 32-bit thumb instruction. */
10153static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10154{
296e5a0a 10155 uint32_t imm, shift, offset;
9ee6e8bb 10156 uint32_t rd, rn, rm, rs;
39d5492a
PM
10157 TCGv_i32 tmp;
10158 TCGv_i32 tmp2;
10159 TCGv_i32 tmp3;
10160 TCGv_i32 addr;
a7812ae4 10161 TCGv_i64 tmp64;
9ee6e8bb
PB
10162 int op;
10163 int shiftop;
10164 int conds;
10165 int logic_cc;
10166
14120108
JS
10167 /*
10168 * ARMv6-M supports a limited subset of Thumb2 instructions.
10169 * Other Thumb1 architectures allow only 32-bit
10170 * combined BL/BLX prefix and suffix.
296e5a0a 10171 */
14120108
JS
10172 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10173 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10174 int i;
10175 bool found = false;
8297cb13
JS
10176 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10177 0xf3b08040 /* dsb */,
10178 0xf3b08050 /* dmb */,
10179 0xf3b08060 /* isb */,
10180 0xf3e08000 /* mrs */,
10181 0xf000d000 /* bl */};
10182 static const uint32_t armv6m_mask[] = {0xffe0d000,
10183 0xfff0d0f0,
10184 0xfff0d0f0,
10185 0xfff0d0f0,
10186 0xffe0d000,
10187 0xf800d000};
14120108
JS
10188
10189 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10190 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10191 found = true;
10192 break;
10193 }
10194 }
10195 if (!found) {
10196 goto illegal_op;
10197 }
10198 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10199 ARCH(6T2);
10200 }
10201
10202 rn = (insn >> 16) & 0xf;
10203 rs = (insn >> 12) & 0xf;
10204 rd = (insn >> 8) & 0xf;
10205 rm = insn & 0xf;
10206 switch ((insn >> 25) & 0xf) {
10207 case 0: case 1: case 2: case 3:
10208 /* 16-bit instructions. Should never happen. */
10209 abort();
10210 case 4:
10211 if (insn & (1 << 22)) {
ebfe27c5
PM
10212 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10213 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10214 * table branch, TT.
ebfe27c5 10215 */
76eff04d
PM
10216 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10217 arm_dc_feature(s, ARM_FEATURE_V8)) {
10218 /* 0b1110_1001_0111_1111_1110_1001_0111_111
10219 * - SG (v8M only)
10220 * The bulk of the behaviour for this instruction is implemented
10221 * in v7m_handle_execute_nsc(), which deals with the insn when
10222 * it is executed by a CPU in non-secure state from memory
10223 * which is Secure & NonSecure-Callable.
10224 * Here we only need to handle the remaining cases:
10225 * * in NS memory (including the "security extension not
10226 * implemented" case) : NOP
10227 * * in S memory but CPU already secure (clear IT bits)
10228 * We know that the attribute for the memory this insn is
10229 * in must match the current CPU state, because otherwise
10230 * get_phys_addr_pmsav8 would have generated an exception.
10231 */
10232 if (s->v8m_secure) {
10233 /* Like the IT insn, we don't need to generate any code */
10234 s->condexec_cond = 0;
10235 s->condexec_mask = 0;
10236 }
10237 } else if (insn & 0x01200000) {
ebfe27c5
PM
10238 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10239 * - load/store dual (post-indexed)
10240 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10241 * - load/store dual (literal and immediate)
10242 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10243 * - load/store dual (pre-indexed)
10244 */
9ee6e8bb 10245 if (rn == 15) {
ebfe27c5
PM
10246 if (insn & (1 << 21)) {
10247 /* UNPREDICTABLE */
10248 goto illegal_op;
10249 }
7d1b0095 10250 addr = tcg_temp_new_i32();
b0109805 10251 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10252 } else {
b0109805 10253 addr = load_reg(s, rn);
9ee6e8bb
PB
10254 }
10255 offset = (insn & 0xff) * 4;
10256 if ((insn & (1 << 23)) == 0)
10257 offset = -offset;
10258 if (insn & (1 << 24)) {
b0109805 10259 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
10260 offset = 0;
10261 }
10262 if (insn & (1 << 20)) {
10263 /* ldrd */
e2592fad 10264 tmp = tcg_temp_new_i32();
12dcc321 10265 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10266 store_reg(s, rs, tmp);
10267 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10268 tmp = tcg_temp_new_i32();
12dcc321 10269 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10270 store_reg(s, rd, tmp);
9ee6e8bb
PB
10271 } else {
10272 /* strd */
b0109805 10273 tmp = load_reg(s, rs);
12dcc321 10274 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10275 tcg_temp_free_i32(tmp);
b0109805
PB
10276 tcg_gen_addi_i32(addr, addr, 4);
10277 tmp = load_reg(s, rd);
12dcc321 10278 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10279 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10280 }
10281 if (insn & (1 << 21)) {
10282 /* Base writeback. */
b0109805
PB
10283 tcg_gen_addi_i32(addr, addr, offset - 4);
10284 store_reg(s, rn, addr);
10285 } else {
7d1b0095 10286 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10287 }
10288 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
10289 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10290 * - load/store exclusive word
5158de24 10291 * - TT (v8M only)
ebfe27c5
PM
10292 */
10293 if (rs == 15) {
5158de24
PM
10294 if (!(insn & (1 << 20)) &&
10295 arm_dc_feature(s, ARM_FEATURE_M) &&
10296 arm_dc_feature(s, ARM_FEATURE_V8)) {
10297 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10298 * - TT (v8M only)
10299 */
10300 bool alt = insn & (1 << 7);
10301 TCGv_i32 addr, op, ttresp;
10302
10303 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10304 /* we UNDEF for these UNPREDICTABLE cases */
10305 goto illegal_op;
10306 }
10307
10308 if (alt && !s->v8m_secure) {
10309 goto illegal_op;
10310 }
10311
10312 addr = load_reg(s, rn);
10313 op = tcg_const_i32(extract32(insn, 6, 2));
10314 ttresp = tcg_temp_new_i32();
10315 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10316 tcg_temp_free_i32(addr);
10317 tcg_temp_free_i32(op);
10318 store_reg(s, rd, ttresp);
384c6c03 10319 break;
5158de24 10320 }
ebfe27c5
PM
10321 goto illegal_op;
10322 }
39d5492a 10323 addr = tcg_temp_local_new_i32();
98a46317 10324 load_reg_var(s, addr, rn);
426f5abc 10325 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 10326 if (insn & (1 << 20)) {
426f5abc 10327 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 10328 } else {
426f5abc 10329 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 10330 }
39d5492a 10331 tcg_temp_free_i32(addr);
2359bf80 10332 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
10333 /* Table Branch. */
10334 if (rn == 15) {
7d1b0095 10335 addr = tcg_temp_new_i32();
b0109805 10336 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 10337 } else {
b0109805 10338 addr = load_reg(s, rn);
9ee6e8bb 10339 }
b26eefb6 10340 tmp = load_reg(s, rm);
b0109805 10341 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
10342 if (insn & (1 << 4)) {
10343 /* tbh */
b0109805 10344 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10345 tcg_temp_free_i32(tmp);
e2592fad 10346 tmp = tcg_temp_new_i32();
12dcc321 10347 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10348 } else { /* tbb */
7d1b0095 10349 tcg_temp_free_i32(tmp);
e2592fad 10350 tmp = tcg_temp_new_i32();
12dcc321 10351 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10352 }
7d1b0095 10353 tcg_temp_free_i32(addr);
b0109805
PB
10354 tcg_gen_shli_i32(tmp, tmp, 1);
10355 tcg_gen_addi_i32(tmp, tmp, s->pc);
10356 store_reg(s, 15, tmp);
9ee6e8bb 10357 } else {
2359bf80 10358 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 10359 op = (insn >> 4) & 0x3;
2359bf80
MR
10360 switch (op2) {
10361 case 0:
426f5abc 10362 goto illegal_op;
2359bf80
MR
10363 case 1:
10364 /* Load/store exclusive byte/halfword/doubleword */
10365 if (op == 2) {
10366 goto illegal_op;
10367 }
10368 ARCH(7);
10369 break;
10370 case 2:
10371 /* Load-acquire/store-release */
10372 if (op == 3) {
10373 goto illegal_op;
10374 }
10375 /* Fall through */
10376 case 3:
10377 /* Load-acquire/store-release exclusive */
10378 ARCH(8);
10379 break;
426f5abc 10380 }
39d5492a 10381 addr = tcg_temp_local_new_i32();
98a46317 10382 load_reg_var(s, addr, rn);
2359bf80
MR
10383 if (!(op2 & 1)) {
10384 if (insn & (1 << 20)) {
10385 tmp = tcg_temp_new_i32();
10386 switch (op) {
10387 case 0: /* ldab */
9bb6558a
PM
10388 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10389 rs | ISSIsAcqRel);
2359bf80
MR
10390 break;
10391 case 1: /* ldah */
9bb6558a
PM
10392 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10393 rs | ISSIsAcqRel);
2359bf80
MR
10394 break;
10395 case 2: /* lda */
9bb6558a
PM
10396 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10397 rs | ISSIsAcqRel);
2359bf80
MR
10398 break;
10399 default:
10400 abort();
10401 }
10402 store_reg(s, rs, tmp);
10403 } else {
10404 tmp = load_reg(s, rs);
10405 switch (op) {
10406 case 0: /* stlb */
9bb6558a
PM
10407 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10408 rs | ISSIsAcqRel);
2359bf80
MR
10409 break;
10410 case 1: /* stlh */
9bb6558a
PM
10411 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10412 rs | ISSIsAcqRel);
2359bf80
MR
10413 break;
10414 case 2: /* stl */
9bb6558a
PM
10415 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10416 rs | ISSIsAcqRel);
2359bf80
MR
10417 break;
10418 default:
10419 abort();
10420 }
10421 tcg_temp_free_i32(tmp);
10422 }
10423 } else if (insn & (1 << 20)) {
426f5abc 10424 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 10425 } else {
426f5abc 10426 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 10427 }
39d5492a 10428 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10429 }
10430 } else {
10431 /* Load/store multiple, RFE, SRS. */
10432 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10433 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10434 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10435 goto illegal_op;
00115976 10436 }
9ee6e8bb
PB
10437 if (insn & (1 << 20)) {
10438 /* rfe */
b0109805
PB
10439 addr = load_reg(s, rn);
10440 if ((insn & (1 << 24)) == 0)
10441 tcg_gen_addi_i32(addr, addr, -8);
10442 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10443 tmp = tcg_temp_new_i32();
12dcc321 10444 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10445 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10446 tmp2 = tcg_temp_new_i32();
12dcc321 10447 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
10448 if (insn & (1 << 21)) {
10449 /* Base writeback. */
b0109805
PB
10450 if (insn & (1 << 24)) {
10451 tcg_gen_addi_i32(addr, addr, 4);
10452 } else {
10453 tcg_gen_addi_i32(addr, addr, -4);
10454 }
10455 store_reg(s, rn, addr);
10456 } else {
7d1b0095 10457 tcg_temp_free_i32(addr);
9ee6e8bb 10458 }
b0109805 10459 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10460 } else {
10461 /* srs */
81465888
PM
10462 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10463 insn & (1 << 21));
9ee6e8bb
PB
10464 }
10465 } else {
5856d44e 10466 int i, loaded_base = 0;
39d5492a 10467 TCGv_i32 loaded_var;
9ee6e8bb 10468 /* Load/store multiple. */
b0109805 10469 addr = load_reg(s, rn);
9ee6e8bb
PB
10470 offset = 0;
10471 for (i = 0; i < 16; i++) {
10472 if (insn & (1 << i))
10473 offset += 4;
10474 }
10475 if (insn & (1 << 24)) {
b0109805 10476 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10477 }
10478
f764718d 10479 loaded_var = NULL;
9ee6e8bb
PB
10480 for (i = 0; i < 16; i++) {
10481 if ((insn & (1 << i)) == 0)
10482 continue;
10483 if (insn & (1 << 20)) {
10484 /* Load. */
e2592fad 10485 tmp = tcg_temp_new_i32();
12dcc321 10486 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10487 if (i == 15) {
3bb8a96f 10488 gen_bx_excret(s, tmp);
5856d44e
YO
10489 } else if (i == rn) {
10490 loaded_var = tmp;
10491 loaded_base = 1;
9ee6e8bb 10492 } else {
b0109805 10493 store_reg(s, i, tmp);
9ee6e8bb
PB
10494 }
10495 } else {
10496 /* Store. */
b0109805 10497 tmp = load_reg(s, i);
12dcc321 10498 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10499 tcg_temp_free_i32(tmp);
9ee6e8bb 10500 }
b0109805 10501 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10502 }
5856d44e
YO
10503 if (loaded_base) {
10504 store_reg(s, rn, loaded_var);
10505 }
9ee6e8bb
PB
10506 if (insn & (1 << 21)) {
10507 /* Base register writeback. */
10508 if (insn & (1 << 24)) {
b0109805 10509 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10510 }
10511 /* Fault if writeback register is in register list. */
10512 if (insn & (1 << rn))
10513 goto illegal_op;
b0109805
PB
10514 store_reg(s, rn, addr);
10515 } else {
7d1b0095 10516 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10517 }
10518 }
10519 }
10520 break;
2af9ab77
JB
10521 case 5:
10522
9ee6e8bb 10523 op = (insn >> 21) & 0xf;
2af9ab77 10524 if (op == 6) {
62b44f05
AR
10525 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10526 goto illegal_op;
10527 }
2af9ab77
JB
10528 /* Halfword pack. */
10529 tmp = load_reg(s, rn);
10530 tmp2 = load_reg(s, rm);
10531 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10532 if (insn & (1 << 5)) {
10533 /* pkhtb */
10534 if (shift == 0)
10535 shift = 31;
10536 tcg_gen_sari_i32(tmp2, tmp2, shift);
10537 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10538 tcg_gen_ext16u_i32(tmp2, tmp2);
10539 } else {
10540 /* pkhbt */
10541 if (shift)
10542 tcg_gen_shli_i32(tmp2, tmp2, shift);
10543 tcg_gen_ext16u_i32(tmp, tmp);
10544 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10545 }
10546 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10547 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10548 store_reg(s, rd, tmp);
10549 } else {
2af9ab77
JB
10550 /* Data processing register constant shift. */
10551 if (rn == 15) {
7d1b0095 10552 tmp = tcg_temp_new_i32();
2af9ab77
JB
10553 tcg_gen_movi_i32(tmp, 0);
10554 } else {
10555 tmp = load_reg(s, rn);
10556 }
10557 tmp2 = load_reg(s, rm);
10558
10559 shiftop = (insn >> 4) & 3;
10560 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10561 conds = (insn & (1 << 20)) != 0;
10562 logic_cc = (conds && thumb2_logic_op(op));
10563 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10564 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10565 goto illegal_op;
7d1b0095 10566 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10567 if (rd != 15) {
10568 store_reg(s, rd, tmp);
10569 } else {
7d1b0095 10570 tcg_temp_free_i32(tmp);
2af9ab77 10571 }
3174f8e9 10572 }
9ee6e8bb
PB
10573 break;
10574 case 13: /* Misc data processing. */
10575 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10576 if (op < 4 && (insn & 0xf000) != 0xf000)
10577 goto illegal_op;
10578 switch (op) {
10579 case 0: /* Register controlled shift. */
8984bd2e
PB
10580 tmp = load_reg(s, rn);
10581 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10582 if ((insn & 0x70) != 0)
10583 goto illegal_op;
10584 op = (insn >> 21) & 3;
8984bd2e
PB
10585 logic_cc = (insn & (1 << 20)) != 0;
10586 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10587 if (logic_cc)
10588 gen_logic_CC(tmp);
bedb8a6b 10589 store_reg(s, rd, tmp);
9ee6e8bb
PB
10590 break;
10591 case 1: /* Sign/zero extend. */
62b44f05
AR
10592 op = (insn >> 20) & 7;
10593 switch (op) {
10594 case 0: /* SXTAH, SXTH */
10595 case 1: /* UXTAH, UXTH */
10596 case 4: /* SXTAB, SXTB */
10597 case 5: /* UXTAB, UXTB */
10598 break;
10599 case 2: /* SXTAB16, SXTB16 */
10600 case 3: /* UXTAB16, UXTB16 */
10601 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10602 goto illegal_op;
10603 }
10604 break;
10605 default:
10606 goto illegal_op;
10607 }
10608 if (rn != 15) {
10609 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10610 goto illegal_op;
10611 }
10612 }
5e3f878a 10613 tmp = load_reg(s, rm);
9ee6e8bb 10614 shift = (insn >> 4) & 3;
1301f322 10615 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10616 rotate, a shift is sufficient. */
10617 if (shift != 0)
f669df27 10618 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10619 op = (insn >> 20) & 7;
10620 switch (op) {
5e3f878a
PB
10621 case 0: gen_sxth(tmp); break;
10622 case 1: gen_uxth(tmp); break;
10623 case 2: gen_sxtb16(tmp); break;
10624 case 3: gen_uxtb16(tmp); break;
10625 case 4: gen_sxtb(tmp); break;
10626 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10627 default:
10628 g_assert_not_reached();
9ee6e8bb
PB
10629 }
10630 if (rn != 15) {
5e3f878a 10631 tmp2 = load_reg(s, rn);
9ee6e8bb 10632 if ((op >> 1) == 1) {
5e3f878a 10633 gen_add16(tmp, tmp2);
9ee6e8bb 10634 } else {
5e3f878a 10635 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10636 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10637 }
10638 }
5e3f878a 10639 store_reg(s, rd, tmp);
9ee6e8bb
PB
10640 break;
10641 case 2: /* SIMD add/subtract. */
62b44f05
AR
10642 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10643 goto illegal_op;
10644 }
9ee6e8bb
PB
10645 op = (insn >> 20) & 7;
10646 shift = (insn >> 4) & 7;
10647 if ((op & 3) == 3 || (shift & 3) == 3)
10648 goto illegal_op;
6ddbc6e4
PB
10649 tmp = load_reg(s, rn);
10650 tmp2 = load_reg(s, rm);
10651 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10652 tcg_temp_free_i32(tmp2);
6ddbc6e4 10653 store_reg(s, rd, tmp);
9ee6e8bb
PB
10654 break;
10655 case 3: /* Other data processing. */
10656 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10657 if (op < 4) {
10658 /* Saturating add/subtract. */
62b44f05
AR
10659 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10660 goto illegal_op;
10661 }
d9ba4830
PB
10662 tmp = load_reg(s, rn);
10663 tmp2 = load_reg(s, rm);
9ee6e8bb 10664 if (op & 1)
9ef39277 10665 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10666 if (op & 2)
9ef39277 10667 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10668 else
9ef39277 10669 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10670 tcg_temp_free_i32(tmp2);
9ee6e8bb 10671 } else {
62b44f05
AR
10672 switch (op) {
10673 case 0x0a: /* rbit */
10674 case 0x08: /* rev */
10675 case 0x09: /* rev16 */
10676 case 0x0b: /* revsh */
10677 case 0x18: /* clz */
10678 break;
10679 case 0x10: /* sel */
10680 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10681 goto illegal_op;
10682 }
10683 break;
10684 case 0x20: /* crc32/crc32c */
10685 case 0x21:
10686 case 0x22:
10687 case 0x28:
10688 case 0x29:
10689 case 0x2a:
10690 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10691 goto illegal_op;
10692 }
10693 break;
10694 default:
10695 goto illegal_op;
10696 }
d9ba4830 10697 tmp = load_reg(s, rn);
9ee6e8bb
PB
10698 switch (op) {
10699 case 0x0a: /* rbit */
d9ba4830 10700 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10701 break;
10702 case 0x08: /* rev */
66896cb8 10703 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10704 break;
10705 case 0x09: /* rev16 */
d9ba4830 10706 gen_rev16(tmp);
9ee6e8bb
PB
10707 break;
10708 case 0x0b: /* revsh */
d9ba4830 10709 gen_revsh(tmp);
9ee6e8bb
PB
10710 break;
10711 case 0x10: /* sel */
d9ba4830 10712 tmp2 = load_reg(s, rm);
7d1b0095 10713 tmp3 = tcg_temp_new_i32();
0ecb72a5 10714 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10715 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10716 tcg_temp_free_i32(tmp3);
10717 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10718 break;
10719 case 0x18: /* clz */
7539a012 10720 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10721 break;
eb0ecd5a
WN
10722 case 0x20:
10723 case 0x21:
10724 case 0x22:
10725 case 0x28:
10726 case 0x29:
10727 case 0x2a:
10728 {
10729 /* crc32/crc32c */
10730 uint32_t sz = op & 0x3;
10731 uint32_t c = op & 0x8;
10732
eb0ecd5a 10733 tmp2 = load_reg(s, rm);
aa633469
PM
10734 if (sz == 0) {
10735 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10736 } else if (sz == 1) {
10737 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10738 }
eb0ecd5a
WN
10739 tmp3 = tcg_const_i32(1 << sz);
10740 if (c) {
10741 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10742 } else {
10743 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10744 }
10745 tcg_temp_free_i32(tmp2);
10746 tcg_temp_free_i32(tmp3);
10747 break;
10748 }
9ee6e8bb 10749 default:
62b44f05 10750 g_assert_not_reached();
9ee6e8bb
PB
10751 }
10752 }
d9ba4830 10753 store_reg(s, rd, tmp);
9ee6e8bb
PB
10754 break;
10755 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10756 switch ((insn >> 20) & 7) {
10757 case 0: /* 32 x 32 -> 32 */
10758 case 7: /* Unsigned sum of absolute differences. */
10759 break;
10760 case 1: /* 16 x 16 -> 32 */
10761 case 2: /* Dual multiply add. */
10762 case 3: /* 32 * 16 -> 32msb */
10763 case 4: /* Dual multiply subtract. */
10764 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10765 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10766 goto illegal_op;
10767 }
10768 break;
10769 }
9ee6e8bb 10770 op = (insn >> 4) & 0xf;
d9ba4830
PB
10771 tmp = load_reg(s, rn);
10772 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10773 switch ((insn >> 20) & 7) {
10774 case 0: /* 32 x 32 -> 32 */
d9ba4830 10775 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10776 tcg_temp_free_i32(tmp2);
9ee6e8bb 10777 if (rs != 15) {
d9ba4830 10778 tmp2 = load_reg(s, rs);
9ee6e8bb 10779 if (op)
d9ba4830 10780 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10781 else
d9ba4830 10782 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10783 tcg_temp_free_i32(tmp2);
9ee6e8bb 10784 }
9ee6e8bb
PB
10785 break;
10786 case 1: /* 16 x 16 -> 32 */
d9ba4830 10787 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10788 tcg_temp_free_i32(tmp2);
9ee6e8bb 10789 if (rs != 15) {
d9ba4830 10790 tmp2 = load_reg(s, rs);
9ef39277 10791 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10792 tcg_temp_free_i32(tmp2);
9ee6e8bb 10793 }
9ee6e8bb
PB
10794 break;
10795 case 2: /* Dual multiply add. */
10796 case 4: /* Dual multiply subtract. */
10797 if (op)
d9ba4830
PB
10798 gen_swap_half(tmp2);
10799 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10800 if (insn & (1 << 22)) {
e1d177b9 10801 /* This subtraction cannot overflow. */
d9ba4830 10802 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10803 } else {
e1d177b9
PM
10804 /* This addition cannot overflow 32 bits;
10805 * however it may overflow considered as a signed
10806 * operation, in which case we must set the Q flag.
10807 */
9ef39277 10808 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10809 }
7d1b0095 10810 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10811 if (rs != 15)
10812 {
d9ba4830 10813 tmp2 = load_reg(s, rs);
9ef39277 10814 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10815 tcg_temp_free_i32(tmp2);
9ee6e8bb 10816 }
9ee6e8bb
PB
10817 break;
10818 case 3: /* 32 * 16 -> 32msb */
10819 if (op)
d9ba4830 10820 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10821 else
d9ba4830 10822 gen_sxth(tmp2);
a7812ae4
PB
10823 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10824 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10825 tmp = tcg_temp_new_i32();
ecc7b3aa 10826 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10827 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10828 if (rs != 15)
10829 {
d9ba4830 10830 tmp2 = load_reg(s, rs);
9ef39277 10831 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10832 tcg_temp_free_i32(tmp2);
9ee6e8bb 10833 }
9ee6e8bb 10834 break;
838fa72d
AJ
10835 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10836 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10837 if (rs != 15) {
838fa72d
AJ
10838 tmp = load_reg(s, rs);
10839 if (insn & (1 << 20)) {
10840 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10841 } else {
838fa72d 10842 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10843 }
2c0262af 10844 }
838fa72d
AJ
10845 if (insn & (1 << 4)) {
10846 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10847 }
10848 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10849 tmp = tcg_temp_new_i32();
ecc7b3aa 10850 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10851 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10852 break;
10853 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10854 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10855 tcg_temp_free_i32(tmp2);
9ee6e8bb 10856 if (rs != 15) {
d9ba4830
PB
10857 tmp2 = load_reg(s, rs);
10858 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10859 tcg_temp_free_i32(tmp2);
5fd46862 10860 }
9ee6e8bb 10861 break;
2c0262af 10862 }
d9ba4830 10863 store_reg(s, rd, tmp);
2c0262af 10864 break;
9ee6e8bb
PB
10865 case 6: case 7: /* 64-bit multiply, Divide. */
10866 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10867 tmp = load_reg(s, rn);
10868 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10869 if ((op & 0x50) == 0x10) {
10870 /* sdiv, udiv */
d614a513 10871 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10872 goto illegal_op;
47789990 10873 }
9ee6e8bb 10874 if (op & 0x20)
5e3f878a 10875 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10876 else
5e3f878a 10877 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10878 tcg_temp_free_i32(tmp2);
5e3f878a 10879 store_reg(s, rd, tmp);
9ee6e8bb
PB
10880 } else if ((op & 0xe) == 0xc) {
10881 /* Dual multiply accumulate long. */
62b44f05
AR
10882 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10883 tcg_temp_free_i32(tmp);
10884 tcg_temp_free_i32(tmp2);
10885 goto illegal_op;
10886 }
9ee6e8bb 10887 if (op & 1)
5e3f878a
PB
10888 gen_swap_half(tmp2);
10889 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10890 if (op & 0x10) {
5e3f878a 10891 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10892 } else {
5e3f878a 10893 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10894 }
7d1b0095 10895 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10896 /* BUGFIX */
10897 tmp64 = tcg_temp_new_i64();
10898 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10899 tcg_temp_free_i32(tmp);
a7812ae4
PB
10900 gen_addq(s, tmp64, rs, rd);
10901 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10902 tcg_temp_free_i64(tmp64);
2c0262af 10903 } else {
9ee6e8bb
PB
10904 if (op & 0x20) {
10905 /* Unsigned 64-bit multiply */
a7812ae4 10906 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10907 } else {
9ee6e8bb
PB
10908 if (op & 8) {
10909 /* smlalxy */
62b44f05
AR
10910 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10911 tcg_temp_free_i32(tmp2);
10912 tcg_temp_free_i32(tmp);
10913 goto illegal_op;
10914 }
5e3f878a 10915 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10916 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10917 tmp64 = tcg_temp_new_i64();
10918 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10919 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10920 } else {
10921 /* Signed 64-bit multiply */
a7812ae4 10922 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10923 }
b5ff1b31 10924 }
9ee6e8bb
PB
10925 if (op & 4) {
10926 /* umaal */
62b44f05
AR
10927 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10928 tcg_temp_free_i64(tmp64);
10929 goto illegal_op;
10930 }
a7812ae4
PB
10931 gen_addq_lo(s, tmp64, rs);
10932 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10933 } else if (op & 0x40) {
10934 /* 64-bit accumulate. */
a7812ae4 10935 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10936 }
a7812ae4 10937 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10938 tcg_temp_free_i64(tmp64);
5fd46862 10939 }
2c0262af 10940 break;
9ee6e8bb
PB
10941 }
10942 break;
10943 case 6: case 7: case 14: case 15:
10944 /* Coprocessor. */
7517748e
PM
10945 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10946 /* We don't currently implement M profile FP support,
b1e5336a
PM
10947 * so this entire space should give a NOCP fault, with
10948 * the exception of the v8M VLLDM and VLSTM insns, which
10949 * must be NOPs in Secure state and UNDEF in Nonsecure state.
7517748e 10950 */
b1e5336a
PM
10951 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10952 (insn & 0xffa00f00) == 0xec200a00) {
10953 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10954 * - VLLDM, VLSTM
10955 * We choose to UNDEF if the RAZ bits are non-zero.
10956 */
10957 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10958 goto illegal_op;
10959 }
10960 /* Just NOP since FP support is not implemented */
10961 break;
10962 }
10963 /* All other insns: NOCP */
7517748e
PM
10964 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10965 default_exception_el(s));
10966 break;
10967 }
0052087e
RH
10968 if ((insn & 0xfe000a00) == 0xfc000800
10969 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10970 /* The Thumb2 and ARM encodings are identical. */
10971 if (disas_neon_insn_3same_ext(s, insn)) {
10972 goto illegal_op;
10973 }
10974 } else if ((insn & 0xff000a00) == 0xfe000800
10975 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10976 /* The Thumb2 and ARM encodings are identical. */
10977 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10978 goto illegal_op;
10979 }
10980 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 10981 /* Translate into the equivalent ARM encoding. */
f06053e3 10982 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10983 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10984 goto illegal_op;
7dcc1f89 10985 }
6a57f3eb 10986 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10987 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10988 goto illegal_op;
10989 }
9ee6e8bb
PB
10990 } else {
10991 if (insn & (1 << 28))
10992 goto illegal_op;
7dcc1f89 10993 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10994 goto illegal_op;
7dcc1f89 10995 }
9ee6e8bb
PB
10996 }
10997 break;
10998 case 8: case 9: case 10: case 11:
10999 if (insn & (1 << 15)) {
11000 /* Branches, misc control. */
11001 if (insn & 0x5000) {
11002 /* Unconditional branch. */
11003 /* signextend(hw1[10:0]) -> offset[:12]. */
11004 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11005 /* hw1[10:0] -> offset[11:1]. */
11006 offset |= (insn & 0x7ff) << 1;
11007 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
11008 offset[24:22] already have the same value because of the
11009 sign extension above. */
11010 offset ^= ((~insn) & (1 << 13)) << 10;
11011 offset ^= ((~insn) & (1 << 11)) << 11;
11012
9ee6e8bb
PB
11013 if (insn & (1 << 14)) {
11014 /* Branch and link. */
3174f8e9 11015 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 11016 }
3b46e624 11017
b0109805 11018 offset += s->pc;
9ee6e8bb
PB
11019 if (insn & (1 << 12)) {
11020 /* b/bl */
b0109805 11021 gen_jmp(s, offset);
9ee6e8bb
PB
11022 } else {
11023 /* blx */
b0109805 11024 offset &= ~(uint32_t)2;
be5e7a76 11025 /* thumb2 bx, no need to check */
b0109805 11026 gen_bx_im(s, offset);
2c0262af 11027 }
9ee6e8bb
PB
11028 } else if (((insn >> 23) & 7) == 7) {
11029 /* Misc control */
11030 if (insn & (1 << 13))
11031 goto illegal_op;
11032
11033 if (insn & (1 << 26)) {
001b3cab
PM
11034 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11035 goto illegal_op;
11036 }
37e6456e
PM
11037 if (!(insn & (1 << 20))) {
11038 /* Hypervisor call (v7) */
11039 int imm16 = extract32(insn, 16, 4) << 12
11040 | extract32(insn, 0, 12);
11041 ARCH(7);
11042 if (IS_USER(s)) {
11043 goto illegal_op;
11044 }
11045 gen_hvc(s, imm16);
11046 } else {
11047 /* Secure monitor call (v6+) */
11048 ARCH(6K);
11049 if (IS_USER(s)) {
11050 goto illegal_op;
11051 }
11052 gen_smc(s);
11053 }
2c0262af 11054 } else {
9ee6e8bb
PB
11055 op = (insn >> 20) & 7;
11056 switch (op) {
11057 case 0: /* msr cpsr. */
b53d8923 11058 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 11059 tmp = load_reg(s, rn);
b28b3377
PM
11060 /* the constant is the mask and SYSm fields */
11061 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 11062 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 11063 tcg_temp_free_i32(addr);
7d1b0095 11064 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11065 gen_lookup_tb(s);
11066 break;
11067 }
11068 /* fall through */
11069 case 1: /* msr spsr. */
b53d8923 11070 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11071 goto illegal_op;
b53d8923 11072 }
8bfd0550
PM
11073
11074 if (extract32(insn, 5, 1)) {
11075 /* MSR (banked) */
11076 int sysm = extract32(insn, 8, 4) |
11077 (extract32(insn, 4, 1) << 4);
11078 int r = op & 1;
11079
11080 gen_msr_banked(s, r, sysm, rm);
11081 break;
11082 }
11083
11084 /* MSR (for PSRs) */
2fbac54b
FN
11085 tmp = load_reg(s, rn);
11086 if (gen_set_psr(s,
7dcc1f89 11087 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 11088 op == 1, tmp))
9ee6e8bb
PB
11089 goto illegal_op;
11090 break;
11091 case 2: /* cps, nop-hint. */
11092 if (((insn >> 8) & 7) == 0) {
11093 gen_nop_hint(s, insn & 0xff);
11094 }
11095 /* Implemented as NOP in user mode. */
11096 if (IS_USER(s))
11097 break;
11098 offset = 0;
11099 imm = 0;
11100 if (insn & (1 << 10)) {
11101 if (insn & (1 << 7))
11102 offset |= CPSR_A;
11103 if (insn & (1 << 6))
11104 offset |= CPSR_I;
11105 if (insn & (1 << 5))
11106 offset |= CPSR_F;
11107 if (insn & (1 << 9))
11108 imm = CPSR_A | CPSR_I | CPSR_F;
11109 }
11110 if (insn & (1 << 8)) {
11111 offset |= 0x1f;
11112 imm |= (insn & 0x1f);
11113 }
11114 if (offset) {
2fbac54b 11115 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
11116 }
11117 break;
11118 case 3: /* Special control operations. */
14120108 11119 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 11120 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
11121 goto illegal_op;
11122 }
9ee6e8bb
PB
11123 op = (insn >> 4) & 0xf;
11124 switch (op) {
11125 case 2: /* clrex */
426f5abc 11126 gen_clrex(s);
9ee6e8bb
PB
11127 break;
11128 case 4: /* dsb */
11129 case 5: /* dmb */
61e4c432 11130 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 11131 break;
6df99dec
SS
11132 case 6: /* isb */
11133 /* We need to break the TB after this insn
11134 * to execute self-modifying code correctly
11135 * and also to take any pending interrupts
11136 * immediately.
11137 */
0b609cc1 11138 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 11139 break;
9ee6e8bb
PB
11140 default:
11141 goto illegal_op;
11142 }
11143 break;
11144 case 4: /* bxj */
9d7c59c8
PM
11145 /* Trivial implementation equivalent to bx.
11146 * This instruction doesn't exist at all for M-profile.
11147 */
11148 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11149 goto illegal_op;
11150 }
d9ba4830
PB
11151 tmp = load_reg(s, rn);
11152 gen_bx(s, tmp);
9ee6e8bb
PB
11153 break;
11154 case 5: /* Exception return. */
b8b45b68
RV
11155 if (IS_USER(s)) {
11156 goto illegal_op;
11157 }
11158 if (rn != 14 || rd != 15) {
11159 goto illegal_op;
11160 }
11161 tmp = load_reg(s, rn);
11162 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11163 gen_exception_return(s, tmp);
11164 break;
8bfd0550 11165 case 6: /* MRS */
43ac6574
PM
11166 if (extract32(insn, 5, 1) &&
11167 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11168 /* MRS (banked) */
11169 int sysm = extract32(insn, 16, 4) |
11170 (extract32(insn, 4, 1) << 4);
11171
11172 gen_mrs_banked(s, 0, sysm, rd);
11173 break;
11174 }
11175
3d54026f
PM
11176 if (extract32(insn, 16, 4) != 0xf) {
11177 goto illegal_op;
11178 }
11179 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11180 extract32(insn, 0, 8) != 0) {
11181 goto illegal_op;
11182 }
11183
8bfd0550 11184 /* mrs cpsr */
7d1b0095 11185 tmp = tcg_temp_new_i32();
b53d8923 11186 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
11187 addr = tcg_const_i32(insn & 0xff);
11188 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 11189 tcg_temp_free_i32(addr);
9ee6e8bb 11190 } else {
9ef39277 11191 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 11192 }
8984bd2e 11193 store_reg(s, rd, tmp);
9ee6e8bb 11194 break;
8bfd0550 11195 case 7: /* MRS */
43ac6574
PM
11196 if (extract32(insn, 5, 1) &&
11197 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11198 /* MRS (banked) */
11199 int sysm = extract32(insn, 16, 4) |
11200 (extract32(insn, 4, 1) << 4);
11201
11202 gen_mrs_banked(s, 1, sysm, rd);
11203 break;
11204 }
11205
11206 /* mrs spsr. */
9ee6e8bb 11207 /* Not accessible in user mode. */
b53d8923 11208 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11209 goto illegal_op;
b53d8923 11210 }
3d54026f
PM
11211
11212 if (extract32(insn, 16, 4) != 0xf ||
11213 extract32(insn, 0, 8) != 0) {
11214 goto illegal_op;
11215 }
11216
d9ba4830
PB
11217 tmp = load_cpu_field(spsr);
11218 store_reg(s, rd, tmp);
9ee6e8bb 11219 break;
2c0262af
FB
11220 }
11221 }
9ee6e8bb
PB
11222 } else {
11223 /* Conditional branch. */
11224 op = (insn >> 22) & 0xf;
11225 /* Generate a conditional jump to next instruction. */
c2d9644e 11226 arm_skip_unless(s, op);
9ee6e8bb
PB
11227
11228 /* offset[11:1] = insn[10:0] */
11229 offset = (insn & 0x7ff) << 1;
11230 /* offset[17:12] = insn[21:16]. */
11231 offset |= (insn & 0x003f0000) >> 4;
11232 /* offset[31:20] = insn[26]. */
11233 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11234 /* offset[18] = insn[13]. */
11235 offset |= (insn & (1 << 13)) << 5;
11236 /* offset[19] = insn[11]. */
11237 offset |= (insn & (1 << 11)) << 8;
11238
11239 /* jump to the offset */
b0109805 11240 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
11241 }
11242 } else {
11243 /* Data processing immediate. */
11244 if (insn & (1 << 25)) {
11245 if (insn & (1 << 24)) {
11246 if (insn & (1 << 20))
11247 goto illegal_op;
11248 /* Bitfield/Saturate. */
11249 op = (insn >> 21) & 7;
11250 imm = insn & 0x1f;
11251 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 11252 if (rn == 15) {
7d1b0095 11253 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
11254 tcg_gen_movi_i32(tmp, 0);
11255 } else {
11256 tmp = load_reg(s, rn);
11257 }
9ee6e8bb
PB
11258 switch (op) {
11259 case 2: /* Signed bitfield extract. */
11260 imm++;
11261 if (shift + imm > 32)
11262 goto illegal_op;
59a71b4c
RH
11263 if (imm < 32) {
11264 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11265 }
9ee6e8bb
PB
11266 break;
11267 case 6: /* Unsigned bitfield extract. */
11268 imm++;
11269 if (shift + imm > 32)
11270 goto illegal_op;
59a71b4c
RH
11271 if (imm < 32) {
11272 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11273 }
9ee6e8bb
PB
11274 break;
11275 case 3: /* Bitfield insert/clear. */
11276 if (imm < shift)
11277 goto illegal_op;
11278 imm = imm + 1 - shift;
11279 if (imm != 32) {
6ddbc6e4 11280 tmp2 = load_reg(s, rd);
d593c48e 11281 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 11282 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11283 }
11284 break;
11285 case 7:
11286 goto illegal_op;
11287 default: /* Saturate. */
9ee6e8bb
PB
11288 if (shift) {
11289 if (op & 1)
6ddbc6e4 11290 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 11291 else
6ddbc6e4 11292 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 11293 }
6ddbc6e4 11294 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
11295 if (op & 4) {
11296 /* Unsigned. */
62b44f05
AR
11297 if ((op & 1) && shift == 0) {
11298 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11299 tcg_temp_free_i32(tmp);
11300 tcg_temp_free_i32(tmp2);
11301 goto illegal_op;
11302 }
9ef39277 11303 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11304 } else {
9ef39277 11305 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 11306 }
2c0262af 11307 } else {
9ee6e8bb 11308 /* Signed. */
62b44f05
AR
11309 if ((op & 1) && shift == 0) {
11310 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11311 tcg_temp_free_i32(tmp);
11312 tcg_temp_free_i32(tmp2);
11313 goto illegal_op;
11314 }
9ef39277 11315 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11316 } else {
9ef39277 11317 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 11318 }
2c0262af 11319 }
b75263d6 11320 tcg_temp_free_i32(tmp2);
9ee6e8bb 11321 break;
2c0262af 11322 }
6ddbc6e4 11323 store_reg(s, rd, tmp);
9ee6e8bb
PB
11324 } else {
11325 imm = ((insn & 0x04000000) >> 15)
11326 | ((insn & 0x7000) >> 4) | (insn & 0xff);
11327 if (insn & (1 << 22)) {
11328 /* 16-bit immediate. */
11329 imm |= (insn >> 4) & 0xf000;
11330 if (insn & (1 << 23)) {
11331 /* movt */
5e3f878a 11332 tmp = load_reg(s, rd);
86831435 11333 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 11334 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 11335 } else {
9ee6e8bb 11336 /* movw */
7d1b0095 11337 tmp = tcg_temp_new_i32();
5e3f878a 11338 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
11339 }
11340 } else {
9ee6e8bb
PB
11341 /* Add/sub 12-bit immediate. */
11342 if (rn == 15) {
b0109805 11343 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 11344 if (insn & (1 << 23))
b0109805 11345 offset -= imm;
9ee6e8bb 11346 else
b0109805 11347 offset += imm;
7d1b0095 11348 tmp = tcg_temp_new_i32();
5e3f878a 11349 tcg_gen_movi_i32(tmp, offset);
2c0262af 11350 } else {
5e3f878a 11351 tmp = load_reg(s, rn);
9ee6e8bb 11352 if (insn & (1 << 23))
5e3f878a 11353 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 11354 else
5e3f878a 11355 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 11356 }
9ee6e8bb 11357 }
5e3f878a 11358 store_reg(s, rd, tmp);
191abaa2 11359 }
9ee6e8bb
PB
11360 } else {
11361 int shifter_out = 0;
11362 /* modified 12-bit immediate. */
11363 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11364 imm = (insn & 0xff);
11365 switch (shift) {
11366 case 0: /* XY */
11367 /* Nothing to do. */
11368 break;
11369 case 1: /* 00XY00XY */
11370 imm |= imm << 16;
11371 break;
11372 case 2: /* XY00XY00 */
11373 imm |= imm << 16;
11374 imm <<= 8;
11375 break;
11376 case 3: /* XYXYXYXY */
11377 imm |= imm << 16;
11378 imm |= imm << 8;
11379 break;
11380 default: /* Rotated constant. */
11381 shift = (shift << 1) | (imm >> 7);
11382 imm |= 0x80;
11383 imm = imm << (32 - shift);
11384 shifter_out = 1;
11385 break;
b5ff1b31 11386 }
7d1b0095 11387 tmp2 = tcg_temp_new_i32();
3174f8e9 11388 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 11389 rn = (insn >> 16) & 0xf;
3174f8e9 11390 if (rn == 15) {
7d1b0095 11391 tmp = tcg_temp_new_i32();
3174f8e9
FN
11392 tcg_gen_movi_i32(tmp, 0);
11393 } else {
11394 tmp = load_reg(s, rn);
11395 }
9ee6e8bb
PB
11396 op = (insn >> 21) & 0xf;
11397 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 11398 shifter_out, tmp, tmp2))
9ee6e8bb 11399 goto illegal_op;
7d1b0095 11400 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11401 rd = (insn >> 8) & 0xf;
11402 if (rd != 15) {
3174f8e9
FN
11403 store_reg(s, rd, tmp);
11404 } else {
7d1b0095 11405 tcg_temp_free_i32(tmp);
2c0262af 11406 }
2c0262af 11407 }
9ee6e8bb
PB
11408 }
11409 break;
11410 case 12: /* Load/store single data item. */
11411 {
11412 int postinc = 0;
11413 int writeback = 0;
a99caa48 11414 int memidx;
9bb6558a
PM
11415 ISSInfo issinfo;
11416
9ee6e8bb 11417 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 11418 if (disas_neon_ls_insn(s, insn)) {
c1713132 11419 goto illegal_op;
7dcc1f89 11420 }
9ee6e8bb
PB
11421 break;
11422 }
a2fdc890
PM
11423 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11424 if (rs == 15) {
11425 if (!(insn & (1 << 20))) {
11426 goto illegal_op;
11427 }
11428 if (op != 2) {
11429 /* Byte or halfword load space with dest == r15 : memory hints.
11430 * Catch them early so we don't emit pointless addressing code.
11431 * This space is a mix of:
11432 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11433 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11434 * cores)
11435 * unallocated hints, which must be treated as NOPs
11436 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11437 * which is easiest for the decoding logic
11438 * Some space which must UNDEF
11439 */
11440 int op1 = (insn >> 23) & 3;
11441 int op2 = (insn >> 6) & 0x3f;
11442 if (op & 2) {
11443 goto illegal_op;
11444 }
11445 if (rn == 15) {
02afbf64
PM
11446 /* UNPREDICTABLE, unallocated hint or
11447 * PLD/PLDW/PLI (literal)
11448 */
2eea841c 11449 return;
a2fdc890
PM
11450 }
11451 if (op1 & 1) {
2eea841c 11452 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11453 }
11454 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 11455 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11456 }
11457 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 11458 goto illegal_op;
a2fdc890
PM
11459 }
11460 }
a99caa48 11461 memidx = get_mem_index(s);
9ee6e8bb 11462 if (rn == 15) {
7d1b0095 11463 addr = tcg_temp_new_i32();
9ee6e8bb
PB
11464 /* PC relative. */
11465 /* s->pc has already been incremented by 4. */
11466 imm = s->pc & 0xfffffffc;
11467 if (insn & (1 << 23))
11468 imm += insn & 0xfff;
11469 else
11470 imm -= insn & 0xfff;
b0109805 11471 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 11472 } else {
b0109805 11473 addr = load_reg(s, rn);
9ee6e8bb
PB
11474 if (insn & (1 << 23)) {
11475 /* Positive offset. */
11476 imm = insn & 0xfff;
b0109805 11477 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 11478 } else {
9ee6e8bb 11479 imm = insn & 0xff;
2a0308c5
PM
11480 switch ((insn >> 8) & 0xf) {
11481 case 0x0: /* Shifted Register. */
9ee6e8bb 11482 shift = (insn >> 4) & 0xf;
2a0308c5
PM
11483 if (shift > 3) {
11484 tcg_temp_free_i32(addr);
18c9b560 11485 goto illegal_op;
2a0308c5 11486 }
b26eefb6 11487 tmp = load_reg(s, rm);
9ee6e8bb 11488 if (shift)
b26eefb6 11489 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11490 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11491 tcg_temp_free_i32(tmp);
9ee6e8bb 11492 break;
2a0308c5 11493 case 0xc: /* Negative offset. */
b0109805 11494 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11495 break;
2a0308c5 11496 case 0xe: /* User privilege. */
b0109805 11497 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11498 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11499 break;
2a0308c5 11500 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11501 imm = -imm;
11502 /* Fall through. */
2a0308c5 11503 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11504 postinc = 1;
11505 writeback = 1;
11506 break;
2a0308c5 11507 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11508 imm = -imm;
11509 /* Fall through. */
2a0308c5 11510 case 0xf: /* Pre-increment. */
b0109805 11511 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
11512 writeback = 1;
11513 break;
11514 default:
2a0308c5 11515 tcg_temp_free_i32(addr);
b7bcbe95 11516 goto illegal_op;
9ee6e8bb
PB
11517 }
11518 }
11519 }
9bb6558a
PM
11520
11521 issinfo = writeback ? ISSInvalid : rs;
11522
9ee6e8bb
PB
11523 if (insn & (1 << 20)) {
11524 /* Load. */
5a839c0d 11525 tmp = tcg_temp_new_i32();
a2fdc890 11526 switch (op) {
5a839c0d 11527 case 0:
9bb6558a 11528 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11529 break;
11530 case 4:
9bb6558a 11531 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11532 break;
11533 case 1:
9bb6558a 11534 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11535 break;
11536 case 5:
9bb6558a 11537 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11538 break;
11539 case 2:
9bb6558a 11540 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11541 break;
2a0308c5 11542 default:
5a839c0d 11543 tcg_temp_free_i32(tmp);
2a0308c5
PM
11544 tcg_temp_free_i32(addr);
11545 goto illegal_op;
a2fdc890
PM
11546 }
11547 if (rs == 15) {
3bb8a96f 11548 gen_bx_excret(s, tmp);
9ee6e8bb 11549 } else {
a2fdc890 11550 store_reg(s, rs, tmp);
9ee6e8bb
PB
11551 }
11552 } else {
11553 /* Store. */
b0109805 11554 tmp = load_reg(s, rs);
9ee6e8bb 11555 switch (op) {
5a839c0d 11556 case 0:
9bb6558a 11557 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11558 break;
11559 case 1:
9bb6558a 11560 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11561 break;
11562 case 2:
9bb6558a 11563 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11564 break;
2a0308c5 11565 default:
5a839c0d 11566 tcg_temp_free_i32(tmp);
2a0308c5
PM
11567 tcg_temp_free_i32(addr);
11568 goto illegal_op;
b7bcbe95 11569 }
5a839c0d 11570 tcg_temp_free_i32(tmp);
2c0262af 11571 }
9ee6e8bb 11572 if (postinc)
b0109805
PB
11573 tcg_gen_addi_i32(addr, addr, imm);
11574 if (writeback) {
11575 store_reg(s, rn, addr);
11576 } else {
7d1b0095 11577 tcg_temp_free_i32(addr);
b0109805 11578 }
9ee6e8bb
PB
11579 }
11580 break;
11581 default:
11582 goto illegal_op;
2c0262af 11583 }
2eea841c 11584 return;
9ee6e8bb 11585illegal_op:
2eea841c
PM
11586 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11587 default_exception_el(s));
2c0262af
FB
11588}
11589
296e5a0a 11590static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11591{
296e5a0a 11592 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11593 int32_t offset;
11594 int i;
39d5492a
PM
11595 TCGv_i32 tmp;
11596 TCGv_i32 tmp2;
11597 TCGv_i32 addr;
99c475ab 11598
99c475ab
FB
11599 switch (insn >> 12) {
11600 case 0: case 1:
396e467c 11601
99c475ab
FB
11602 rd = insn & 7;
11603 op = (insn >> 11) & 3;
11604 if (op == 3) {
11605 /* add/subtract */
11606 rn = (insn >> 3) & 7;
396e467c 11607 tmp = load_reg(s, rn);
99c475ab
FB
11608 if (insn & (1 << 10)) {
11609 /* immediate */
7d1b0095 11610 tmp2 = tcg_temp_new_i32();
396e467c 11611 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11612 } else {
11613 /* reg */
11614 rm = (insn >> 6) & 7;
396e467c 11615 tmp2 = load_reg(s, rm);
99c475ab 11616 }
9ee6e8bb
PB
11617 if (insn & (1 << 9)) {
11618 if (s->condexec_mask)
396e467c 11619 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11620 else
72485ec4 11621 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11622 } else {
11623 if (s->condexec_mask)
396e467c 11624 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11625 else
72485ec4 11626 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11627 }
7d1b0095 11628 tcg_temp_free_i32(tmp2);
396e467c 11629 store_reg(s, rd, tmp);
99c475ab
FB
11630 } else {
11631 /* shift immediate */
11632 rm = (insn >> 3) & 7;
11633 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11634 tmp = load_reg(s, rm);
11635 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11636 if (!s->condexec_mask)
11637 gen_logic_CC(tmp);
11638 store_reg(s, rd, tmp);
99c475ab
FB
11639 }
11640 break;
11641 case 2: case 3:
11642 /* arithmetic large immediate */
11643 op = (insn >> 11) & 3;
11644 rd = (insn >> 8) & 0x7;
396e467c 11645 if (op == 0) { /* mov */
7d1b0095 11646 tmp = tcg_temp_new_i32();
396e467c 11647 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11648 if (!s->condexec_mask)
396e467c
FN
11649 gen_logic_CC(tmp);
11650 store_reg(s, rd, tmp);
11651 } else {
11652 tmp = load_reg(s, rd);
7d1b0095 11653 tmp2 = tcg_temp_new_i32();
396e467c
FN
11654 tcg_gen_movi_i32(tmp2, insn & 0xff);
11655 switch (op) {
11656 case 1: /* cmp */
72485ec4 11657 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11658 tcg_temp_free_i32(tmp);
11659 tcg_temp_free_i32(tmp2);
396e467c
FN
11660 break;
11661 case 2: /* add */
11662 if (s->condexec_mask)
11663 tcg_gen_add_i32(tmp, tmp, tmp2);
11664 else
72485ec4 11665 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11666 tcg_temp_free_i32(tmp2);
396e467c
FN
11667 store_reg(s, rd, tmp);
11668 break;
11669 case 3: /* sub */
11670 if (s->condexec_mask)
11671 tcg_gen_sub_i32(tmp, tmp, tmp2);
11672 else
72485ec4 11673 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11674 tcg_temp_free_i32(tmp2);
396e467c
FN
11675 store_reg(s, rd, tmp);
11676 break;
11677 }
99c475ab 11678 }
99c475ab
FB
11679 break;
11680 case 4:
11681 if (insn & (1 << 11)) {
11682 rd = (insn >> 8) & 7;
5899f386
FB
11683 /* load pc-relative. Bit 1 of PC is ignored. */
11684 val = s->pc + 2 + ((insn & 0xff) * 4);
11685 val &= ~(uint32_t)2;
7d1b0095 11686 addr = tcg_temp_new_i32();
b0109805 11687 tcg_gen_movi_i32(addr, val);
c40c8556 11688 tmp = tcg_temp_new_i32();
9bb6558a
PM
11689 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11690 rd | ISSIs16Bit);
7d1b0095 11691 tcg_temp_free_i32(addr);
b0109805 11692 store_reg(s, rd, tmp);
99c475ab
FB
11693 break;
11694 }
11695 if (insn & (1 << 10)) {
ebfe27c5
PM
11696 /* 0b0100_01xx_xxxx_xxxx
11697 * - data processing extended, branch and exchange
11698 */
99c475ab
FB
11699 rd = (insn & 7) | ((insn >> 4) & 8);
11700 rm = (insn >> 3) & 0xf;
11701 op = (insn >> 8) & 3;
11702 switch (op) {
11703 case 0: /* add */
396e467c
FN
11704 tmp = load_reg(s, rd);
11705 tmp2 = load_reg(s, rm);
11706 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11707 tcg_temp_free_i32(tmp2);
396e467c 11708 store_reg(s, rd, tmp);
99c475ab
FB
11709 break;
11710 case 1: /* cmp */
396e467c
FN
11711 tmp = load_reg(s, rd);
11712 tmp2 = load_reg(s, rm);
72485ec4 11713 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11714 tcg_temp_free_i32(tmp2);
11715 tcg_temp_free_i32(tmp);
99c475ab
FB
11716 break;
11717 case 2: /* mov/cpy */
396e467c
FN
11718 tmp = load_reg(s, rm);
11719 store_reg(s, rd, tmp);
99c475ab 11720 break;
ebfe27c5
PM
11721 case 3:
11722 {
11723 /* 0b0100_0111_xxxx_xxxx
11724 * - branch [and link] exchange thumb register
11725 */
11726 bool link = insn & (1 << 7);
11727
fb602cb7 11728 if (insn & 3) {
ebfe27c5
PM
11729 goto undef;
11730 }
11731 if (link) {
be5e7a76 11732 ARCH(5);
ebfe27c5 11733 }
fb602cb7
PM
11734 if ((insn & 4)) {
11735 /* BXNS/BLXNS: only exists for v8M with the
11736 * security extensions, and always UNDEF if NonSecure.
11737 * We don't implement these in the user-only mode
11738 * either (in theory you can use them from Secure User
11739 * mode but they are too tied in to system emulation.)
11740 */
11741 if (!s->v8m_secure || IS_USER_ONLY) {
11742 goto undef;
11743 }
11744 if (link) {
3e3fa230 11745 gen_blxns(s, rm);
fb602cb7
PM
11746 } else {
11747 gen_bxns(s, rm);
11748 }
11749 break;
11750 }
11751 /* BLX/BX */
ebfe27c5
PM
11752 tmp = load_reg(s, rm);
11753 if (link) {
99c475ab 11754 val = (uint32_t)s->pc | 1;
7d1b0095 11755 tmp2 = tcg_temp_new_i32();
b0109805
PB
11756 tcg_gen_movi_i32(tmp2, val);
11757 store_reg(s, 14, tmp2);
3bb8a96f
PM
11758 gen_bx(s, tmp);
11759 } else {
11760 /* Only BX works as exception-return, not BLX */
11761 gen_bx_excret(s, tmp);
99c475ab 11762 }
99c475ab
FB
11763 break;
11764 }
ebfe27c5 11765 }
99c475ab
FB
11766 break;
11767 }
11768
11769 /* data processing register */
11770 rd = insn & 7;
11771 rm = (insn >> 3) & 7;
11772 op = (insn >> 6) & 0xf;
11773 if (op == 2 || op == 3 || op == 4 || op == 7) {
11774 /* the shift/rotate ops want the operands backwards */
11775 val = rm;
11776 rm = rd;
11777 rd = val;
11778 val = 1;
11779 } else {
11780 val = 0;
11781 }
11782
396e467c 11783 if (op == 9) { /* neg */
7d1b0095 11784 tmp = tcg_temp_new_i32();
396e467c
FN
11785 tcg_gen_movi_i32(tmp, 0);
11786 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11787 tmp = load_reg(s, rd);
11788 } else {
f764718d 11789 tmp = NULL;
396e467c 11790 }
99c475ab 11791
396e467c 11792 tmp2 = load_reg(s, rm);
5899f386 11793 switch (op) {
99c475ab 11794 case 0x0: /* and */
396e467c 11795 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11796 if (!s->condexec_mask)
396e467c 11797 gen_logic_CC(tmp);
99c475ab
FB
11798 break;
11799 case 0x1: /* eor */
396e467c 11800 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11801 if (!s->condexec_mask)
396e467c 11802 gen_logic_CC(tmp);
99c475ab
FB
11803 break;
11804 case 0x2: /* lsl */
9ee6e8bb 11805 if (s->condexec_mask) {
365af80e 11806 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11807 } else {
9ef39277 11808 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11809 gen_logic_CC(tmp2);
9ee6e8bb 11810 }
99c475ab
FB
11811 break;
11812 case 0x3: /* lsr */
9ee6e8bb 11813 if (s->condexec_mask) {
365af80e 11814 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11815 } else {
9ef39277 11816 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11817 gen_logic_CC(tmp2);
9ee6e8bb 11818 }
99c475ab
FB
11819 break;
11820 case 0x4: /* asr */
9ee6e8bb 11821 if (s->condexec_mask) {
365af80e 11822 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11823 } else {
9ef39277 11824 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11825 gen_logic_CC(tmp2);
9ee6e8bb 11826 }
99c475ab
FB
11827 break;
11828 case 0x5: /* adc */
49b4c31e 11829 if (s->condexec_mask) {
396e467c 11830 gen_adc(tmp, tmp2);
49b4c31e
RH
11831 } else {
11832 gen_adc_CC(tmp, tmp, tmp2);
11833 }
99c475ab
FB
11834 break;
11835 case 0x6: /* sbc */
2de68a49 11836 if (s->condexec_mask) {
396e467c 11837 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11838 } else {
11839 gen_sbc_CC(tmp, tmp, tmp2);
11840 }
99c475ab
FB
11841 break;
11842 case 0x7: /* ror */
9ee6e8bb 11843 if (s->condexec_mask) {
f669df27
AJ
11844 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11845 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11846 } else {
9ef39277 11847 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11848 gen_logic_CC(tmp2);
9ee6e8bb 11849 }
99c475ab
FB
11850 break;
11851 case 0x8: /* tst */
396e467c
FN
11852 tcg_gen_and_i32(tmp, tmp, tmp2);
11853 gen_logic_CC(tmp);
99c475ab 11854 rd = 16;
5899f386 11855 break;
99c475ab 11856 case 0x9: /* neg */
9ee6e8bb 11857 if (s->condexec_mask)
396e467c 11858 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11859 else
72485ec4 11860 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11861 break;
11862 case 0xa: /* cmp */
72485ec4 11863 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11864 rd = 16;
11865 break;
11866 case 0xb: /* cmn */
72485ec4 11867 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11868 rd = 16;
11869 break;
11870 case 0xc: /* orr */
396e467c 11871 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11872 if (!s->condexec_mask)
396e467c 11873 gen_logic_CC(tmp);
99c475ab
FB
11874 break;
11875 case 0xd: /* mul */
7b2919a0 11876 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11877 if (!s->condexec_mask)
396e467c 11878 gen_logic_CC(tmp);
99c475ab
FB
11879 break;
11880 case 0xe: /* bic */
f669df27 11881 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11882 if (!s->condexec_mask)
396e467c 11883 gen_logic_CC(tmp);
99c475ab
FB
11884 break;
11885 case 0xf: /* mvn */
396e467c 11886 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11887 if (!s->condexec_mask)
396e467c 11888 gen_logic_CC(tmp2);
99c475ab 11889 val = 1;
5899f386 11890 rm = rd;
99c475ab
FB
11891 break;
11892 }
11893 if (rd != 16) {
396e467c
FN
11894 if (val) {
11895 store_reg(s, rm, tmp2);
11896 if (op != 0xf)
7d1b0095 11897 tcg_temp_free_i32(tmp);
396e467c
FN
11898 } else {
11899 store_reg(s, rd, tmp);
7d1b0095 11900 tcg_temp_free_i32(tmp2);
396e467c
FN
11901 }
11902 } else {
7d1b0095
PM
11903 tcg_temp_free_i32(tmp);
11904 tcg_temp_free_i32(tmp2);
99c475ab
FB
11905 }
11906 break;
11907
11908 case 5:
11909 /* load/store register offset. */
11910 rd = insn & 7;
11911 rn = (insn >> 3) & 7;
11912 rm = (insn >> 6) & 7;
11913 op = (insn >> 9) & 7;
b0109805 11914 addr = load_reg(s, rn);
b26eefb6 11915 tmp = load_reg(s, rm);
b0109805 11916 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11917 tcg_temp_free_i32(tmp);
99c475ab 11918
c40c8556 11919 if (op < 3) { /* store */
b0109805 11920 tmp = load_reg(s, rd);
c40c8556
PM
11921 } else {
11922 tmp = tcg_temp_new_i32();
11923 }
99c475ab
FB
11924
11925 switch (op) {
11926 case 0: /* str */
9bb6558a 11927 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11928 break;
11929 case 1: /* strh */
9bb6558a 11930 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11931 break;
11932 case 2: /* strb */
9bb6558a 11933 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11934 break;
11935 case 3: /* ldrsb */
9bb6558a 11936 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11937 break;
11938 case 4: /* ldr */
9bb6558a 11939 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11940 break;
11941 case 5: /* ldrh */
9bb6558a 11942 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11943 break;
11944 case 6: /* ldrb */
9bb6558a 11945 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11946 break;
11947 case 7: /* ldrsh */
9bb6558a 11948 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11949 break;
11950 }
c40c8556 11951 if (op >= 3) { /* load */
b0109805 11952 store_reg(s, rd, tmp);
c40c8556
PM
11953 } else {
11954 tcg_temp_free_i32(tmp);
11955 }
7d1b0095 11956 tcg_temp_free_i32(addr);
99c475ab
FB
11957 break;
11958
11959 case 6:
11960 /* load/store word immediate offset */
11961 rd = insn & 7;
11962 rn = (insn >> 3) & 7;
b0109805 11963 addr = load_reg(s, rn);
99c475ab 11964 val = (insn >> 4) & 0x7c;
b0109805 11965 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11966
11967 if (insn & (1 << 11)) {
11968 /* load */
c40c8556 11969 tmp = tcg_temp_new_i32();
12dcc321 11970 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11971 store_reg(s, rd, tmp);
99c475ab
FB
11972 } else {
11973 /* store */
b0109805 11974 tmp = load_reg(s, rd);
12dcc321 11975 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11976 tcg_temp_free_i32(tmp);
99c475ab 11977 }
7d1b0095 11978 tcg_temp_free_i32(addr);
99c475ab
FB
11979 break;
11980
11981 case 7:
11982 /* load/store byte immediate offset */
11983 rd = insn & 7;
11984 rn = (insn >> 3) & 7;
b0109805 11985 addr = load_reg(s, rn);
99c475ab 11986 val = (insn >> 6) & 0x1f;
b0109805 11987 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11988
11989 if (insn & (1 << 11)) {
11990 /* load */
c40c8556 11991 tmp = tcg_temp_new_i32();
9bb6558a 11992 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11993 store_reg(s, rd, tmp);
99c475ab
FB
11994 } else {
11995 /* store */
b0109805 11996 tmp = load_reg(s, rd);
9bb6558a 11997 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11998 tcg_temp_free_i32(tmp);
99c475ab 11999 }
7d1b0095 12000 tcg_temp_free_i32(addr);
99c475ab
FB
12001 break;
12002
12003 case 8:
12004 /* load/store halfword immediate offset */
12005 rd = insn & 7;
12006 rn = (insn >> 3) & 7;
b0109805 12007 addr = load_reg(s, rn);
99c475ab 12008 val = (insn >> 5) & 0x3e;
b0109805 12009 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12010
12011 if (insn & (1 << 11)) {
12012 /* load */
c40c8556 12013 tmp = tcg_temp_new_i32();
9bb6558a 12014 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12015 store_reg(s, rd, tmp);
99c475ab
FB
12016 } else {
12017 /* store */
b0109805 12018 tmp = load_reg(s, rd);
9bb6558a 12019 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12020 tcg_temp_free_i32(tmp);
99c475ab 12021 }
7d1b0095 12022 tcg_temp_free_i32(addr);
99c475ab
FB
12023 break;
12024
12025 case 9:
12026 /* load/store from stack */
12027 rd = (insn >> 8) & 7;
b0109805 12028 addr = load_reg(s, 13);
99c475ab 12029 val = (insn & 0xff) * 4;
b0109805 12030 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12031
12032 if (insn & (1 << 11)) {
12033 /* load */
c40c8556 12034 tmp = tcg_temp_new_i32();
9bb6558a 12035 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12036 store_reg(s, rd, tmp);
99c475ab
FB
12037 } else {
12038 /* store */
b0109805 12039 tmp = load_reg(s, rd);
9bb6558a 12040 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12041 tcg_temp_free_i32(tmp);
99c475ab 12042 }
7d1b0095 12043 tcg_temp_free_i32(addr);
99c475ab
FB
12044 break;
12045
12046 case 10:
12047 /* add to high reg */
12048 rd = (insn >> 8) & 7;
5899f386
FB
12049 if (insn & (1 << 11)) {
12050 /* SP */
5e3f878a 12051 tmp = load_reg(s, 13);
5899f386
FB
12052 } else {
12053 /* PC. bit 1 is ignored. */
7d1b0095 12054 tmp = tcg_temp_new_i32();
5e3f878a 12055 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 12056 }
99c475ab 12057 val = (insn & 0xff) * 4;
5e3f878a
PB
12058 tcg_gen_addi_i32(tmp, tmp, val);
12059 store_reg(s, rd, tmp);
99c475ab
FB
12060 break;
12061
12062 case 11:
12063 /* misc */
12064 op = (insn >> 8) & 0xf;
12065 switch (op) {
12066 case 0:
12067 /* adjust stack pointer */
b26eefb6 12068 tmp = load_reg(s, 13);
99c475ab
FB
12069 val = (insn & 0x7f) * 4;
12070 if (insn & (1 << 7))
6a0d8a1d 12071 val = -(int32_t)val;
b26eefb6
PB
12072 tcg_gen_addi_i32(tmp, tmp, val);
12073 store_reg(s, 13, tmp);
99c475ab
FB
12074 break;
12075
9ee6e8bb
PB
12076 case 2: /* sign/zero extend. */
12077 ARCH(6);
12078 rd = insn & 7;
12079 rm = (insn >> 3) & 7;
b0109805 12080 tmp = load_reg(s, rm);
9ee6e8bb 12081 switch ((insn >> 6) & 3) {
b0109805
PB
12082 case 0: gen_sxth(tmp); break;
12083 case 1: gen_sxtb(tmp); break;
12084 case 2: gen_uxth(tmp); break;
12085 case 3: gen_uxtb(tmp); break;
9ee6e8bb 12086 }
b0109805 12087 store_reg(s, rd, tmp);
9ee6e8bb 12088 break;
99c475ab
FB
12089 case 4: case 5: case 0xc: case 0xd:
12090 /* push/pop */
b0109805 12091 addr = load_reg(s, 13);
5899f386
FB
12092 if (insn & (1 << 8))
12093 offset = 4;
99c475ab 12094 else
5899f386
FB
12095 offset = 0;
12096 for (i = 0; i < 8; i++) {
12097 if (insn & (1 << i))
12098 offset += 4;
12099 }
12100 if ((insn & (1 << 11)) == 0) {
b0109805 12101 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12102 }
99c475ab
FB
12103 for (i = 0; i < 8; i++) {
12104 if (insn & (1 << i)) {
12105 if (insn & (1 << 11)) {
12106 /* pop */
c40c8556 12107 tmp = tcg_temp_new_i32();
12dcc321 12108 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12109 store_reg(s, i, tmp);
99c475ab
FB
12110 } else {
12111 /* push */
b0109805 12112 tmp = load_reg(s, i);
12dcc321 12113 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12114 tcg_temp_free_i32(tmp);
99c475ab 12115 }
5899f386 12116 /* advance to the next address. */
b0109805 12117 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12118 }
12119 }
f764718d 12120 tmp = NULL;
99c475ab
FB
12121 if (insn & (1 << 8)) {
12122 if (insn & (1 << 11)) {
12123 /* pop pc */
c40c8556 12124 tmp = tcg_temp_new_i32();
12dcc321 12125 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
12126 /* don't set the pc until the rest of the instruction
12127 has completed */
12128 } else {
12129 /* push lr */
b0109805 12130 tmp = load_reg(s, 14);
12dcc321 12131 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12132 tcg_temp_free_i32(tmp);
99c475ab 12133 }
b0109805 12134 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 12135 }
5899f386 12136 if ((insn & (1 << 11)) == 0) {
b0109805 12137 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12138 }
99c475ab 12139 /* write back the new stack pointer */
b0109805 12140 store_reg(s, 13, addr);
99c475ab 12141 /* set the new PC value */
be5e7a76 12142 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 12143 store_reg_from_load(s, 15, tmp);
be5e7a76 12144 }
99c475ab
FB
12145 break;
12146
9ee6e8bb
PB
12147 case 1: case 3: case 9: case 11: /* czb */
12148 rm = insn & 7;
d9ba4830 12149 tmp = load_reg(s, rm);
c2d9644e 12150 arm_gen_condlabel(s);
9ee6e8bb 12151 if (insn & (1 << 11))
cb63669a 12152 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 12153 else
cb63669a 12154 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 12155 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
12156 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12157 val = (uint32_t)s->pc + 2;
12158 val += offset;
12159 gen_jmp(s, val);
12160 break;
12161
12162 case 15: /* IT, nop-hint. */
12163 if ((insn & 0xf) == 0) {
12164 gen_nop_hint(s, (insn >> 4) & 0xf);
12165 break;
12166 }
12167 /* If Then. */
12168 s->condexec_cond = (insn >> 4) & 0xe;
12169 s->condexec_mask = insn & 0x1f;
12170 /* No actual code generated for this insn, just setup state. */
12171 break;
12172
06c949e6 12173 case 0xe: /* bkpt */
d4a2dc67
PM
12174 {
12175 int imm8 = extract32(insn, 0, 8);
be5e7a76 12176 ARCH(5);
c900a2e6 12177 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
06c949e6 12178 break;
d4a2dc67 12179 }
06c949e6 12180
19a6e31c
PM
12181 case 0xa: /* rev, and hlt */
12182 {
12183 int op1 = extract32(insn, 6, 2);
12184
12185 if (op1 == 2) {
12186 /* HLT */
12187 int imm6 = extract32(insn, 0, 6);
12188
12189 gen_hlt(s, imm6);
12190 break;
12191 }
12192
12193 /* Otherwise this is rev */
9ee6e8bb
PB
12194 ARCH(6);
12195 rn = (insn >> 3) & 0x7;
12196 rd = insn & 0x7;
b0109805 12197 tmp = load_reg(s, rn);
19a6e31c 12198 switch (op1) {
66896cb8 12199 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
12200 case 1: gen_rev16(tmp); break;
12201 case 3: gen_revsh(tmp); break;
19a6e31c
PM
12202 default:
12203 g_assert_not_reached();
9ee6e8bb 12204 }
b0109805 12205 store_reg(s, rd, tmp);
9ee6e8bb 12206 break;
19a6e31c 12207 }
9ee6e8bb 12208
d9e028c1
PM
12209 case 6:
12210 switch ((insn >> 5) & 7) {
12211 case 2:
12212 /* setend */
12213 ARCH(6);
9886ecdf
PB
12214 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12215 gen_helper_setend(cpu_env);
dcba3a8d 12216 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 12217 }
9ee6e8bb 12218 break;
d9e028c1
PM
12219 case 3:
12220 /* cps */
12221 ARCH(6);
12222 if (IS_USER(s)) {
12223 break;
8984bd2e 12224 }
b53d8923 12225 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
12226 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
12227 /* FAULTMASK */
12228 if (insn & 1) {
12229 addr = tcg_const_i32(19);
12230 gen_helper_v7m_msr(cpu_env, addr, tmp);
12231 tcg_temp_free_i32(addr);
12232 }
12233 /* PRIMASK */
12234 if (insn & 2) {
12235 addr = tcg_const_i32(16);
12236 gen_helper_v7m_msr(cpu_env, addr, tmp);
12237 tcg_temp_free_i32(addr);
12238 }
12239 tcg_temp_free_i32(tmp);
12240 gen_lookup_tb(s);
12241 } else {
12242 if (insn & (1 << 4)) {
12243 shift = CPSR_A | CPSR_I | CPSR_F;
12244 } else {
12245 shift = 0;
12246 }
12247 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 12248 }
d9e028c1
PM
12249 break;
12250 default:
12251 goto undef;
9ee6e8bb
PB
12252 }
12253 break;
12254
99c475ab
FB
12255 default:
12256 goto undef;
12257 }
12258 break;
12259
12260 case 12:
a7d3970d 12261 {
99c475ab 12262 /* load/store multiple */
f764718d 12263 TCGv_i32 loaded_var = NULL;
99c475ab 12264 rn = (insn >> 8) & 0x7;
b0109805 12265 addr = load_reg(s, rn);
99c475ab
FB
12266 for (i = 0; i < 8; i++) {
12267 if (insn & (1 << i)) {
99c475ab
FB
12268 if (insn & (1 << 11)) {
12269 /* load */
c40c8556 12270 tmp = tcg_temp_new_i32();
12dcc321 12271 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
12272 if (i == rn) {
12273 loaded_var = tmp;
12274 } else {
12275 store_reg(s, i, tmp);
12276 }
99c475ab
FB
12277 } else {
12278 /* store */
b0109805 12279 tmp = load_reg(s, i);
12dcc321 12280 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12281 tcg_temp_free_i32(tmp);
99c475ab 12282 }
5899f386 12283 /* advance to the next address */
b0109805 12284 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12285 }
12286 }
b0109805 12287 if ((insn & (1 << rn)) == 0) {
a7d3970d 12288 /* base reg not in list: base register writeback */
b0109805
PB
12289 store_reg(s, rn, addr);
12290 } else {
a7d3970d
PM
12291 /* base reg in list: if load, complete it now */
12292 if (insn & (1 << 11)) {
12293 store_reg(s, rn, loaded_var);
12294 }
7d1b0095 12295 tcg_temp_free_i32(addr);
b0109805 12296 }
99c475ab 12297 break;
a7d3970d 12298 }
99c475ab
FB
12299 case 13:
12300 /* conditional branch or swi */
12301 cond = (insn >> 8) & 0xf;
12302 if (cond == 0xe)
12303 goto undef;
12304
12305 if (cond == 0xf) {
12306 /* swi */
eaed129d 12307 gen_set_pc_im(s, s->pc);
d4a2dc67 12308 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 12309 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
12310 break;
12311 }
12312 /* generate a conditional jump to next instruction */
c2d9644e 12313 arm_skip_unless(s, cond);
99c475ab
FB
12314
12315 /* jump to the offset */
5899f386 12316 val = (uint32_t)s->pc + 2;
99c475ab 12317 offset = ((int32_t)insn << 24) >> 24;
5899f386 12318 val += offset << 1;
8aaca4c0 12319 gen_jmp(s, val);
99c475ab
FB
12320 break;
12321
12322 case 14:
358bf29e 12323 if (insn & (1 << 11)) {
296e5a0a
PM
12324 /* thumb_insn_is_16bit() ensures we can't get here for
12325 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12326 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12327 */
12328 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12329 ARCH(5);
12330 offset = ((insn & 0x7ff) << 1);
12331 tmp = load_reg(s, 14);
12332 tcg_gen_addi_i32(tmp, tmp, offset);
12333 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12334
12335 tmp2 = tcg_temp_new_i32();
12336 tcg_gen_movi_i32(tmp2, s->pc | 1);
12337 store_reg(s, 14, tmp2);
12338 gen_bx(s, tmp);
358bf29e
PB
12339 break;
12340 }
9ee6e8bb 12341 /* unconditional branch */
99c475ab
FB
12342 val = (uint32_t)s->pc;
12343 offset = ((int32_t)insn << 21) >> 21;
12344 val += (offset << 1) + 2;
8aaca4c0 12345 gen_jmp(s, val);
99c475ab
FB
12346 break;
12347
12348 case 15:
296e5a0a
PM
12349 /* thumb_insn_is_16bit() ensures we can't get here for
12350 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12351 */
12352 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12353
12354 if (insn & (1 << 11)) {
12355 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12356 offset = ((insn & 0x7ff) << 1) | 1;
12357 tmp = load_reg(s, 14);
12358 tcg_gen_addi_i32(tmp, tmp, offset);
12359
12360 tmp2 = tcg_temp_new_i32();
12361 tcg_gen_movi_i32(tmp2, s->pc | 1);
12362 store_reg(s, 14, tmp2);
12363 gen_bx(s, tmp);
12364 } else {
12365 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12366 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12367
12368 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
12369 }
9ee6e8bb 12370 break;
99c475ab
FB
12371 }
12372 return;
9ee6e8bb 12373illegal_op:
99c475ab 12374undef:
73710361
GB
12375 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12376 default_exception_el(s));
99c475ab
FB
12377}
12378
541ebcd4
PM
12379static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12380{
12381 /* Return true if the insn at dc->pc might cross a page boundary.
12382 * (False positives are OK, false negatives are not.)
5b8d7289
PM
12383 * We know this is a Thumb insn, and our caller ensures we are
12384 * only called if dc->pc is less than 4 bytes from the page
12385 * boundary, so we cross the page if the first 16 bits indicate
12386 * that this is a 32 bit insn.
541ebcd4 12387 */
5b8d7289 12388 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 12389
5b8d7289 12390 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
12391}
12392
b542683d 12393static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2c0262af 12394{
1d8a5535 12395 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 12396 CPUARMState *env = cs->env_ptr;
4e5e1215 12397 ARMCPU *cpu = arm_env_get_cpu(env);
3b46e624 12398
dcba3a8d 12399 dc->pc = dc->base.pc_first;
e50e6a20 12400 dc->condjmp = 0;
3926cc84 12401
40f860cd 12402 dc->aarch64 = 0;
cef9ee70
SS
12403 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
12404 * there is no secure EL1, so we route exceptions to EL3.
12405 */
12406 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
12407 !arm_el_is_aa64(env, 3);
1d8a5535
LV
12408 dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
12409 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
12410 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
12411 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
12412 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
12413 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
c1e37810 12414 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 12415#if !defined(CONFIG_USER_ONLY)
c1e37810 12416 dc->user = (dc->current_el == 0);
3926cc84 12417#endif
1d8a5535
LV
12418 dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
12419 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
12420 dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
12421 dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
12422 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
12423 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
12424 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
fb602cb7
PM
12425 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12426 regime_is_secure(env, dc->mmu_idx);
60322b39 12427 dc->cp_regs = cpu->cp_regs;
a984e42c 12428 dc->features = env->features;
40f860cd 12429
50225ad0
PM
12430 /* Single step state. The code-generation logic here is:
12431 * SS_ACTIVE == 0:
12432 * generate code with no special handling for single-stepping (except
12433 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
12434 * this happens anyway because those changes are all system register or
12435 * PSTATE writes).
12436 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
12437 * emit code for one insn
12438 * emit code to clear PSTATE.SS
12439 * emit code to generate software step exception for completed step
12440 * end TB (as usual for having generated an exception)
12441 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
12442 * emit code to generate a software step exception
12443 * end the TB
12444 */
1d8a5535
LV
12445 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
12446 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
50225ad0
PM
12447 dc->is_ldex = false;
12448 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
12449
bfe7ad5b 12450 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
1d8a5535 12451
f7708456
RH
12452 /* If architectural single step active, limit to 1. */
12453 if (is_singlestepping(dc)) {
b542683d 12454 dc->base.max_insns = 1;
f7708456
RH
12455 }
12456
d0264d86
RH
12457 /* ARM is a fixed-length ISA. Bound the number of insns to execute
12458 to those left on the page. */
12459 if (!dc->thumb) {
bfe7ad5b 12460 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 12461 dc->base.max_insns = MIN(dc->base.max_insns, bound);
d0264d86
RH
12462 }
12463
a7812ae4
PB
12464 cpu_F0s = tcg_temp_new_i32();
12465 cpu_F1s = tcg_temp_new_i32();
12466 cpu_F0d = tcg_temp_new_i64();
12467 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
12468 cpu_V0 = cpu_F0d;
12469 cpu_V1 = cpu_F1d;
e677137d 12470 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 12471 cpu_M0 = tcg_temp_new_i64();
1d8a5535
LV
12472}
12473
b1476854
LV
12474static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
12475{
12476 DisasContext *dc = container_of(dcbase, DisasContext, base);
12477
12478 /* A note on handling of the condexec (IT) bits:
12479 *
12480 * We want to avoid the overhead of having to write the updated condexec
12481 * bits back to the CPUARMState for every instruction in an IT block. So:
12482 * (1) if the condexec bits are not already zero then we write
12483 * zero back into the CPUARMState now. This avoids complications trying
12484 * to do it at the end of the block. (For example if we don't do this
12485 * it's hard to identify whether we can safely skip writing condexec
12486 * at the end of the TB, which we definitely want to do for the case
12487 * where a TB doesn't do anything with the IT state at all.)
12488 * (2) if we are going to leave the TB then we call gen_set_condexec()
12489 * which will write the correct value into CPUARMState if zero is wrong.
12490 * This is done both for leaving the TB at the end, and for leaving
12491 * it because of an exception we know will happen, which is done in
12492 * gen_exception_insn(). The latter is necessary because we need to
12493 * leave the TB with the PC/IT state just prior to execution of the
12494 * instruction which caused the exception.
12495 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12496 * then the CPUARMState will be wrong and we need to reset it.
12497 * This is handled in the same way as restoration of the
12498 * PC in these situations; we save the value of the condexec bits
12499 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12500 * then uses this to restore them after an exception.
12501 *
12502 * Note that there are no instructions which can read the condexec
12503 * bits, and none which can write non-static values to them, so
12504 * we don't need to care about whether CPUARMState is correct in the
12505 * middle of a TB.
12506 */
12507
12508 /* Reset the conditional execution bits immediately. This avoids
12509 complications trying to do it at the end of the block. */
12510 if (dc->condexec_mask || dc->condexec_cond) {
12511 TCGv_i32 tmp = tcg_temp_new_i32();
12512 tcg_gen_movi_i32(tmp, 0);
12513 store_cpu_field(tmp, condexec_bits);
12514 }
23169224 12515 tcg_clear_temp_count();
b1476854
LV
12516}
12517
f62bd897
LV
/* TranslatorOps hook: called before each instruction is translated.
 * Records the restore data for this insn: the PC, the packed condexec
 * (IT) bits, and a zero placeholder word (later patched with the
 * exception syndrome; restore_state_to_opc() unpacks all three).
 * Keeps a handle to the emitted op in dc->insn_start so the syndrome
 * slot can be updated retroactively.
 */
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}
12527
a68956ad
LV
/* TranslatorOps hook: a breakpoint was found at dc->pc.
 * For CPU (architectural) breakpoints we emit a runtime check, since
 * whether the breakpoint fires can depend on CPU state; for GDB
 * breakpoints we raise EXCP_DEBUG unconditionally.  Returns true to tell
 * the translator loop the insn was "handled".
 */
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        /* Architectural breakpoint: sync IT bits and PC, then let the
         * helper decide at runtime whether to take the debug exception.
         */
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
12554
722ef0a5 12555static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 12556{
13189a90
LV
12557#ifdef CONFIG_USER_ONLY
12558 /* Intercept jump to the magic kernel page. */
12559 if (dc->pc >= 0xffff0000) {
12560 /* We always get here via a jump, so know we are not in a
12561 conditional execution block. */
12562 gen_exception_internal(EXCP_KERNEL_TRAP);
12563 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12564 return true;
13189a90
LV
12565 }
12566#endif
12567
12568 if (dc->ss_active && !dc->pstate_ss) {
12569 /* Singlestep state is Active-pending.
12570 * If we're in this state at the start of a TB then either
12571 * a) we just took an exception to an EL which is being debugged
12572 * and this is the first insn in the exception handler
12573 * b) debug exceptions were masked and we just unmasked them
12574 * without changing EL (eg by clearing PSTATE.D)
12575 * In either case we're going to take a swstep exception in the
12576 * "did not step an insn" case, and so the syndrome ISV and EX
12577 * bits should be zero.
12578 */
12579 assert(dc->base.num_insns == 1);
12580 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12581 default_exception_el(dc));
12582 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12583 return true;
13189a90
LV
12584 }
12585
722ef0a5
RH
12586 return false;
12587}
13189a90 12588
d0264d86 12589static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12590{
13189a90
LV
12591 if (dc->condjmp && !dc->base.is_jmp) {
12592 gen_set_label(dc->condlabel);
12593 dc->condjmp = 0;
12594 }
13189a90 12595 dc->base.pc_next = dc->pc;
23169224 12596 translator_loop_temp_check(&dc->base);
13189a90
LV
12597}
12598
722ef0a5
RH
12599static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12600{
12601 DisasContext *dc = container_of(dcbase, DisasContext, base);
12602 CPUARMState *env = cpu->env_ptr;
12603 unsigned int insn;
12604
12605 if (arm_pre_translate_insn(dc)) {
12606 return;
12607 }
12608
12609 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12610 dc->insn = insn;
722ef0a5
RH
12611 dc->pc += 4;
12612 disas_arm_insn(dc, insn);
12613
d0264d86
RH
12614 arm_post_translate_insn(dc);
12615
12616 /* ARM is a fixed-length ISA. We performed the cross-page check
12617 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12618}
12619
dcf14dfb
PM
12620static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12621{
12622 /* Return true if this Thumb insn is always unconditional,
12623 * even inside an IT block. This is true of only a very few
12624 * instructions: BKPT, HLT, and SG.
12625 *
12626 * A larger class of instructions are UNPREDICTABLE if used
12627 * inside an IT block; we do not need to detect those here, because
12628 * what we do by default (perform the cc check and update the IT
12629 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12630 * choice for those situations.
12631 *
12632 * insn is either a 16-bit or a 32-bit instruction; the two are
12633 * distinguishable because for the 16-bit case the top 16 bits
12634 * are zeroes, and that isn't a valid 32-bit encoding.
12635 */
12636 if ((insn & 0xffffff00) == 0xbe00) {
12637 /* BKPT */
12638 return true;
12639 }
12640
12641 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12642 !arm_dc_feature(s, ARM_FEATURE_M)) {
12643 /* HLT: v8A only. This is unconditional even when it is going to
12644 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12645 * For v7 cores this was a plain old undefined encoding and so
12646 * honours its cc check. (We might be using the encoding as
12647 * a semihosting trap, but we don't change the cc check behaviour
12648 * on that account, because a debugger connected to a real v7A
12649 * core and emulating semihosting traps by catching the UNDEF
12650 * exception would also only see cases where the cc check passed.
12651 * No guest code should be trying to do a HLT semihosting trap
12652 * in an IT block anyway.
12653 */
12654 return true;
12655 }
12656
12657 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12658 arm_dc_feature(s, ARM_FEATURE_M)) {
12659 /* SG: v8M only */
12660 return true;
12661 }
12662
12663 return false;
12664}
12665
722ef0a5
RH
/* TranslatorOps hook: translate one Thumb instruction (16- or 32-bit).
 * Handles the IT-block condition skip, dispatches to the Thumb1 or
 * Thumb2 decoder, advances the ITSTATE machine, and decides whether to
 * end the TB at a page boundary.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword; if it encodes a 32-bit insn, fetch and
     * append the second halfword.
     */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, emit a conditional skip over this insn unless
     * it is one of the few always-unconditional encodings.
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition: shift the next condition bit
     * into the cond LSB and shift the mask, clearing cond when the IT
     * block ends.
     */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next.  This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12734
70d3c035 12735static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1d8a5535 12736{
70d3c035 12737 DisasContext *dc = container_of(dcbase, DisasContext, base);
2e70f6ef 12738
c5a49c63 12739 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
70d3c035
LV
12740 /* FIXME: This can theoretically happen with self-modifying code. */
12741 cpu_abort(cpu, "IO on conditional branch instruction");
2e70f6ef 12742 }
9ee6e8bb 12743
b5ff1b31 12744 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
12745 instruction was a conditional branch or trap, and the PC has
12746 already been written. */
f021b2c4 12747 gen_set_condexec(dc);
dcba3a8d 12748 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
3bb8a96f
PM
12749 /* Exception return branches need some special case code at the
12750 * end of the TB, which is complex enough that it has to
12751 * handle the single-step vs not and the condition-failed
12752 * insn codepath itself.
12753 */
12754 gen_bx_excret_final_code(dc);
12755 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 12756 /* Unconditional and "condition passed" instruction codepath. */
dcba3a8d 12757 switch (dc->base.is_jmp) {
7999a5c8 12758 case DISAS_SWI:
50225ad0 12759 gen_ss_advance(dc);
73710361
GB
12760 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12761 default_exception_el(dc));
7999a5c8
SF
12762 break;
12763 case DISAS_HVC:
37e6456e 12764 gen_ss_advance(dc);
73710361 12765 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
12766 break;
12767 case DISAS_SMC:
37e6456e 12768 gen_ss_advance(dc);
73710361 12769 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
12770 break;
12771 case DISAS_NEXT:
a68956ad 12772 case DISAS_TOO_MANY:
7999a5c8
SF
12773 case DISAS_UPDATE:
12774 gen_set_pc_im(dc, dc->pc);
12775 /* fall through */
12776 default:
5425415e
PM
12777 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12778 gen_singlestep_exception(dc);
a0c231e6
RH
12779 break;
12780 case DISAS_NORETURN:
12781 break;
7999a5c8 12782 }
8aaca4c0 12783 } else {
9ee6e8bb
PB
12784 /* While branches must always occur at the end of an IT block,
12785 there are a few other things that can cause us to terminate
65626741 12786 the TB in the middle of an IT block:
9ee6e8bb
PB
12787 - Exception generating instructions (bkpt, swi, undefined).
12788 - Page boundaries.
12789 - Hardware watchpoints.
12790 Hardware breakpoints have already been handled and skip this code.
12791 */
dcba3a8d 12792 switch(dc->base.is_jmp) {
8aaca4c0 12793 case DISAS_NEXT:
a68956ad 12794 case DISAS_TOO_MANY:
6e256c93 12795 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 12796 break;
577bf808 12797 case DISAS_JUMP:
8a6b28c7
EC
12798 gen_goto_ptr();
12799 break;
e8d52302
AB
12800 case DISAS_UPDATE:
12801 gen_set_pc_im(dc, dc->pc);
12802 /* fall through */
577bf808 12803 default:
8aaca4c0 12804 /* indicate that the hash table must be used to find the next TB */
07ea28b4 12805 tcg_gen_exit_tb(NULL, 0);
8aaca4c0 12806 break;
a0c231e6 12807 case DISAS_NORETURN:
8aaca4c0
FB
12808 /* nothing more to generate */
12809 break;
9ee6e8bb 12810 case DISAS_WFI:
58803318
SS
12811 {
12812 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
12813 !(dc->insn & (1U << 31))) ? 2 : 4);
12814
12815 gen_helper_wfi(cpu_env, tmp);
12816 tcg_temp_free_i32(tmp);
84549b6d
PM
12817 /* The helper doesn't necessarily throw an exception, but we
12818 * must go back to the main loop to check for interrupts anyway.
12819 */
07ea28b4 12820 tcg_gen_exit_tb(NULL, 0);
9ee6e8bb 12821 break;
58803318 12822 }
72c1d3af
PM
12823 case DISAS_WFE:
12824 gen_helper_wfe(cpu_env);
12825 break;
c87e5a61
PM
12826 case DISAS_YIELD:
12827 gen_helper_yield(cpu_env);
12828 break;
9ee6e8bb 12829 case DISAS_SWI:
73710361
GB
12830 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12831 default_exception_el(dc));
9ee6e8bb 12832 break;
37e6456e 12833 case DISAS_HVC:
73710361 12834 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
12835 break;
12836 case DISAS_SMC:
73710361 12837 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 12838 break;
8aaca4c0 12839 }
f021b2c4
PM
12840 }
12841
12842 if (dc->condjmp) {
12843 /* "Condition failed" instruction codepath for the branch/trap insn */
12844 gen_set_label(dc->condlabel);
12845 gen_set_condexec(dc);
b636649f 12846 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
12847 gen_set_pc_im(dc, dc->pc);
12848 gen_singlestep_exception(dc);
12849 } else {
6e256c93 12850 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 12851 }
2c0262af 12852 }
23169224
LV
12853
12854 /* Functions above can change dc->pc, so re-align db->pc_next */
12855 dc->base.pc_next = dc->pc;
70d3c035
LV
12856}
12857
4013f7fc
LV
12858static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12859{
12860 DisasContext *dc = container_of(dcbase, DisasContext, base);
12861
12862 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12863 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12864}
12865
23169224
LV
/* Translator hooks for 32-bit ARM (A32) mode. */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12875
722ef0a5
RH
/* Translator hooks for Thumb (T32) mode; only translate_insn differs
 * from the ARM-mode table.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12885
70d3c035 12886/* generate intermediate code for basic block 'tb'. */
23169224 12887void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12888{
23169224
LV
12889 DisasContext dc;
12890 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12891
722ef0a5
RH
12892 if (ARM_TBFLAG_THUMB(tb->flags)) {
12893 ops = &thumb_translator_ops;
12894 }
23169224 12895#ifdef TARGET_AARCH64
70d3c035 12896 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 12897 ops = &aarch64_translator_ops;
2c0262af
FB
12898 }
12899#endif
23169224
LV
12900
12901 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
12902}
12903
b5ff1b31 12904static const char *cpu_mode_names[16] = {
28c9457d
EI
12905 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12906 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 12907};
9ee6e8bb 12908
878096ee
AF
/* Dump the AArch32 CPU state (registers, PSR/XPSR, optionally VFP regs)
 * to 'f' via cpu_fprintf.  Delegates to the AArch64 dumper when the CPU
 * is currently in 64-bit state.  'flags' is a CPU_DUMP_* bitmask; only
 * CPU_DUMP_FPU is consulted here.
 */
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    /* General-purpose registers, four per line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: print XPSR plus security state and handler/thread
         * privilege mode instead of the A-profile PSR line.
         */
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        /* A/R-profile: print CPSR with flags, Thumb/ARM state,
         * security state (when EL3 exists and not in Monitor mode),
         * and the current mode name.
         */
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        /* 16 D-registers for base VFP, 32 when VFP3 is present. */
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            /* Print each D-register both as its two S-register halves
             * and as the full 64-bit value.
             */
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
a6b025d3 12995
bad729e2
RH
12996void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12997 target_ulong *data)
d2856f1a 12998{
3926cc84 12999 if (is_a64(env)) {
bad729e2 13000 env->pc = data[0];
40f860cd 13001 env->condexec_bits = 0;
aaa1f954 13002 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13003 } else {
bad729e2
RH
13004 env->regs[15] = data[0];
13005 env->condexec_bits = data[1];
aaa1f954 13006 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13007 }
d2856f1a 13008}