]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
36a71934 28#include "tcg-op-gvec.h"
1de7afc9 29#include "qemu/log.h"
534df156 30#include "qemu/bitops.h"
1d854765 31#include "arm_ldst.h"
19a6e31c 32#include "exec/semihost.h"
1497c961 33
2ef6175a
RH
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
2c0262af 36
a7e30d84 37#include "trace-tcg.h"
508127e2 38#include "exec/log.h"
a7e30d84
LV
39
40
2b51668f
PM
41#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 43/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 44#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
c99a55d3 45#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
2b51668f
PM
46#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 51
86753403 52#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d 55
b5ff1b31
FB
56#if defined(CONFIG_USER_ONLY)
57#define IS_USER(s) 1
58#else
59#define IS_USER(s) (s->user)
60#endif
61
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
b26eefb6 69/* FIXME: These should be removed. */
39d5492a 70static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 71static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 72
022c62cb 73#include "exec/gen-icount.h"
2e70f6ef 74
155c3eac
FN
75static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
61adacc8
RH
79/* Function prototypes for gen_ functions calling Neon helpers. */
80typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
81 TCGv_i32, TCGv_i32);
82
b26eefb6
PB
/* initialize TCG globals.
 *
 * Creates the TCG global variables that mirror the AArch32 CPU state:
 * the 16 core registers, the four separate NZCV flag words, and the
 * exclusive-access tracking state used for LDREX/STREX emulation.
 * Called once at translator start-up.
 */
void arm_translate_init(void)
{
    int i;

    /* Core registers r0..r14 plus pc, backed by CPUARMState::regs[]. */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* Flags are kept in separate one-word globals, not a packed CPSR. */
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    /* Monitor state for exclusive (LDREX/STREX) accesses. */
    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    /* Let the AArch64 front end register its own globals as well. */
    a64_translate_init();
}
105
9bb6558a
PM
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,      /* Rt register number occupies bits [4:0] */
    ISSInvalid = (1 << 5),  /* suppress ISS generation entirely */
    ISSIsAcqRel = (1 << 6), /* access has acquire/release semantics */
    ISSIsWrite = (1 << 7),  /* store rather than load */
    ISSIs16Bit = (1 << 8),  /* insn is a 16-bit Thumb encoding */
} ISSInfo;

/* Save the syndrome information for a Data Abort.
 * Extracts access size/sign from 'memop' and the register/flags from
 * 'issinfo', builds the ISS-bearing syndrome word and records it on the
 * DisasContext so a fault on this insn reports correct syndrome data.
 */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
148
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     *
     * Each pair/triple of translation regimes maps to the corresponding
     * user-mode (lowest-privilege) index of that regime.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        /* Stage-2 (and anything unlisted) has no unprivileged variant. */
        g_assert_not_reached();
    }
}
182
/* Load a 32-bit value from the given byte offset within CPUARMState
 * into a fresh temporary.  Caller owns (and must free) the temporary.
 */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

/* Convenience wrapper: load CPUARMState field 'name'. */
#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/* Store 'var' at the given byte offset within CPUARMState.
 * 'var' must be a temporary; it is freed (marked dead) here.
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

/* Convenience wrapper: store 'var' into CPUARMState field 'name'. */
#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 200
b26eefb6 201/* Set a variable to the value of a CPU register. */
39d5492a 202static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
203{
204 if (reg == 15) {
205 uint32_t addr;
b90372ad 206 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
207 if (s->thumb)
208 addr = (long)s->pc + 2;
209 else
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
212 } else {
155c3eac 213 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
214 }
215}
216
/* Create a new temporary and set it to the value of a CPU register.
 * Caller owns (and must free) the returned temporary.
 */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
224
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  Writes to r15 (pc) force an interworking-style
   address alignment and end the translation block. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        /* PC changed: the TB must end after this insn. */
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
241
b26eefb6 242/* Value extensions. */
86831435
PB
243#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
244#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
245#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
246#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
247
1497c961
PB
248#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
249#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 250
b26eefb6 251
/* Write 'var' into the CPSR under control of 'mask' (only masked bits
 * are updated), via the cpsr_write helper.  'var' is not freed here.
 */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
260
/* Raise an internal (QEMU-only, non-architectural) exception 'excp'. */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

/* Raise architectural exception 'excp' with the given syndrome value,
 * targeting exception level 'target_el'.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
283
/* Advance the architectural single-step state machine, if active. */
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

/* Raise the software-step exception after completing one insn. */
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}
324
b636649f
PM
325static inline bool is_singlestepping(DisasContext *s)
326{
327 /* Return true if we are singlestepping either because of
328 * architectural singlestep or QEMU gdbstub singlestep. This does
329 * not include the command line '-singlestep' mode which is rather
330 * misnamed as it only means "one instruction per TB" and doesn't
331 * affect the code we generate.
332 */
dcba3a8d 333 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
334}
335
/* Dual signed 16x16->32 multiply.  On exit:
 *   a = sext(low16(a)) * sext(low16(b))
 *   b = (a >> 16) * (b >> 16)   (arithmetic shifts, i.e. signed halves)
 * Both operands are overwritten with the two products.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* High halves: shift down (sign-extending) then multiply in place. */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    /* Swap adjacent bytes within each 16-bit lane using shift/mask/or. */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
372
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    /* Result is accumulated into (and returned as) 'a'. */
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    /* Note operand order: (b << 32) minus a, written back into 'a'. */
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
400
/* 32x32->64 multiply. Marks inputs as dead.
 * Unsigned variant: returns a fresh i64 temporary owned by the caller.
 */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    /* Pack the 32-bit low/high halves into one 64-bit value. */
    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Signed 32x32->64 multiply. Marks inputs as dead; caller frees result. */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
437
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   The per-halfword carries are suppressed by clearing bit 15 of each
   addend before the add and patching the sign bits back afterwards:
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
467
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  (NF holds the value itself; ZF is
 * non-zero iff the value is non-zero — flags are stored unpacked.)
 */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  Does not update flags. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF.  Does not update flags. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  (ARM subtract-with-carry.)  No flag update. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
502
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 gives us the 33-bit result: low word in NF, carry-out in CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands had the same
     * sign and the result sign differs.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops keep everything in 32-bit registers. */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: widen to 64 bits and split the result. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
544
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry is "no borrow": C = (t0 >= t1) unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V = (result ^ t0) & (t0 ^ t1): overflow iff operand signs differ
     * and the result sign differs from t0.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags.
 * (Subtract-with-carry, implemented by reusing gen_adc_CC on ~T1.)
 */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
568
/* Variable shift by register amount (bottom byte of t1), matching the
 * ARM semantics that a shift count >= 32 yields 0.  A movcond selects
 * between t0 and zero before shifting by (count & 0x1f), which keeps
 * the TCG shift amount in range (shifting by >= 32 is undefined in TCG).
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

/* Arithmetic shift right by register amount; counts >= 32 are clamped
 * to 31, which reproduces the ARM "fill with sign bit" behaviour.
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* dest = |src| (two's-complement; INT_MIN maps to itself). */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 609
/* Set CF to the bit shifted out of 'var' by an immediate shift:
 * bit (shift) for shift == 0 ... actually bit 0 when shift == 0,
 * otherwise bit (shift) after shifting right by 'shift'.
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.
 * 'shiftop' encodes LSL/LSR/ASR/ROR; if 'flags' is set the carry flag
 * is updated with the last bit shifted out (ARM shifter-carry-out).
 */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* Encoded shift 0 means LSR #32: result 0, C = old bit 31. */
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        /* Encoded shift 0 means ASR #32; TCG caps the shift at 31,
         * which gives the same all-sign-bits result.
         */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
          shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            /* ROR #0 encodes RRX: rotate right one bit through carry. */
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
671
/* Shift 'var' by a register-specified amount.  When 'flags' is set the
 * carry-updating helpers are used (they implement the ARM shifter carry
 * rules for counts of 0, 32 and beyond); otherwise the plain gen_shl /
 * gen_shr / gen_sar wrappers are used.  'shift' is freed here.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
699
/* Dispatch table (as a macro) from the op2 field to the six parallel
 * add/sub helper suffixes, for the ARM encoding.  gen_pas_helper is
 * redefined around each use to select the helper-call shape (with or
 * without the GE-flags pointer argument).
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/* Emit an ARM-encoded parallel add/subtract.  op1 selects the variant
 * (signed/unsigned flag-setting, saturating, halving); the signed and
 * unsigned flag-setting forms additionally take a pointer to the GE
 * flags in CPUARMState.
 */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
 * Same dispatch idea as above, but here op1 selects the operation and
 * op2 selects the variant.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
792
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 *
 * Fills in 'cmp' with a TCG condition and a value to compare against
 * zero; even-numbered codes use the condition as-is, odd-numbered codes
 * are their inversions (handled by the single invert at the end).
 * 'value_global' records whether the value is a flag global (must not be
 * freed) or a temporary created here (freed later by arm_free_cc()).
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions. */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0. */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result. */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value. */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

/* Free any temporary created by arm_test_cc() (globals are kept). */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

/* Branch to 'label' if the prepared comparison holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

/* Convenience wrapper: test condition 'cc' and branch to 'label'. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 902
/* For each of the 16 ARM data-processing opcodes: nonzero marks the
 * logical operations (AND/EOR/TST/TEQ/ORR/MOV/BIC/MVN), zero the
 * arithmetic ones.  NOTE(review): presumably used to decide whether
 * flag-setting goes through the logical-CC path — confirm at use site.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 921
/* If we are inside an IT block (condexec_mask non-zero), store the
 * updated IT state back into CPUARMState::condexec_bits.
 * NOTE(review): the mask is shifted down by one bit here, which looks
 * like it advances the IT state to the next insn — confirm against the
 * condexec encoding used by the decoder.
 */
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

/* Set the PC global to an immediate value (does not end the TB). */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.
 * Bit 0 of the address selects the Thumb state; the stored PC has
 * bit 0 cleared.  Ends the TB via DISAS_JUMP.
 */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    /* Only write env->thumb if the state actually changes. */
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
951
/* Set PC and Thumb state from var. var is marked as dead.
 * Bit 0 of var selects the new Thumb state; PC gets var with bit 0
 * cleared.  Ends the TB via DISAS_JUMP.
 */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}
977
978static inline void gen_bx_excret_final_code(DisasContext *s)
979{
980 /* Generate the code to finish possible exception return and end the TB */
981 TCGLabel *excret_label = gen_new_label();
d02a8698
PM
982 uint32_t min_magic;
983
984 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
985 /* Covers FNC_RETURN and EXC_RETURN magic */
986 min_magic = FNC_RETURN_MIN_MAGIC;
987 } else {
988 /* EXC_RETURN magic only */
989 min_magic = EXC_RETURN_MIN_MAGIC;
990 }
3bb8a96f
PM
991
992 /* Is the new PC value in the magic range indicating exception return? */
d02a8698 993 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
3bb8a96f
PM
994 /* No: end the TB as we would for a DISAS_JMP */
995 if (is_singlestepping(s)) {
996 gen_singlestep_exception(s);
997 } else {
07ea28b4 998 tcg_gen_exit_tb(NULL, 0);
3bb8a96f
PM
999 }
1000 gen_set_label(excret_label);
1001 /* Yes: this is an exception return.
1002 * At this point in runtime env->regs[15] and env->thumb will hold
1003 * the exception-return magic number, which do_v7m_exception_exit()
1004 * will read. Nothing else will be able to see those values because
1005 * the cpu-exec main loop guarantees that we will always go straight
1006 * from raising the exception to the exception-handling code.
1007 *
1008 * gen_ss_advance(s) does nothing on M profile currently but
1009 * calling it is conceptually the right thing as we have executed
1010 * this instruction (compare SWI, HVC, SMC handling).
1011 */
1012 gen_ss_advance(s);
1013 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1014}
1015
fb602cb7
PM
1016static inline void gen_bxns(DisasContext *s, int rm)
1017{
1018 TCGv_i32 var = load_reg(s, rm);
1019
1020 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1021 * we need to sync state before calling it, but:
1022 * - we don't need to do gen_set_pc_im() because the bxns helper will
1023 * always set the PC itself
1024 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1025 * unless it's outside an IT block or the last insn in an IT block,
1026 * so we know that condexec == 0 (already set at the top of the TB)
1027 * is correct in the non-UNPREDICTABLE cases, and we can choose
1028 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1029 */
1030 gen_helper_v7m_bxns(cpu_env, var);
1031 tcg_temp_free_i32(var);
ef475b5d 1032 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
1033}
1034
3e3fa230
PM
1035static inline void gen_blxns(DisasContext *s, int rm)
1036{
1037 TCGv_i32 var = load_reg(s, rm);
1038
1039 /* We don't need to sync condexec state, for the same reason as bxns.
1040 * We do however need to set the PC, because the blxns helper reads it.
1041 * The blxns helper may throw an exception.
1042 */
1043 gen_set_pc_im(s, s->pc);
1044 gen_helper_v7m_blxns(cpu_env, var);
1045 tcg_temp_free_i32(var);
1046 s->base.is_jmp = DISAS_EXIT;
1047}
1048
21aeb343
JR
1049/* Variant of store_reg which uses branch&exchange logic when storing
1050 to r15 in ARM architecture v7 and above. The source must be a temporary
1051 and will be marked as dead. */
7dcc1f89 1052static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1053{
1054 if (reg == 15 && ENABLE_ARCH_7) {
1055 gen_bx(s, var);
1056 } else {
1057 store_reg(s, reg, var);
1058 }
1059}
1060
be5e7a76
DES
1061/* Variant of store_reg which uses branch&exchange logic when storing
1062 * to r15 in ARM architecture v5T and above. This is used for storing
1063 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1064 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1065static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1066{
1067 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1068 gen_bx_excret(s, var);
be5e7a76
DES
1069 } else {
1070 store_reg(s, reg, var);
1071 }
1072}
1073
e334bd31
PB
1074#ifdef CONFIG_USER_ONLY
1075#define IS_USER_ONLY 1
1076#else
1077#define IS_USER_ONLY 0
1078#endif
1079
08307563
PM
1080/* Abstractions of "generate code to do a guest load/store for
1081 * AArch32", where a vaddr is always 32 bits (and is zero
1082 * extended if we're a 64 bit core) and data is also
1083 * 32 bits unless specifically doing a 64 bit access.
1084 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1085 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1086 */
08307563 1087
7f5616f5 1088static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
08307563 1089{
7f5616f5
RH
1090 TCGv addr = tcg_temp_new();
1091 tcg_gen_extu_i32_tl(addr, a32);
1092
e334bd31 1093 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1094 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1095 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1096 }
7f5616f5 1097 return addr;
08307563
PM
1098}
1099
7f5616f5
RH
1100static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1101 int index, TCGMemOp opc)
08307563 1102{
2aeba0d0
JS
1103 TCGv addr;
1104
1105 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1106 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1107 opc |= MO_ALIGN;
1108 }
1109
1110 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1111 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1112 tcg_temp_free(addr);
08307563
PM
1113}
1114
7f5616f5
RH
1115static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1116 int index, TCGMemOp opc)
1117{
2aeba0d0
JS
1118 TCGv addr;
1119
1120 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1121 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1122 opc |= MO_ALIGN;
1123 }
1124
1125 addr = gen_aa32_addr(s, a32, opc);
7f5616f5
RH
1126 tcg_gen_qemu_st_i32(val, addr, index, opc);
1127 tcg_temp_free(addr);
1128}
08307563 1129
/* Generate the gen_aa32_ld<SUFF>()/gen_aa32_st<SUFF>() accessors and
 * their _iss() variants, which additionally record instruction-syndrome
 * information via disas_set_da_iss().
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1159
7f5616f5 1160static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1161{
e334bd31
PB
1162 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1163 if (!IS_USER_ONLY && s->sctlr_b) {
1164 tcg_gen_rotri_i64(val, val, 32);
1165 }
08307563
PM
1166}
1167
7f5616f5
RH
1168static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1169 int index, TCGMemOp opc)
08307563 1170{
7f5616f5
RH
1171 TCGv addr = gen_aa32_addr(s, a32, opc);
1172 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1173 gen_aa32_frob64(s, val);
1174 tcg_temp_free(addr);
1175}
1176
1177static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1178 TCGv_i32 a32, int index)
1179{
1180 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1181}
1182
1183static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1184 int index, TCGMemOp opc)
1185{
1186 TCGv addr = gen_aa32_addr(s, a32, opc);
e334bd31
PB
1187
1188 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1189 if (!IS_USER_ONLY && s->sctlr_b) {
7f5616f5 1190 TCGv_i64 tmp = tcg_temp_new_i64();
e334bd31 1191 tcg_gen_rotri_i64(tmp, val, 32);
7f5616f5
RH
1192 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1193 tcg_temp_free_i64(tmp);
e334bd31 1194 } else {
7f5616f5 1195 tcg_gen_qemu_st_i64(val, addr, index, opc);
e334bd31 1196 }
7f5616f5 1197 tcg_temp_free(addr);
08307563
PM
1198}
1199
7f5616f5
RH
1200static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1201 TCGv_i32 a32, int index)
1202{
1203 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1204}
08307563 1205
7f5616f5
RH
1206DO_GEN_LD(8s, MO_SB)
1207DO_GEN_LD(8u, MO_UB)
1208DO_GEN_LD(16s, MO_SW)
1209DO_GEN_LD(16u, MO_UW)
1210DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1211DO_GEN_ST(8, MO_UB)
1212DO_GEN_ST(16, MO_UW)
1213DO_GEN_ST(32, MO_UL)
08307563 1214
37e6456e
PM
1215static inline void gen_hvc(DisasContext *s, int imm16)
1216{
1217 /* The pre HVC helper handles cases when HVC gets trapped
1218 * as an undefined insn by runtime configuration (ie before
1219 * the insn really executes).
1220 */
1221 gen_set_pc_im(s, s->pc - 4);
1222 gen_helper_pre_hvc(cpu_env);
1223 /* Otherwise we will treat this as a real exception which
1224 * happens after execution of the insn. (The distinction matters
1225 * for the PC value reported to the exception handler and also
1226 * for single stepping.)
1227 */
1228 s->svc_imm = imm16;
1229 gen_set_pc_im(s, s->pc);
dcba3a8d 1230 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1231}
1232
1233static inline void gen_smc(DisasContext *s)
1234{
1235 /* As with HVC, we may take an exception either before or after
1236 * the insn executes.
1237 */
1238 TCGv_i32 tmp;
1239
1240 gen_set_pc_im(s, s->pc - 4);
1241 tmp = tcg_const_i32(syn_aa32_smc());
1242 gen_helper_pre_smc(cpu_env, tmp);
1243 tcg_temp_free_i32(tmp);
1244 gen_set_pc_im(s, s->pc);
dcba3a8d 1245 s->base.is_jmp = DISAS_SMC;
37e6456e
PM
1246}
1247
d4a2dc67
PM
1248static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1249{
1250 gen_set_condexec(s);
1251 gen_set_pc_im(s, s->pc - offset);
1252 gen_exception_internal(excp);
dcba3a8d 1253 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1254}
1255
73710361
GB
1256static void gen_exception_insn(DisasContext *s, int offset, int excp,
1257 int syn, uint32_t target_el)
d4a2dc67
PM
1258{
1259 gen_set_condexec(s);
1260 gen_set_pc_im(s, s->pc - offset);
73710361 1261 gen_exception(excp, syn, target_el);
dcba3a8d 1262 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1263}
1264
c900a2e6
PM
1265static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1266{
1267 TCGv_i32 tcg_syn;
1268
1269 gen_set_condexec(s);
1270 gen_set_pc_im(s, s->pc - offset);
1271 tcg_syn = tcg_const_i32(syn);
1272 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1273 tcg_temp_free_i32(tcg_syn);
1274 s->base.is_jmp = DISAS_NORETURN;
1275}
1276
b5ff1b31
FB
1277/* Force a TB lookup after an instruction that changes the CPU state. */
1278static inline void gen_lookup_tb(DisasContext *s)
1279{
a6445c52 1280 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
dcba3a8d 1281 s->base.is_jmp = DISAS_EXIT;
b5ff1b31
FB
1282}
1283
19a6e31c
PM
1284static inline void gen_hlt(DisasContext *s, int imm)
1285{
1286 /* HLT. This has two purposes.
1287 * Architecturally, it is an external halting debug instruction.
1288 * Since QEMU doesn't implement external debug, we treat this as
1289 * it is required for halting debug disabled: it will UNDEF.
1290 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1291 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1292 * must trigger semihosting even for ARMv7 and earlier, where
1293 * HLT was an undefined encoding.
1294 * In system mode, we don't allow userspace access to
1295 * semihosting, to provide some semblance of security
1296 * (and for consistency with our 32-bit semihosting).
1297 */
1298 if (semihosting_enabled() &&
1299#ifndef CONFIG_USER_ONLY
1300 s->current_el != 0 &&
1301#endif
1302 (imm == (s->thumb ? 0x3c : 0xf000))) {
1303 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1304 return;
1305 }
1306
1307 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1308 default_exception_el(s));
1309}
1310
b0109805 1311static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1312 TCGv_i32 var)
2c0262af 1313{
1e8d4eec 1314 int val, rm, shift, shiftop;
39d5492a 1315 TCGv_i32 offset;
2c0262af
FB
1316
1317 if (!(insn & (1 << 25))) {
1318 /* immediate */
1319 val = insn & 0xfff;
1320 if (!(insn & (1 << 23)))
1321 val = -val;
537730b9 1322 if (val != 0)
b0109805 1323 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1324 } else {
1325 /* shift/register */
1326 rm = (insn) & 0xf;
1327 shift = (insn >> 7) & 0x1f;
1e8d4eec 1328 shiftop = (insn >> 5) & 3;
b26eefb6 1329 offset = load_reg(s, rm);
9a119ff6 1330 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1331 if (!(insn & (1 << 23)))
b0109805 1332 tcg_gen_sub_i32(var, var, offset);
2c0262af 1333 else
b0109805 1334 tcg_gen_add_i32(var, var, offset);
7d1b0095 1335 tcg_temp_free_i32(offset);
2c0262af
FB
1336 }
1337}
1338
191f9a93 1339static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1340 int extra, TCGv_i32 var)
2c0262af
FB
1341{
1342 int val, rm;
39d5492a 1343 TCGv_i32 offset;
3b46e624 1344
2c0262af
FB
1345 if (insn & (1 << 22)) {
1346 /* immediate */
1347 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1348 if (!(insn & (1 << 23)))
1349 val = -val;
18acad92 1350 val += extra;
537730b9 1351 if (val != 0)
b0109805 1352 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1353 } else {
1354 /* register */
191f9a93 1355 if (extra)
b0109805 1356 tcg_gen_addi_i32(var, var, extra);
2c0262af 1357 rm = (insn) & 0xf;
b26eefb6 1358 offset = load_reg(s, rm);
2c0262af 1359 if (!(insn & (1 << 23)))
b0109805 1360 tcg_gen_sub_i32(var, var, offset);
2c0262af 1361 else
b0109805 1362 tcg_gen_add_i32(var, var, offset);
7d1b0095 1363 tcg_temp_free_i32(offset);
2c0262af
FB
1364 }
1365}
1366
5aaebd13
PM
1367static TCGv_ptr get_fpstatus_ptr(int neon)
1368{
1369 TCGv_ptr statusptr = tcg_temp_new_ptr();
1370 int offset;
1371 if (neon) {
0ecb72a5 1372 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1373 } else {
0ecb72a5 1374 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1375 }
1376 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1377 return statusptr;
1378}
1379
4373f3ce
PB
1380#define VFP_OP2(name) \
1381static inline void gen_vfp_##name(int dp) \
1382{ \
ae1857ec
PM
1383 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1384 if (dp) { \
1385 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1386 } else { \
1387 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1388 } \
1389 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1390}
1391
4373f3ce
PB
1392VFP_OP2(add)
1393VFP_OP2(sub)
1394VFP_OP2(mul)
1395VFP_OP2(div)
1396
1397#undef VFP_OP2
1398
605a6aed
PM
1399static inline void gen_vfp_F1_mul(int dp)
1400{
1401 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1402 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1403 if (dp) {
ae1857ec 1404 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1405 } else {
ae1857ec 1406 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1407 }
ae1857ec 1408 tcg_temp_free_ptr(fpst);
605a6aed
PM
1409}
1410
1411static inline void gen_vfp_F1_neg(int dp)
1412{
1413 /* Like gen_vfp_neg() but put result in F1 */
1414 if (dp) {
1415 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1416 } else {
1417 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1418 }
1419}
1420
4373f3ce
PB
1421static inline void gen_vfp_abs(int dp)
1422{
1423 if (dp)
1424 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1425 else
1426 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1427}
1428
1429static inline void gen_vfp_neg(int dp)
1430{
1431 if (dp)
1432 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1433 else
1434 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1435}
1436
1437static inline void gen_vfp_sqrt(int dp)
1438{
1439 if (dp)
1440 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1441 else
1442 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1443}
1444
1445static inline void gen_vfp_cmp(int dp)
1446{
1447 if (dp)
1448 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1449 else
1450 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1451}
1452
1453static inline void gen_vfp_cmpe(int dp)
1454{
1455 if (dp)
1456 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1457 else
1458 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1459}
1460
1461static inline void gen_vfp_F1_ld0(int dp)
1462{
1463 if (dp)
5b340b51 1464 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1465 else
5b340b51 1466 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1467}
1468
5500b06c
PM
1469#define VFP_GEN_ITOF(name) \
1470static inline void gen_vfp_##name(int dp, int neon) \
1471{ \
5aaebd13 1472 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1473 if (dp) { \
1474 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1475 } else { \
1476 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1477 } \
b7fa9214 1478 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1479}
1480
5500b06c
PM
1481VFP_GEN_ITOF(uito)
1482VFP_GEN_ITOF(sito)
1483#undef VFP_GEN_ITOF
4373f3ce 1484
5500b06c
PM
1485#define VFP_GEN_FTOI(name) \
1486static inline void gen_vfp_##name(int dp, int neon) \
1487{ \
5aaebd13 1488 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1489 if (dp) { \
1490 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1491 } else { \
1492 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1493 } \
b7fa9214 1494 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1495}
1496
5500b06c
PM
1497VFP_GEN_FTOI(toui)
1498VFP_GEN_FTOI(touiz)
1499VFP_GEN_FTOI(tosi)
1500VFP_GEN_FTOI(tosiz)
1501#undef VFP_GEN_FTOI
4373f3ce 1502
16d5b3ca 1503#define VFP_GEN_FIX(name, round) \
5500b06c 1504static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1505{ \
39d5492a 1506 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1507 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1508 if (dp) { \
16d5b3ca
WN
1509 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1510 statusptr); \
5500b06c 1511 } else { \
16d5b3ca
WN
1512 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1513 statusptr); \
5500b06c 1514 } \
b75263d6 1515 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1516 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1517}
16d5b3ca
WN
1518VFP_GEN_FIX(tosh, _round_to_zero)
1519VFP_GEN_FIX(tosl, _round_to_zero)
1520VFP_GEN_FIX(touh, _round_to_zero)
1521VFP_GEN_FIX(toul, _round_to_zero)
1522VFP_GEN_FIX(shto, )
1523VFP_GEN_FIX(slto, )
1524VFP_GEN_FIX(uhto, )
1525VFP_GEN_FIX(ulto, )
4373f3ce 1526#undef VFP_GEN_FIX
9ee6e8bb 1527
39d5492a 1528static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1529{
08307563 1530 if (dp) {
12dcc321 1531 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1532 } else {
12dcc321 1533 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1534 }
b5ff1b31
FB
1535}
1536
39d5492a 1537static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1538{
08307563 1539 if (dp) {
12dcc321 1540 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1541 } else {
12dcc321 1542 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1543 }
b5ff1b31
FB
1544}
1545
c39c2b90 1546static inline long vfp_reg_offset(bool dp, unsigned reg)
8e96005d 1547{
9a2b5256 1548 if (dp) {
c39c2b90 1549 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
8e96005d 1550 } else {
c39c2b90 1551 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
9a2b5256
RH
1552 if (reg & 1) {
1553 ofs += offsetof(CPU_DoubleU, l.upper);
1554 } else {
1555 ofs += offsetof(CPU_DoubleU, l.lower);
1556 }
1557 return ofs;
8e96005d
FB
1558 }
1559}
9ee6e8bb
PB
1560
1561/* Return the offset of a 32-bit piece of a NEON register.
1562 zero is the least significant end of the register. */
1563static inline long
1564neon_reg_offset (int reg, int n)
1565{
1566 int sreg;
1567 sreg = reg * 2 + n;
1568 return vfp_reg_offset(0, sreg);
1569}
1570
39d5492a 1571static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1572{
39d5492a 1573 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1574 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1575 return tmp;
1576}
1577
39d5492a 1578static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1579{
1580 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1581 tcg_temp_free_i32(var);
8f8e3aa4
PB
1582}
1583
a7812ae4 1584static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1585{
1586 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1587}
1588
a7812ae4 1589static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1590{
1591 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1592}
1593
1a66ac61
RH
1594static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1595{
1596 TCGv_ptr ret = tcg_temp_new_ptr();
1597 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1598 return ret;
1599}
1600
4373f3ce
PB
1601#define tcg_gen_ld_f32 tcg_gen_ld_i32
1602#define tcg_gen_ld_f64 tcg_gen_ld_i64
1603#define tcg_gen_st_f32 tcg_gen_st_i32
1604#define tcg_gen_st_f64 tcg_gen_st_i64
1605
b7bcbe95
FB
1606static inline void gen_mov_F0_vreg(int dp, int reg)
1607{
1608 if (dp)
4373f3ce 1609 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1610 else
4373f3ce 1611 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1612}
1613
1614static inline void gen_mov_F1_vreg(int dp, int reg)
1615{
1616 if (dp)
4373f3ce 1617 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1618 else
4373f3ce 1619 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1620}
1621
1622static inline void gen_mov_vreg_F0(int dp, int reg)
1623{
1624 if (dp)
4373f3ce 1625 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1626 else
4373f3ce 1627 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1628}
1629
18c9b560
AZ
1630#define ARM_CP_RW_BIT (1 << 20)
1631
a7812ae4 1632static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1633{
0ecb72a5 1634 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1635}
1636
a7812ae4 1637static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1638{
0ecb72a5 1639 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1640}
1641
39d5492a 1642static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1643{
39d5492a 1644 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1645 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1646 return var;
e677137d
PB
1647}
1648
39d5492a 1649static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1650{
0ecb72a5 1651 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1652 tcg_temp_free_i32(var);
e677137d
PB
1653}
1654
1655static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1656{
1657 iwmmxt_store_reg(cpu_M0, rn);
1658}
1659
1660static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1661{
1662 iwmmxt_load_reg(cpu_M0, rn);
1663}
1664
1665static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1666{
1667 iwmmxt_load_reg(cpu_V1, rn);
1668 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1669}
1670
1671static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1672{
1673 iwmmxt_load_reg(cpu_V1, rn);
1674 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1675}
1676
1677static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1678{
1679 iwmmxt_load_reg(cpu_V1, rn);
1680 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1681}
1682
1683#define IWMMXT_OP(name) \
1684static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1685{ \
1686 iwmmxt_load_reg(cpu_V1, rn); \
1687 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1688}
1689
477955bd
PM
1690#define IWMMXT_OP_ENV(name) \
1691static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1692{ \
1693 iwmmxt_load_reg(cpu_V1, rn); \
1694 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1695}
1696
1697#define IWMMXT_OP_ENV_SIZE(name) \
1698IWMMXT_OP_ENV(name##b) \
1699IWMMXT_OP_ENV(name##w) \
1700IWMMXT_OP_ENV(name##l)
e677137d 1701
477955bd 1702#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1703static inline void gen_op_iwmmxt_##name##_M0(void) \
1704{ \
477955bd 1705 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1706}
1707
1708IWMMXT_OP(maddsq)
1709IWMMXT_OP(madduq)
1710IWMMXT_OP(sadb)
1711IWMMXT_OP(sadw)
1712IWMMXT_OP(mulslw)
1713IWMMXT_OP(mulshw)
1714IWMMXT_OP(mululw)
1715IWMMXT_OP(muluhw)
1716IWMMXT_OP(macsw)
1717IWMMXT_OP(macuw)
1718
477955bd
PM
1719IWMMXT_OP_ENV_SIZE(unpackl)
1720IWMMXT_OP_ENV_SIZE(unpackh)
1721
1722IWMMXT_OP_ENV1(unpacklub)
1723IWMMXT_OP_ENV1(unpackluw)
1724IWMMXT_OP_ENV1(unpacklul)
1725IWMMXT_OP_ENV1(unpackhub)
1726IWMMXT_OP_ENV1(unpackhuw)
1727IWMMXT_OP_ENV1(unpackhul)
1728IWMMXT_OP_ENV1(unpacklsb)
1729IWMMXT_OP_ENV1(unpacklsw)
1730IWMMXT_OP_ENV1(unpacklsl)
1731IWMMXT_OP_ENV1(unpackhsb)
1732IWMMXT_OP_ENV1(unpackhsw)
1733IWMMXT_OP_ENV1(unpackhsl)
1734
1735IWMMXT_OP_ENV_SIZE(cmpeq)
1736IWMMXT_OP_ENV_SIZE(cmpgtu)
1737IWMMXT_OP_ENV_SIZE(cmpgts)
1738
1739IWMMXT_OP_ENV_SIZE(mins)
1740IWMMXT_OP_ENV_SIZE(minu)
1741IWMMXT_OP_ENV_SIZE(maxs)
1742IWMMXT_OP_ENV_SIZE(maxu)
1743
1744IWMMXT_OP_ENV_SIZE(subn)
1745IWMMXT_OP_ENV_SIZE(addn)
1746IWMMXT_OP_ENV_SIZE(subu)
1747IWMMXT_OP_ENV_SIZE(addu)
1748IWMMXT_OP_ENV_SIZE(subs)
1749IWMMXT_OP_ENV_SIZE(adds)
1750
1751IWMMXT_OP_ENV(avgb0)
1752IWMMXT_OP_ENV(avgb1)
1753IWMMXT_OP_ENV(avgw0)
1754IWMMXT_OP_ENV(avgw1)
e677137d 1755
477955bd
PM
1756IWMMXT_OP_ENV(packuw)
1757IWMMXT_OP_ENV(packul)
1758IWMMXT_OP_ENV(packuq)
1759IWMMXT_OP_ENV(packsw)
1760IWMMXT_OP_ENV(packsl)
1761IWMMXT_OP_ENV(packsq)
e677137d 1762
e677137d
PB
1763static void gen_op_iwmmxt_set_mup(void)
1764{
39d5492a 1765 TCGv_i32 tmp;
e677137d
PB
1766 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1767 tcg_gen_ori_i32(tmp, tmp, 2);
1768 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1769}
1770
1771static void gen_op_iwmmxt_set_cup(void)
1772{
39d5492a 1773 TCGv_i32 tmp;
e677137d
PB
1774 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1775 tcg_gen_ori_i32(tmp, tmp, 1);
1776 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1777}
1778
1779static void gen_op_iwmmxt_setpsr_nz(void)
1780{
39d5492a 1781 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1782 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1783 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1784}
1785
1786static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1787{
1788 iwmmxt_load_reg(cpu_V1, rn);
86831435 1789 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1790 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1791}
1792
39d5492a
PM
1793static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1794 TCGv_i32 dest)
18c9b560
AZ
1795{
1796 int rd;
1797 uint32_t offset;
39d5492a 1798 TCGv_i32 tmp;
18c9b560
AZ
1799
1800 rd = (insn >> 16) & 0xf;
da6b5335 1801 tmp = load_reg(s, rd);
18c9b560
AZ
1802
1803 offset = (insn & 0xff) << ((insn >> 7) & 2);
1804 if (insn & (1 << 24)) {
1805 /* Pre indexed */
1806 if (insn & (1 << 23))
da6b5335 1807 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1808 else
da6b5335
FN
1809 tcg_gen_addi_i32(tmp, tmp, -offset);
1810 tcg_gen_mov_i32(dest, tmp);
18c9b560 1811 if (insn & (1 << 21))
da6b5335
FN
1812 store_reg(s, rd, tmp);
1813 else
7d1b0095 1814 tcg_temp_free_i32(tmp);
18c9b560
AZ
1815 } else if (insn & (1 << 21)) {
1816 /* Post indexed */
da6b5335 1817 tcg_gen_mov_i32(dest, tmp);
18c9b560 1818 if (insn & (1 << 23))
da6b5335 1819 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1820 else
da6b5335
FN
1821 tcg_gen_addi_i32(tmp, tmp, -offset);
1822 store_reg(s, rd, tmp);
18c9b560
AZ
1823 } else if (!(insn & (1 << 23)))
1824 return 1;
1825 return 0;
1826}
1827
39d5492a 1828static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1829{
1830 int rd = (insn >> 0) & 0xf;
39d5492a 1831 TCGv_i32 tmp;
18c9b560 1832
da6b5335
FN
1833 if (insn & (1 << 8)) {
1834 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1835 return 1;
da6b5335
FN
1836 } else {
1837 tmp = iwmmxt_load_creg(rd);
1838 }
1839 } else {
7d1b0095 1840 tmp = tcg_temp_new_i32();
da6b5335 1841 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1842 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1843 }
1844 tcg_gen_andi_i32(tmp, tmp, mask);
1845 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1846 tcg_temp_free_i32(tmp);
18c9b560
AZ
1847 return 0;
1848}
1849
a1c7273b 1850/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1851 (ie. an undefined instruction). */
7dcc1f89 1852static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1853{
1854 int rd, wrd;
1855 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1856 TCGv_i32 addr;
1857 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1858
1859 if ((insn & 0x0e000e00) == 0x0c000000) {
1860 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1861 wrd = insn & 0xf;
1862 rdlo = (insn >> 12) & 0xf;
1863 rdhi = (insn >> 16) & 0xf;
1864 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1865 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1866 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1867 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1868 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1869 } else { /* TMCRR */
da6b5335
FN
1870 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1871 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1872 gen_op_iwmmxt_set_mup();
1873 }
1874 return 0;
1875 }
1876
1877 wrd = (insn >> 12) & 0xf;
7d1b0095 1878 addr = tcg_temp_new_i32();
da6b5335 1879 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1880 tcg_temp_free_i32(addr);
18c9b560 1881 return 1;
da6b5335 1882 }
18c9b560
AZ
1883 if (insn & ARM_CP_RW_BIT) {
1884 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1885 tmp = tcg_temp_new_i32();
12dcc321 1886 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1887 iwmmxt_store_creg(wrd, tmp);
18c9b560 1888 } else {
e677137d
PB
1889 i = 1;
1890 if (insn & (1 << 8)) {
1891 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1892 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1893 i = 0;
1894 } else { /* WLDRW wRd */
29531141 1895 tmp = tcg_temp_new_i32();
12dcc321 1896 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1897 }
1898 } else {
29531141 1899 tmp = tcg_temp_new_i32();
e677137d 1900 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1901 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1902 } else { /* WLDRB */
12dcc321 1903 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1904 }
1905 }
1906 if (i) {
1907 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1908 tcg_temp_free_i32(tmp);
e677137d 1909 }
18c9b560
AZ
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 }
1912 } else {
1913 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1914 tmp = iwmmxt_load_creg(wrd);
12dcc321 1915 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1916 } else {
1917 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1918 tmp = tcg_temp_new_i32();
e677137d
PB
1919 if (insn & (1 << 8)) {
1920 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1921 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1922 } else { /* WSTRW wRd */
ecc7b3aa 1923 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1924 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1925 }
1926 } else {
1927 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1928 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1929 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1930 } else { /* WSTRB */
ecc7b3aa 1931 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1932 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1933 }
1934 }
18c9b560 1935 }
29531141 1936 tcg_temp_free_i32(tmp);
18c9b560 1937 }
7d1b0095 1938 tcg_temp_free_i32(addr);
18c9b560
AZ
1939 return 0;
1940 }
1941
1942 if ((insn & 0x0f000000) != 0x0e000000)
1943 return 1;
1944
1945 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1946 case 0x000: /* WOR */
1947 wrd = (insn >> 12) & 0xf;
1948 rd0 = (insn >> 0) & 0xf;
1949 rd1 = (insn >> 16) & 0xf;
1950 gen_op_iwmmxt_movq_M0_wRn(rd0);
1951 gen_op_iwmmxt_orq_M0_wRn(rd1);
1952 gen_op_iwmmxt_setpsr_nz();
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 gen_op_iwmmxt_set_cup();
1956 break;
1957 case 0x011: /* TMCR */
1958 if (insn & 0xf)
1959 return 1;
1960 rd = (insn >> 12) & 0xf;
1961 wrd = (insn >> 16) & 0xf;
1962 switch (wrd) {
1963 case ARM_IWMMXT_wCID:
1964 case ARM_IWMMXT_wCASF:
1965 break;
1966 case ARM_IWMMXT_wCon:
1967 gen_op_iwmmxt_set_cup();
1968 /* Fall through. */
1969 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1970 tmp = iwmmxt_load_creg(wrd);
1971 tmp2 = load_reg(s, rd);
f669df27 1972 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1973 tcg_temp_free_i32(tmp2);
da6b5335 1974 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1975 break;
1976 case ARM_IWMMXT_wCGR0:
1977 case ARM_IWMMXT_wCGR1:
1978 case ARM_IWMMXT_wCGR2:
1979 case ARM_IWMMXT_wCGR3:
1980 gen_op_iwmmxt_set_cup();
da6b5335
FN
1981 tmp = load_reg(s, rd);
1982 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1983 break;
1984 default:
1985 return 1;
1986 }
1987 break;
1988 case 0x100: /* WXOR */
1989 wrd = (insn >> 12) & 0xf;
1990 rd0 = (insn >> 0) & 0xf;
1991 rd1 = (insn >> 16) & 0xf;
1992 gen_op_iwmmxt_movq_M0_wRn(rd0);
1993 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1994 gen_op_iwmmxt_setpsr_nz();
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x111: /* TMRC */
2000 if (insn & 0xf)
2001 return 1;
2002 rd = (insn >> 12) & 0xf;
2003 wrd = (insn >> 16) & 0xf;
da6b5335
FN
2004 tmp = iwmmxt_load_creg(wrd);
2005 store_reg(s, rd, tmp);
18c9b560
AZ
2006 break;
2007 case 0x300: /* WANDN */
2008 wrd = (insn >> 12) & 0xf;
2009 rd0 = (insn >> 0) & 0xf;
2010 rd1 = (insn >> 16) & 0xf;
2011 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 2012 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
2013 gen_op_iwmmxt_andq_M0_wRn(rd1);
2014 gen_op_iwmmxt_setpsr_nz();
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2018 break;
2019 case 0x200: /* WAND */
2020 wrd = (insn >> 12) & 0xf;
2021 rd0 = (insn >> 0) & 0xf;
2022 rd1 = (insn >> 16) & 0xf;
2023 gen_op_iwmmxt_movq_M0_wRn(rd0);
2024 gen_op_iwmmxt_andq_M0_wRn(rd1);
2025 gen_op_iwmmxt_setpsr_nz();
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 gen_op_iwmmxt_set_cup();
2029 break;
2030 case 0x810: case 0xa10: /* WMADD */
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 0) & 0xf;
2033 rd1 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2037 else
2038 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2041 break;
2042 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2043 wrd = (insn >> 12) & 0xf;
2044 rd0 = (insn >> 16) & 0xf;
2045 rd1 = (insn >> 0) & 0xf;
2046 gen_op_iwmmxt_movq_M0_wRn(rd0);
2047 switch ((insn >> 22) & 3) {
2048 case 0:
2049 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2050 break;
2051 case 1:
2052 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2053 break;
2054 case 2:
2055 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2056 break;
2057 case 3:
2058 return 1;
2059 }
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2063 break;
2064 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2065 wrd = (insn >> 12) & 0xf;
2066 rd0 = (insn >> 16) & 0xf;
2067 rd1 = (insn >> 0) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 switch ((insn >> 22) & 3) {
2070 case 0:
2071 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2072 break;
2073 case 1:
2074 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2075 break;
2076 case 2:
2077 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2078 break;
2079 case 3:
2080 return 1;
2081 }
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 rd1 = (insn >> 0) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 if (insn & (1 << 22))
2092 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2095 if (!(insn & (1 << 20)))
2096 gen_op_iwmmxt_addl_M0_wRn(wrd);
2097 gen_op_iwmmxt_movq_wRn_M0(wrd);
2098 gen_op_iwmmxt_set_mup();
2099 break;
2100 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2101 wrd = (insn >> 12) & 0xf;
2102 rd0 = (insn >> 16) & 0xf;
2103 rd1 = (insn >> 0) & 0xf;
2104 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2105 if (insn & (1 << 21)) {
2106 if (insn & (1 << 20))
2107 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2110 } else {
2111 if (insn & (1 << 20))
2112 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2113 else
2114 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2115 }
18c9b560
AZ
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 break;
2119 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 rd1 = (insn >> 0) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
2124 if (insn & (1 << 21))
2125 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2126 else
2127 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2128 if (!(insn & (1 << 20))) {
e677137d
PB
2129 iwmmxt_load_reg(cpu_V1, wrd);
2130 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2131 }
2132 gen_op_iwmmxt_movq_wRn_M0(wrd);
2133 gen_op_iwmmxt_set_mup();
2134 break;
2135 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 rd1 = (insn >> 0) & 0xf;
2139 gen_op_iwmmxt_movq_M0_wRn(rd0);
2140 switch ((insn >> 22) & 3) {
2141 case 0:
2142 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2143 break;
2144 case 1:
2145 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2146 break;
2147 case 2:
2148 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2149 break;
2150 case 3:
2151 return 1;
2152 }
2153 gen_op_iwmmxt_movq_wRn_M0(wrd);
2154 gen_op_iwmmxt_set_mup();
2155 gen_op_iwmmxt_set_cup();
2156 break;
2157 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2158 wrd = (insn >> 12) & 0xf;
2159 rd0 = (insn >> 16) & 0xf;
2160 rd1 = (insn >> 0) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2162 if (insn & (1 << 22)) {
2163 if (insn & (1 << 20))
2164 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2165 else
2166 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2167 } else {
2168 if (insn & (1 << 20))
2169 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2170 else
2171 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2172 }
18c9b560
AZ
2173 gen_op_iwmmxt_movq_wRn_M0(wrd);
2174 gen_op_iwmmxt_set_mup();
2175 gen_op_iwmmxt_set_cup();
2176 break;
2177 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2178 wrd = (insn >> 12) & 0xf;
2179 rd0 = (insn >> 16) & 0xf;
2180 rd1 = (insn >> 0) & 0xf;
2181 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2182 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2183 tcg_gen_andi_i32(tmp, tmp, 7);
2184 iwmmxt_load_reg(cpu_V1, rd1);
2185 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2186 tcg_temp_free_i32(tmp);
18c9b560
AZ
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2189 break;
2190 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2191 if (((insn >> 6) & 3) == 3)
2192 return 1;
18c9b560
AZ
2193 rd = (insn >> 12) & 0xf;
2194 wrd = (insn >> 16) & 0xf;
da6b5335 2195 tmp = load_reg(s, rd);
18c9b560
AZ
2196 gen_op_iwmmxt_movq_M0_wRn(wrd);
2197 switch ((insn >> 6) & 3) {
2198 case 0:
da6b5335
FN
2199 tmp2 = tcg_const_i32(0xff);
2200 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2201 break;
2202 case 1:
da6b5335
FN
2203 tmp2 = tcg_const_i32(0xffff);
2204 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2205 break;
2206 case 2:
da6b5335
FN
2207 tmp2 = tcg_const_i32(0xffffffff);
2208 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2209 break;
da6b5335 2210 default:
f764718d
RH
2211 tmp2 = NULL;
2212 tmp3 = NULL;
18c9b560 2213 }
da6b5335 2214 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2215 tcg_temp_free_i32(tmp3);
2216 tcg_temp_free_i32(tmp2);
7d1b0095 2217 tcg_temp_free_i32(tmp);
18c9b560
AZ
2218 gen_op_iwmmxt_movq_wRn_M0(wrd);
2219 gen_op_iwmmxt_set_mup();
2220 break;
2221 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2222 rd = (insn >> 12) & 0xf;
2223 wrd = (insn >> 16) & 0xf;
da6b5335 2224 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2225 return 1;
2226 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2227 tmp = tcg_temp_new_i32();
18c9b560
AZ
2228 switch ((insn >> 22) & 3) {
2229 case 0:
da6b5335 2230 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2231 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2232 if (insn & 8) {
2233 tcg_gen_ext8s_i32(tmp, tmp);
2234 } else {
2235 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2236 }
2237 break;
2238 case 1:
da6b5335 2239 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2240 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2241 if (insn & 8) {
2242 tcg_gen_ext16s_i32(tmp, tmp);
2243 } else {
2244 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2245 }
2246 break;
2247 case 2:
da6b5335 2248 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2249 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2250 break;
18c9b560 2251 }
da6b5335 2252 store_reg(s, rd, tmp);
18c9b560
AZ
2253 break;
2254 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2255 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2256 return 1;
da6b5335 2257 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2258 switch ((insn >> 22) & 3) {
2259 case 0:
da6b5335 2260 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2261 break;
2262 case 1:
da6b5335 2263 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2264 break;
2265 case 2:
da6b5335 2266 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2267 break;
18c9b560 2268 }
da6b5335
FN
2269 tcg_gen_shli_i32(tmp, tmp, 28);
2270 gen_set_nzcv(tmp);
7d1b0095 2271 tcg_temp_free_i32(tmp);
18c9b560
AZ
2272 break;
2273 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2274 if (((insn >> 6) & 3) == 3)
2275 return 1;
18c9b560
AZ
2276 rd = (insn >> 12) & 0xf;
2277 wrd = (insn >> 16) & 0xf;
da6b5335 2278 tmp = load_reg(s, rd);
18c9b560
AZ
2279 switch ((insn >> 6) & 3) {
2280 case 0:
da6b5335 2281 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2282 break;
2283 case 1:
da6b5335 2284 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2285 break;
2286 case 2:
da6b5335 2287 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2288 break;
18c9b560 2289 }
7d1b0095 2290 tcg_temp_free_i32(tmp);
18c9b560
AZ
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 break;
2294 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2295 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2296 return 1;
da6b5335 2297 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2298 tmp2 = tcg_temp_new_i32();
da6b5335 2299 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2300 switch ((insn >> 22) & 3) {
2301 case 0:
2302 for (i = 0; i < 7; i ++) {
da6b5335
FN
2303 tcg_gen_shli_i32(tmp2, tmp2, 4);
2304 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2305 }
2306 break;
2307 case 1:
2308 for (i = 0; i < 3; i ++) {
da6b5335
FN
2309 tcg_gen_shli_i32(tmp2, tmp2, 8);
2310 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2311 }
2312 break;
2313 case 2:
da6b5335
FN
2314 tcg_gen_shli_i32(tmp2, tmp2, 16);
2315 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2316 break;
18c9b560 2317 }
da6b5335 2318 gen_set_nzcv(tmp);
7d1b0095
PM
2319 tcg_temp_free_i32(tmp2);
2320 tcg_temp_free_i32(tmp);
18c9b560
AZ
2321 break;
2322 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2323 wrd = (insn >> 12) & 0xf;
2324 rd0 = (insn >> 16) & 0xf;
2325 gen_op_iwmmxt_movq_M0_wRn(rd0);
2326 switch ((insn >> 22) & 3) {
2327 case 0:
e677137d 2328 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2329 break;
2330 case 1:
e677137d 2331 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2332 break;
2333 case 2:
e677137d 2334 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2335 break;
2336 case 3:
2337 return 1;
2338 }
2339 gen_op_iwmmxt_movq_wRn_M0(wrd);
2340 gen_op_iwmmxt_set_mup();
2341 break;
2342 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2343 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2344 return 1;
da6b5335 2345 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2346 tmp2 = tcg_temp_new_i32();
da6b5335 2347 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2348 switch ((insn >> 22) & 3) {
2349 case 0:
2350 for (i = 0; i < 7; i ++) {
da6b5335
FN
2351 tcg_gen_shli_i32(tmp2, tmp2, 4);
2352 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2353 }
2354 break;
2355 case 1:
2356 for (i = 0; i < 3; i ++) {
da6b5335
FN
2357 tcg_gen_shli_i32(tmp2, tmp2, 8);
2358 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2359 }
2360 break;
2361 case 2:
da6b5335
FN
2362 tcg_gen_shli_i32(tmp2, tmp2, 16);
2363 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2364 break;
18c9b560 2365 }
da6b5335 2366 gen_set_nzcv(tmp);
7d1b0095
PM
2367 tcg_temp_free_i32(tmp2);
2368 tcg_temp_free_i32(tmp);
18c9b560
AZ
2369 break;
2370 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2371 rd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
da6b5335 2373 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2374 return 1;
2375 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2376 tmp = tcg_temp_new_i32();
18c9b560
AZ
2377 switch ((insn >> 22) & 3) {
2378 case 0:
da6b5335 2379 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2380 break;
2381 case 1:
da6b5335 2382 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2383 break;
2384 case 2:
da6b5335 2385 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2386 break;
18c9b560 2387 }
da6b5335 2388 store_reg(s, rd, tmp);
18c9b560
AZ
2389 break;
2390 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2391 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2392 wrd = (insn >> 12) & 0xf;
2393 rd0 = (insn >> 16) & 0xf;
2394 rd1 = (insn >> 0) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
2396 switch ((insn >> 22) & 3) {
2397 case 0:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2400 else
2401 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2402 break;
2403 case 1:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2406 else
2407 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2408 break;
2409 case 2:
2410 if (insn & (1 << 21))
2411 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2412 else
2413 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2414 break;
2415 case 3:
2416 return 1;
2417 }
2418 gen_op_iwmmxt_movq_wRn_M0(wrd);
2419 gen_op_iwmmxt_set_mup();
2420 gen_op_iwmmxt_set_cup();
2421 break;
2422 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2423 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2424 wrd = (insn >> 12) & 0xf;
2425 rd0 = (insn >> 16) & 0xf;
2426 gen_op_iwmmxt_movq_M0_wRn(rd0);
2427 switch ((insn >> 22) & 3) {
2428 case 0:
2429 if (insn & (1 << 21))
2430 gen_op_iwmmxt_unpacklsb_M0();
2431 else
2432 gen_op_iwmmxt_unpacklub_M0();
2433 break;
2434 case 1:
2435 if (insn & (1 << 21))
2436 gen_op_iwmmxt_unpacklsw_M0();
2437 else
2438 gen_op_iwmmxt_unpackluw_M0();
2439 break;
2440 case 2:
2441 if (insn & (1 << 21))
2442 gen_op_iwmmxt_unpacklsl_M0();
2443 else
2444 gen_op_iwmmxt_unpacklul_M0();
2445 break;
2446 case 3:
2447 return 1;
2448 }
2449 gen_op_iwmmxt_movq_wRn_M0(wrd);
2450 gen_op_iwmmxt_set_mup();
2451 gen_op_iwmmxt_set_cup();
2452 break;
2453 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2454 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2455 wrd = (insn >> 12) & 0xf;
2456 rd0 = (insn >> 16) & 0xf;
2457 gen_op_iwmmxt_movq_M0_wRn(rd0);
2458 switch ((insn >> 22) & 3) {
2459 case 0:
2460 if (insn & (1 << 21))
2461 gen_op_iwmmxt_unpackhsb_M0();
2462 else
2463 gen_op_iwmmxt_unpackhub_M0();
2464 break;
2465 case 1:
2466 if (insn & (1 << 21))
2467 gen_op_iwmmxt_unpackhsw_M0();
2468 else
2469 gen_op_iwmmxt_unpackhuw_M0();
2470 break;
2471 case 2:
2472 if (insn & (1 << 21))
2473 gen_op_iwmmxt_unpackhsl_M0();
2474 else
2475 gen_op_iwmmxt_unpackhul_M0();
2476 break;
2477 case 3:
2478 return 1;
2479 }
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 gen_op_iwmmxt_set_cup();
2483 break;
2484 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2485 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2486 if (((insn >> 22) & 3) == 0)
2487 return 1;
18c9b560
AZ
2488 wrd = (insn >> 12) & 0xf;
2489 rd0 = (insn >> 16) & 0xf;
2490 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2491 tmp = tcg_temp_new_i32();
da6b5335 2492 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2493 tcg_temp_free_i32(tmp);
18c9b560 2494 return 1;
da6b5335 2495 }
18c9b560 2496 switch ((insn >> 22) & 3) {
18c9b560 2497 case 1:
477955bd 2498 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2499 break;
2500 case 2:
477955bd 2501 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2502 break;
2503 case 3:
477955bd 2504 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2505 break;
2506 }
7d1b0095 2507 tcg_temp_free_i32(tmp);
18c9b560
AZ
2508 gen_op_iwmmxt_movq_wRn_M0(wrd);
2509 gen_op_iwmmxt_set_mup();
2510 gen_op_iwmmxt_set_cup();
2511 break;
2512 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2513 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2514 if (((insn >> 22) & 3) == 0)
2515 return 1;
18c9b560
AZ
2516 wrd = (insn >> 12) & 0xf;
2517 rd0 = (insn >> 16) & 0xf;
2518 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2519 tmp = tcg_temp_new_i32();
da6b5335 2520 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2521 tcg_temp_free_i32(tmp);
18c9b560 2522 return 1;
da6b5335 2523 }
18c9b560 2524 switch ((insn >> 22) & 3) {
18c9b560 2525 case 1:
477955bd 2526 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2527 break;
2528 case 2:
477955bd 2529 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2530 break;
2531 case 3:
477955bd 2532 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2533 break;
2534 }
7d1b0095 2535 tcg_temp_free_i32(tmp);
18c9b560
AZ
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2539 break;
2540 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2541 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2542 if (((insn >> 22) & 3) == 0)
2543 return 1;
18c9b560
AZ
2544 wrd = (insn >> 12) & 0xf;
2545 rd0 = (insn >> 16) & 0xf;
2546 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2547 tmp = tcg_temp_new_i32();
da6b5335 2548 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2549 tcg_temp_free_i32(tmp);
18c9b560 2550 return 1;
da6b5335 2551 }
18c9b560 2552 switch ((insn >> 22) & 3) {
18c9b560 2553 case 1:
477955bd 2554 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2555 break;
2556 case 2:
477955bd 2557 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2558 break;
2559 case 3:
477955bd 2560 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2561 break;
2562 }
7d1b0095 2563 tcg_temp_free_i32(tmp);
18c9b560
AZ
2564 gen_op_iwmmxt_movq_wRn_M0(wrd);
2565 gen_op_iwmmxt_set_mup();
2566 gen_op_iwmmxt_set_cup();
2567 break;
2568 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2569 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2570 if (((insn >> 22) & 3) == 0)
2571 return 1;
18c9b560
AZ
2572 wrd = (insn >> 12) & 0xf;
2573 rd0 = (insn >> 16) & 0xf;
2574 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2575 tmp = tcg_temp_new_i32();
18c9b560 2576 switch ((insn >> 22) & 3) {
18c9b560 2577 case 1:
da6b5335 2578 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2579 tcg_temp_free_i32(tmp);
18c9b560 2580 return 1;
da6b5335 2581 }
477955bd 2582 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2583 break;
2584 case 2:
da6b5335 2585 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2586 tcg_temp_free_i32(tmp);
18c9b560 2587 return 1;
da6b5335 2588 }
477955bd 2589 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2590 break;
2591 case 3:
da6b5335 2592 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2593 tcg_temp_free_i32(tmp);
18c9b560 2594 return 1;
da6b5335 2595 }
477955bd 2596 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2597 break;
2598 }
7d1b0095 2599 tcg_temp_free_i32(tmp);
18c9b560
AZ
2600 gen_op_iwmmxt_movq_wRn_M0(wrd);
2601 gen_op_iwmmxt_set_mup();
2602 gen_op_iwmmxt_set_cup();
2603 break;
2604 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2605 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 rd1 = (insn >> 0) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
2610 switch ((insn >> 22) & 3) {
2611 case 0:
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2614 else
2615 gen_op_iwmmxt_minub_M0_wRn(rd1);
2616 break;
2617 case 1:
2618 if (insn & (1 << 21))
2619 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2620 else
2621 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2622 break;
2623 case 2:
2624 if (insn & (1 << 21))
2625 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2626 else
2627 gen_op_iwmmxt_minul_M0_wRn(rd1);
2628 break;
2629 case 3:
2630 return 1;
2631 }
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2634 break;
2635 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2636 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2637 wrd = (insn >> 12) & 0xf;
2638 rd0 = (insn >> 16) & 0xf;
2639 rd1 = (insn >> 0) & 0xf;
2640 gen_op_iwmmxt_movq_M0_wRn(rd0);
2641 switch ((insn >> 22) & 3) {
2642 case 0:
2643 if (insn & (1 << 21))
2644 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2645 else
2646 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2647 break;
2648 case 1:
2649 if (insn & (1 << 21))
2650 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2651 else
2652 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2653 break;
2654 case 2:
2655 if (insn & (1 << 21))
2656 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2657 else
2658 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2659 break;
2660 case 3:
2661 return 1;
2662 }
2663 gen_op_iwmmxt_movq_wRn_M0(wrd);
2664 gen_op_iwmmxt_set_mup();
2665 break;
2666 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2667 case 0x402: case 0x502: case 0x602: case 0x702:
2668 wrd = (insn >> 12) & 0xf;
2669 rd0 = (insn >> 16) & 0xf;
2670 rd1 = (insn >> 0) & 0xf;
2671 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2672 tmp = tcg_const_i32((insn >> 20) & 3);
2673 iwmmxt_load_reg(cpu_V1, rd1);
2674 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2675 tcg_temp_free_i32(tmp);
18c9b560
AZ
2676 gen_op_iwmmxt_movq_wRn_M0(wrd);
2677 gen_op_iwmmxt_set_mup();
2678 break;
2679 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2680 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2681 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2682 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2683 wrd = (insn >> 12) & 0xf;
2684 rd0 = (insn >> 16) & 0xf;
2685 rd1 = (insn >> 0) & 0xf;
2686 gen_op_iwmmxt_movq_M0_wRn(rd0);
2687 switch ((insn >> 20) & 0xf) {
2688 case 0x0:
2689 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2690 break;
2691 case 0x1:
2692 gen_op_iwmmxt_subub_M0_wRn(rd1);
2693 break;
2694 case 0x3:
2695 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2696 break;
2697 case 0x4:
2698 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2699 break;
2700 case 0x5:
2701 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2702 break;
2703 case 0x7:
2704 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2705 break;
2706 case 0x8:
2707 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2708 break;
2709 case 0x9:
2710 gen_op_iwmmxt_subul_M0_wRn(rd1);
2711 break;
2712 case 0xb:
2713 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2714 break;
2715 default:
2716 return 1;
2717 }
2718 gen_op_iwmmxt_movq_wRn_M0(wrd);
2719 gen_op_iwmmxt_set_mup();
2720 gen_op_iwmmxt_set_cup();
2721 break;
2722 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2723 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2724 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2725 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2726 wrd = (insn >> 12) & 0xf;
2727 rd0 = (insn >> 16) & 0xf;
2728 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2729 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2730 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2731 tcg_temp_free_i32(tmp);
18c9b560
AZ
2732 gen_op_iwmmxt_movq_wRn_M0(wrd);
2733 gen_op_iwmmxt_set_mup();
2734 gen_op_iwmmxt_set_cup();
2735 break;
2736 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2737 case 0x418: case 0x518: case 0x618: case 0x718:
2738 case 0x818: case 0x918: case 0xa18: case 0xb18:
2739 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2740 wrd = (insn >> 12) & 0xf;
2741 rd0 = (insn >> 16) & 0xf;
2742 rd1 = (insn >> 0) & 0xf;
2743 gen_op_iwmmxt_movq_M0_wRn(rd0);
2744 switch ((insn >> 20) & 0xf) {
2745 case 0x0:
2746 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2747 break;
2748 case 0x1:
2749 gen_op_iwmmxt_addub_M0_wRn(rd1);
2750 break;
2751 case 0x3:
2752 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2753 break;
2754 case 0x4:
2755 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2756 break;
2757 case 0x5:
2758 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2759 break;
2760 case 0x7:
2761 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2762 break;
2763 case 0x8:
2764 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2765 break;
2766 case 0x9:
2767 gen_op_iwmmxt_addul_M0_wRn(rd1);
2768 break;
2769 case 0xb:
2770 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2771 break;
2772 default:
2773 return 1;
2774 }
2775 gen_op_iwmmxt_movq_wRn_M0(wrd);
2776 gen_op_iwmmxt_set_mup();
2777 gen_op_iwmmxt_set_cup();
2778 break;
2779 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2780 case 0x408: case 0x508: case 0x608: case 0x708:
2781 case 0x808: case 0x908: case 0xa08: case 0xb08:
2782 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2783 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2784 return 1;
18c9b560
AZ
2785 wrd = (insn >> 12) & 0xf;
2786 rd0 = (insn >> 16) & 0xf;
2787 rd1 = (insn >> 0) & 0xf;
2788 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2789 switch ((insn >> 22) & 3) {
18c9b560
AZ
2790 case 1:
2791 if (insn & (1 << 21))
2792 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2793 else
2794 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2795 break;
2796 case 2:
2797 if (insn & (1 << 21))
2798 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2799 else
2800 gen_op_iwmmxt_packul_M0_wRn(rd1);
2801 break;
2802 case 3:
2803 if (insn & (1 << 21))
2804 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2805 else
2806 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2807 break;
2808 }
2809 gen_op_iwmmxt_movq_wRn_M0(wrd);
2810 gen_op_iwmmxt_set_mup();
2811 gen_op_iwmmxt_set_cup();
2812 break;
2813 case 0x201: case 0x203: case 0x205: case 0x207:
2814 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2815 case 0x211: case 0x213: case 0x215: case 0x217:
2816 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2817 wrd = (insn >> 5) & 0xf;
2818 rd0 = (insn >> 12) & 0xf;
2819 rd1 = (insn >> 0) & 0xf;
2820 if (rd0 == 0xf || rd1 == 0xf)
2821 return 1;
2822 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2823 tmp = load_reg(s, rd0);
2824 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2825 switch ((insn >> 16) & 0xf) {
2826 case 0x0: /* TMIA */
da6b5335 2827 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2828 break;
2829 case 0x8: /* TMIAPH */
da6b5335 2830 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2831 break;
2832 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2833 if (insn & (1 << 16))
da6b5335 2834 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2835 if (insn & (1 << 17))
da6b5335
FN
2836 tcg_gen_shri_i32(tmp2, tmp2, 16);
2837 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2838 break;
2839 default:
7d1b0095
PM
2840 tcg_temp_free_i32(tmp2);
2841 tcg_temp_free_i32(tmp);
18c9b560
AZ
2842 return 1;
2843 }
7d1b0095
PM
2844 tcg_temp_free_i32(tmp2);
2845 tcg_temp_free_i32(tmp);
18c9b560
AZ
2846 gen_op_iwmmxt_movq_wRn_M0(wrd);
2847 gen_op_iwmmxt_set_mup();
2848 break;
2849 default:
2850 return 1;
2851 }
2852
2853 return 0;
2854}
2855
a1c7273b 2856/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2857 (ie. an undefined instruction). */
7dcc1f89 2858static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2859{
2860 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2861 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2862
2863 if ((insn & 0x0ff00f10) == 0x0e200010) {
2864 /* Multiply with Internal Accumulate Format */
2865 rd0 = (insn >> 12) & 0xf;
2866 rd1 = insn & 0xf;
2867 acc = (insn >> 5) & 7;
2868
2869 if (acc != 0)
2870 return 1;
2871
3a554c0f
FN
2872 tmp = load_reg(s, rd0);
2873 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2874 switch ((insn >> 16) & 0xf) {
2875 case 0x0: /* MIA */
3a554c0f 2876 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2877 break;
2878 case 0x8: /* MIAPH */
3a554c0f 2879 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2880 break;
2881 case 0xc: /* MIABB */
2882 case 0xd: /* MIABT */
2883 case 0xe: /* MIATB */
2884 case 0xf: /* MIATT */
18c9b560 2885 if (insn & (1 << 16))
3a554c0f 2886 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2887 if (insn & (1 << 17))
3a554c0f
FN
2888 tcg_gen_shri_i32(tmp2, tmp2, 16);
2889 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2890 break;
2891 default:
2892 return 1;
2893 }
7d1b0095
PM
2894 tcg_temp_free_i32(tmp2);
2895 tcg_temp_free_i32(tmp);
18c9b560
AZ
2896
2897 gen_op_iwmmxt_movq_wRn_M0(acc);
2898 return 0;
2899 }
2900
2901 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2902 /* Internal Accumulator Access Format */
2903 rdhi = (insn >> 16) & 0xf;
2904 rdlo = (insn >> 12) & 0xf;
2905 acc = insn & 7;
2906
2907 if (acc != 0)
2908 return 1;
2909
2910 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2911 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2912 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2913 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2914 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2915 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2916 } else { /* MAR */
3a554c0f
FN
2917 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2918 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2919 }
2920 return 0;
2921 }
2922
2923 return 1;
2924}
2925
/* Helpers for extracting VFP register numbers from an instruction word.
 * A single-precision register number is 5 bits: four in the "big" field
 * plus one low bit in the "small" field.  A double-precision register
 * number keeps the four "big" bits as the low bits; the "small" bit is
 * the high bit, which is only valid on VFP3 and later (earlier cores
 * must have it clear or the insn is UNDEF, hence the "return 1").
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* The D (destination), N and M (source) register fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2945
4373f3ce 2946/* Move between integer and VFP cores. */
39d5492a 2947static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2948{
39d5492a 2949 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2950 tcg_gen_mov_i32(tmp, cpu_F0s);
2951 return tmp;
2952}
2953
39d5492a 2954static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2955{
2956 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2957 tcg_temp_free_i32(tmp);
4373f3ce
PB
2958}
2959
39d5492a 2960static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2961{
39d5492a 2962 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2963 if (shift)
2964 tcg_gen_shri_i32(var, var, shift);
86831435 2965 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2966 tcg_gen_shli_i32(tmp, var, 8);
2967 tcg_gen_or_i32(var, var, tmp);
2968 tcg_gen_shli_i32(tmp, var, 16);
2969 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2970 tcg_temp_free_i32(tmp);
ad69471c
PB
2971}
2972
39d5492a 2973static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2974{
39d5492a 2975 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2976 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2977 tcg_gen_shli_i32(tmp, var, 16);
2978 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2979 tcg_temp_free_i32(tmp);
ad69471c
PB
2980}
2981
39d5492a 2982static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2983{
39d5492a 2984 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2985 tcg_gen_andi_i32(var, var, 0xffff0000);
2986 tcg_gen_shri_i32(tmp, var, 16);
2987 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2988 tcg_temp_free_i32(tmp);
ad69471c
PB
2989}
2990
39d5492a 2991static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2992{
2993 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2994 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2995 switch (size) {
2996 case 0:
12dcc321 2997 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2998 gen_neon_dup_u8(tmp, 0);
2999 break;
3000 case 1:
12dcc321 3001 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
3002 gen_neon_dup_low16(tmp);
3003 break;
3004 case 2:
12dcc321 3005 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
3006 break;
3007 default: /* Avoid compiler warnings. */
3008 abort();
3009 }
3010 return tmp;
3011}
3012
/* VSEL: write frn or frm to Vd depending on the cached NZCV flags.
 * Only four conditions are encodable (insn[21:20]): eq, vs, ge, gt.
 * Always returns 0 (success).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag values to 64 bits so they can feed the
         * 64-bit movconds below.  ZF is only ever compared for equality
         * with zero, so zero-extension suffices; NF and VF are tested via
         * signed LT/GE comparisons, so they must be sign-extended to keep
         * their sign bits meaningful.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two chained movconds: first select on !Z, then overwrite
             * with frm again if the ge part (N == V) does not hold.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: the 32-bit flag registers can be used
         * directly, no widening required.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
3121
40cfacdd
WN
3122static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3123 uint32_t rm, uint32_t dp)
3124{
3125 uint32_t vmin = extract32(insn, 6, 1);
3126 TCGv_ptr fpst = get_fpstatus_ptr(0);
3127
3128 if (dp) {
3129 TCGv_i64 frn, frm, dest;
3130
3131 frn = tcg_temp_new_i64();
3132 frm = tcg_temp_new_i64();
3133 dest = tcg_temp_new_i64();
3134
3135 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3136 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3137 if (vmin) {
f71a2ae5 3138 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3139 } else {
f71a2ae5 3140 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3141 }
3142 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3143 tcg_temp_free_i64(frn);
3144 tcg_temp_free_i64(frm);
3145 tcg_temp_free_i64(dest);
3146 } else {
3147 TCGv_i32 frn, frm, dest;
3148
3149 frn = tcg_temp_new_i32();
3150 frm = tcg_temp_new_i32();
3151 dest = tcg_temp_new_i32();
3152
3153 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3154 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3155 if (vmin) {
f71a2ae5 3156 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3157 } else {
f71a2ae5 3158 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3159 }
3160 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3161 tcg_temp_free_i32(frn);
3162 tcg_temp_free_i32(frm);
3163 tcg_temp_free_i32(dest);
3164 }
3165
3166 tcg_temp_free_ptr(fpst);
3167 return 0;
3168}
3169
7655f39b
WN
3170static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3171 int rounding)
3172{
3173 TCGv_ptr fpst = get_fpstatus_ptr(0);
3174 TCGv_i32 tcg_rmode;
3175
3176 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3177 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3178
3179 if (dp) {
3180 TCGv_i64 tcg_op;
3181 TCGv_i64 tcg_res;
3182 tcg_op = tcg_temp_new_i64();
3183 tcg_res = tcg_temp_new_i64();
3184 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3185 gen_helper_rintd(tcg_res, tcg_op, fpst);
3186 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3187 tcg_temp_free_i64(tcg_op);
3188 tcg_temp_free_i64(tcg_res);
3189 } else {
3190 TCGv_i32 tcg_op;
3191 TCGv_i32 tcg_res;
3192 tcg_op = tcg_temp_new_i32();
3193 tcg_res = tcg_temp_new_i32();
3194 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3195 gen_helper_rints(tcg_res, tcg_op, fpst);
3196 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3197 tcg_temp_free_i32(tcg_op);
3198 tcg_temp_free_i32(tcg_res);
3199 }
3200
9b049916 3201 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
7655f39b
WN
3202 tcg_temp_free_i32(tcg_rmode);
3203
3204 tcg_temp_free_ptr(fpst);
3205 return 0;
3206}
3207
c9975a83
WN
3208static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3209 int rounding)
3210{
3211 bool is_signed = extract32(insn, 7, 1);
3212 TCGv_ptr fpst = get_fpstatus_ptr(0);
3213 TCGv_i32 tcg_rmode, tcg_shift;
3214
3215 tcg_shift = tcg_const_i32(0);
3216
3217 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3218 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3219
3220 if (dp) {
3221 TCGv_i64 tcg_double, tcg_res;
3222 TCGv_i32 tcg_tmp;
3223 /* Rd is encoded as a single precision register even when the source
3224 * is double precision.
3225 */
3226 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3227 tcg_double = tcg_temp_new_i64();
3228 tcg_res = tcg_temp_new_i64();
3229 tcg_tmp = tcg_temp_new_i32();
3230 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3231 if (is_signed) {
3232 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3233 } else {
3234 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3235 }
ecc7b3aa 3236 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3237 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3238 tcg_temp_free_i32(tcg_tmp);
3239 tcg_temp_free_i64(tcg_res);
3240 tcg_temp_free_i64(tcg_double);
3241 } else {
3242 TCGv_i32 tcg_single, tcg_res;
3243 tcg_single = tcg_temp_new_i32();
3244 tcg_res = tcg_temp_new_i32();
3245 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3246 if (is_signed) {
3247 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3248 } else {
3249 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3250 }
3251 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3252 tcg_temp_free_i32(tcg_res);
3253 tcg_temp_free_i32(tcg_single);
3254 }
3255
9b049916 3256 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3257 tcg_temp_free_i32(tcg_rmode);
3258
3259 tcg_temp_free_i32(tcg_shift);
3260
3261 tcg_temp_free_ptr(fpst);
3262
3263 return 0;
3264}
7655f39b
WN
3265
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,  /* RM == 0b00 */
    FPROUNDING_TIEEVEN,  /* RM == 0b01 */
    FPROUNDING_POSINF,   /* RM == 0b10 */
    FPROUNDING_NEGINF,   /* RM == 0b11 */
};
3276
7dcc1f89 3277static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3278{
3279 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3280
d614a513 3281 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3282 return 1;
3283 }
3284
3285 if (dp) {
3286 VFP_DREG_D(rd, insn);
3287 VFP_DREG_N(rn, insn);
3288 VFP_DREG_M(rm, insn);
3289 } else {
3290 rd = VFP_SREG_D(insn);
3291 rn = VFP_SREG_N(insn);
3292 rm = VFP_SREG_M(insn);
3293 }
3294
3295 if ((insn & 0x0f800e50) == 0x0e000a00) {
3296 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3297 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3298 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3299 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3300 /* VRINTA, VRINTN, VRINTP, VRINTM */
3301 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3302 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3303 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3304 /* VCVTA, VCVTN, VCVTP, VCVTM */
3305 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3306 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3307 }
3308 return 1;
3309}
3310
a1c7273b 3311/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3312 (ie. an undefined instruction). */
7dcc1f89 3313static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3314{
3315 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3316 int dp, veclen;
39d5492a
PM
3317 TCGv_i32 addr;
3318 TCGv_i32 tmp;
3319 TCGv_i32 tmp2;
b7bcbe95 3320
d614a513 3321 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3322 return 1;
d614a513 3323 }
40f137e1 3324
2c7ffc41
PM
3325 /* FIXME: this access check should not take precedence over UNDEF
3326 * for invalid encodings; we will generate incorrect syndrome information
3327 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3328 */
9dbbc748 3329 if (s->fp_excp_el) {
2c7ffc41 3330 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3331 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3332 return 0;
3333 }
3334
5df8bac1 3335 if (!s->vfp_enabled) {
9ee6e8bb 3336 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3337 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3338 return 1;
3339 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3340 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3341 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3342 return 1;
a50c0f51 3343 }
40f137e1 3344 }
6a57f3eb
WN
3345
3346 if (extract32(insn, 28, 4) == 0xf) {
3347 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3348 * only used in v8 and above.
3349 */
7dcc1f89 3350 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3351 }
3352
b7bcbe95
FB
3353 dp = ((insn & 0xf00) == 0xb00);
3354 switch ((insn >> 24) & 0xf) {
3355 case 0xe:
3356 if (insn & (1 << 4)) {
3357 /* single register transfer */
b7bcbe95
FB
3358 rd = (insn >> 12) & 0xf;
3359 if (dp) {
9ee6e8bb
PB
3360 int size;
3361 int pass;
3362
3363 VFP_DREG_N(rn, insn);
3364 if (insn & 0xf)
b7bcbe95 3365 return 1;
9ee6e8bb 3366 if (insn & 0x00c00060
d614a513 3367 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3368 return 1;
d614a513 3369 }
9ee6e8bb
PB
3370
3371 pass = (insn >> 21) & 1;
3372 if (insn & (1 << 22)) {
3373 size = 0;
3374 offset = ((insn >> 5) & 3) * 8;
3375 } else if (insn & (1 << 5)) {
3376 size = 1;
3377 offset = (insn & (1 << 6)) ? 16 : 0;
3378 } else {
3379 size = 2;
3380 offset = 0;
3381 }
18c9b560 3382 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3383 /* vfp->arm */
ad69471c 3384 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3385 switch (size) {
3386 case 0:
9ee6e8bb 3387 if (offset)
ad69471c 3388 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3389 if (insn & (1 << 23))
ad69471c 3390 gen_uxtb(tmp);
9ee6e8bb 3391 else
ad69471c 3392 gen_sxtb(tmp);
9ee6e8bb
PB
3393 break;
3394 case 1:
9ee6e8bb
PB
3395 if (insn & (1 << 23)) {
3396 if (offset) {
ad69471c 3397 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3398 } else {
ad69471c 3399 gen_uxth(tmp);
9ee6e8bb
PB
3400 }
3401 } else {
3402 if (offset) {
ad69471c 3403 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3404 } else {
ad69471c 3405 gen_sxth(tmp);
9ee6e8bb
PB
3406 }
3407 }
3408 break;
3409 case 2:
9ee6e8bb
PB
3410 break;
3411 }
ad69471c 3412 store_reg(s, rd, tmp);
b7bcbe95
FB
3413 } else {
3414 /* arm->vfp */
ad69471c 3415 tmp = load_reg(s, rd);
9ee6e8bb
PB
3416 if (insn & (1 << 23)) {
3417 /* VDUP */
3418 if (size == 0) {
ad69471c 3419 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3420 } else if (size == 1) {
ad69471c 3421 gen_neon_dup_low16(tmp);
9ee6e8bb 3422 }
cbbccffc 3423 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3424 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3425 tcg_gen_mov_i32(tmp2, tmp);
3426 neon_store_reg(rn, n, tmp2);
3427 }
3428 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3429 } else {
3430 /* VMOV */
3431 switch (size) {
3432 case 0:
ad69471c 3433 tmp2 = neon_load_reg(rn, pass);
d593c48e 3434 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3435 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3436 break;
3437 case 1:
ad69471c 3438 tmp2 = neon_load_reg(rn, pass);
d593c48e 3439 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3440 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3441 break;
3442 case 2:
9ee6e8bb
PB
3443 break;
3444 }
ad69471c 3445 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3446 }
b7bcbe95 3447 }
9ee6e8bb
PB
3448 } else { /* !dp */
3449 if ((insn & 0x6f) != 0x00)
3450 return 1;
3451 rn = VFP_SREG_N(insn);
18c9b560 3452 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3453 /* vfp->arm */
3454 if (insn & (1 << 21)) {
3455 /* system register */
40f137e1 3456 rn >>= 1;
9ee6e8bb 3457
b7bcbe95 3458 switch (rn) {
40f137e1 3459 case ARM_VFP_FPSID:
4373f3ce 3460 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3461 VFP3 restricts all id registers to privileged
3462 accesses. */
3463 if (IS_USER(s)
d614a513 3464 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3465 return 1;
d614a513 3466 }
4373f3ce 3467 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3468 break;
40f137e1 3469 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3470 if (IS_USER(s))
3471 return 1;
4373f3ce 3472 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3473 break;
40f137e1
PB
3474 case ARM_VFP_FPINST:
3475 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3476 /* Not present in VFP3. */
3477 if (IS_USER(s)
d614a513 3478 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3479 return 1;
d614a513 3480 }
4373f3ce 3481 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3482 break;
40f137e1 3483 case ARM_VFP_FPSCR:
601d70b9 3484 if (rd == 15) {
4373f3ce
PB
3485 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3486 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3487 } else {
7d1b0095 3488 tmp = tcg_temp_new_i32();
4373f3ce
PB
3489 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3490 }
b7bcbe95 3491 break;
a50c0f51 3492 case ARM_VFP_MVFR2:
d614a513 3493 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3494 return 1;
3495 }
3496 /* fall through */
9ee6e8bb
PB
3497 case ARM_VFP_MVFR0:
3498 case ARM_VFP_MVFR1:
3499 if (IS_USER(s)
d614a513 3500 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3501 return 1;
d614a513 3502 }
4373f3ce 3503 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3504 break;
b7bcbe95
FB
3505 default:
3506 return 1;
3507 }
3508 } else {
3509 gen_mov_F0_vreg(0, rn);
4373f3ce 3510 tmp = gen_vfp_mrs();
b7bcbe95
FB
3511 }
3512 if (rd == 15) {
b5ff1b31 3513 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3514 gen_set_nzcv(tmp);
7d1b0095 3515 tcg_temp_free_i32(tmp);
4373f3ce
PB
3516 } else {
3517 store_reg(s, rd, tmp);
3518 }
b7bcbe95
FB
3519 } else {
3520 /* arm->vfp */
b7bcbe95 3521 if (insn & (1 << 21)) {
40f137e1 3522 rn >>= 1;
b7bcbe95
FB
3523 /* system register */
3524 switch (rn) {
40f137e1 3525 case ARM_VFP_FPSID:
9ee6e8bb
PB
3526 case ARM_VFP_MVFR0:
3527 case ARM_VFP_MVFR1:
b7bcbe95
FB
3528 /* Writes are ignored. */
3529 break;
40f137e1 3530 case ARM_VFP_FPSCR:
e4c1cfa5 3531 tmp = load_reg(s, rd);
4373f3ce 3532 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3533 tcg_temp_free_i32(tmp);
b5ff1b31 3534 gen_lookup_tb(s);
b7bcbe95 3535 break;
40f137e1 3536 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3537 if (IS_USER(s))
3538 return 1;
71b3c3de
JR
3539 /* TODO: VFP subarchitecture support.
3540 * For now, keep the EN bit only */
e4c1cfa5 3541 tmp = load_reg(s, rd);
71b3c3de 3542 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3543 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3544 gen_lookup_tb(s);
3545 break;
3546 case ARM_VFP_FPINST:
3547 case ARM_VFP_FPINST2:
23adb861
PM
3548 if (IS_USER(s)) {
3549 return 1;
3550 }
e4c1cfa5 3551 tmp = load_reg(s, rd);
4373f3ce 3552 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3553 break;
b7bcbe95
FB
3554 default:
3555 return 1;
3556 }
3557 } else {
e4c1cfa5 3558 tmp = load_reg(s, rd);
4373f3ce 3559 gen_vfp_msr(tmp);
b7bcbe95
FB
3560 gen_mov_vreg_F0(0, rn);
3561 }
3562 }
3563 }
3564 } else {
3565 /* data processing */
3566 /* The opcode is in bits 23, 21, 20 and 6. */
3567 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3568 if (dp) {
3569 if (op == 15) {
3570 /* rn is opcode */
3571 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3572 } else {
3573 /* rn is register number */
9ee6e8bb 3574 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3575 }
3576
239c20c7
WN
3577 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3578 ((rn & 0x1e) == 0x6))) {
3579 /* Integer or single/half precision destination. */
9ee6e8bb 3580 rd = VFP_SREG_D(insn);
b7bcbe95 3581 } else {
9ee6e8bb 3582 VFP_DREG_D(rd, insn);
b7bcbe95 3583 }
04595bf6 3584 if (op == 15 &&
239c20c7
WN
3585 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3586 ((rn & 0x1e) == 0x4))) {
3587 /* VCVT from int or half precision is always from S reg
3588 * regardless of dp bit. VCVT with immediate frac_bits
3589 * has same format as SREG_M.
04595bf6
PM
3590 */
3591 rm = VFP_SREG_M(insn);
b7bcbe95 3592 } else {
9ee6e8bb 3593 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3594 }
3595 } else {
9ee6e8bb 3596 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3597 if (op == 15 && rn == 15) {
3598 /* Double precision destination. */
9ee6e8bb
PB
3599 VFP_DREG_D(rd, insn);
3600 } else {
3601 rd = VFP_SREG_D(insn);
3602 }
04595bf6
PM
3603 /* NB that we implicitly rely on the encoding for the frac_bits
3604 * in VCVT of fixed to float being the same as that of an SREG_M
3605 */
9ee6e8bb 3606 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3607 }
3608
69d1fc22 3609 veclen = s->vec_len;
b7bcbe95
FB
3610 if (op == 15 && rn > 3)
3611 veclen = 0;
3612
3613 /* Shut up compiler warnings. */
3614 delta_m = 0;
3615 delta_d = 0;
3616 bank_mask = 0;
3b46e624 3617
b7bcbe95
FB
3618 if (veclen > 0) {
3619 if (dp)
3620 bank_mask = 0xc;
3621 else
3622 bank_mask = 0x18;
3623
3624 /* Figure out what type of vector operation this is. */
3625 if ((rd & bank_mask) == 0) {
3626 /* scalar */
3627 veclen = 0;
3628 } else {
3629 if (dp)
69d1fc22 3630 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3631 else
69d1fc22 3632 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3633
3634 if ((rm & bank_mask) == 0) {
3635 /* mixed scalar/vector */
3636 delta_m = 0;
3637 } else {
3638 /* vector */
3639 delta_m = delta_d;
3640 }
3641 }
3642 }
3643
3644 /* Load the initial operands. */
3645 if (op == 15) {
3646 switch (rn) {
3647 case 16:
3648 case 17:
3649 /* Integer source */
3650 gen_mov_F0_vreg(0, rm);
3651 break;
3652 case 8:
3653 case 9:
3654 /* Compare */
3655 gen_mov_F0_vreg(dp, rd);
3656 gen_mov_F1_vreg(dp, rm);
3657 break;
3658 case 10:
3659 case 11:
3660 /* Compare with zero */
3661 gen_mov_F0_vreg(dp, rd);
3662 gen_vfp_F1_ld0(dp);
3663 break;
9ee6e8bb
PB
3664 case 20:
3665 case 21:
3666 case 22:
3667 case 23:
644ad806
PB
3668 case 28:
3669 case 29:
3670 case 30:
3671 case 31:
9ee6e8bb
PB
3672 /* Source and destination the same. */
3673 gen_mov_F0_vreg(dp, rd);
3674 break;
6e0c0ed1
PM
3675 case 4:
3676 case 5:
3677 case 6:
3678 case 7:
239c20c7
WN
3679 /* VCVTB, VCVTT: only present with the halfprec extension
3680 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3681 * (we choose to UNDEF)
6e0c0ed1 3682 */
d614a513
PM
3683 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3684 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3685 return 1;
3686 }
239c20c7
WN
3687 if (!extract32(rn, 1, 1)) {
3688 /* Half precision source. */
3689 gen_mov_F0_vreg(0, rm);
3690 break;
3691 }
6e0c0ed1 3692 /* Otherwise fall through */
b7bcbe95
FB
3693 default:
3694 /* One source operand. */
3695 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3696 break;
b7bcbe95
FB
3697 }
3698 } else {
3699 /* Two source operands. */
3700 gen_mov_F0_vreg(dp, rn);
3701 gen_mov_F1_vreg(dp, rm);
3702 }
3703
3704 for (;;) {
3705 /* Perform the calculation. */
3706 switch (op) {
605a6aed
PM
3707 case 0: /* VMLA: fd + (fn * fm) */
3708 /* Note that order of inputs to the add matters for NaNs */
3709 gen_vfp_F1_mul(dp);
3710 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3711 gen_vfp_add(dp);
3712 break;
605a6aed 3713 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3714 gen_vfp_mul(dp);
605a6aed
PM
3715 gen_vfp_F1_neg(dp);
3716 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3717 gen_vfp_add(dp);
3718 break;
605a6aed
PM
3719 case 2: /* VNMLS: -fd + (fn * fm) */
3720 /* Note that it isn't valid to replace (-A + B) with (B - A)
3721 * or similar plausible looking simplifications
3722 * because this will give wrong results for NaNs.
3723 */
3724 gen_vfp_F1_mul(dp);
3725 gen_mov_F0_vreg(dp, rd);
3726 gen_vfp_neg(dp);
3727 gen_vfp_add(dp);
b7bcbe95 3728 break;
605a6aed 3729 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3730 gen_vfp_mul(dp);
605a6aed
PM
3731 gen_vfp_F1_neg(dp);
3732 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3733 gen_vfp_neg(dp);
605a6aed 3734 gen_vfp_add(dp);
b7bcbe95
FB
3735 break;
3736 case 4: /* mul: fn * fm */
3737 gen_vfp_mul(dp);
3738 break;
3739 case 5: /* nmul: -(fn * fm) */
3740 gen_vfp_mul(dp);
3741 gen_vfp_neg(dp);
3742 break;
3743 case 6: /* add: fn + fm */
3744 gen_vfp_add(dp);
3745 break;
3746 case 7: /* sub: fn - fm */
3747 gen_vfp_sub(dp);
3748 break;
3749 case 8: /* div: fn / fm */
3750 gen_vfp_div(dp);
3751 break;
da97f52c
PM
3752 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3753 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3754 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3755 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3756 /* These are fused multiply-add, and must be done as one
3757 * floating point operation with no rounding between the
3758 * multiplication and addition steps.
3759 * NB that doing the negations here as separate steps is
3760 * correct : an input NaN should come out with its sign bit
3761 * flipped if it is a negated-input.
3762 */
d614a513 3763 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3764 return 1;
3765 }
3766 if (dp) {
3767 TCGv_ptr fpst;
3768 TCGv_i64 frd;
3769 if (op & 1) {
3770 /* VFNMS, VFMS */
3771 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3772 }
3773 frd = tcg_temp_new_i64();
3774 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3775 if (op & 2) {
3776 /* VFNMA, VFNMS */
3777 gen_helper_vfp_negd(frd, frd);
3778 }
3779 fpst = get_fpstatus_ptr(0);
3780 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3781 cpu_F1d, frd, fpst);
3782 tcg_temp_free_ptr(fpst);
3783 tcg_temp_free_i64(frd);
3784 } else {
3785 TCGv_ptr fpst;
3786 TCGv_i32 frd;
3787 if (op & 1) {
3788 /* VFNMS, VFMS */
3789 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3790 }
3791 frd = tcg_temp_new_i32();
3792 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3793 if (op & 2) {
3794 gen_helper_vfp_negs(frd, frd);
3795 }
3796 fpst = get_fpstatus_ptr(0);
3797 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3798 cpu_F1s, frd, fpst);
3799 tcg_temp_free_ptr(fpst);
3800 tcg_temp_free_i32(frd);
3801 }
3802 break;
9ee6e8bb 3803 case 14: /* fconst */
d614a513
PM
3804 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3805 return 1;
3806 }
9ee6e8bb
PB
3807
3808 n = (insn << 12) & 0x80000000;
3809 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3810 if (dp) {
3811 if (i & 0x40)
3812 i |= 0x3f80;
3813 else
3814 i |= 0x4000;
3815 n |= i << 16;
4373f3ce 3816 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3817 } else {
3818 if (i & 0x40)
3819 i |= 0x780;
3820 else
3821 i |= 0x800;
3822 n |= i << 19;
5b340b51 3823 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3824 }
9ee6e8bb 3825 break;
b7bcbe95
FB
3826 case 15: /* extension space */
3827 switch (rn) {
3828 case 0: /* cpy */
3829 /* no-op */
3830 break;
3831 case 1: /* abs */
3832 gen_vfp_abs(dp);
3833 break;
3834 case 2: /* neg */
3835 gen_vfp_neg(dp);
3836 break;
3837 case 3: /* sqrt */
3838 gen_vfp_sqrt(dp);
3839 break;
239c20c7 3840 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
486624fc
AB
3841 {
3842 TCGv_ptr fpst = get_fpstatus_ptr(false);
3843 TCGv_i32 ahp_mode = get_ahp_flag();
60011498
PB
3844 tmp = gen_vfp_mrs();
3845 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3846 if (dp) {
3847 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3848 fpst, ahp_mode);
239c20c7
WN
3849 } else {
3850 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3851 fpst, ahp_mode);
239c20c7 3852 }
486624fc
AB
3853 tcg_temp_free_i32(ahp_mode);
3854 tcg_temp_free_ptr(fpst);
7d1b0095 3855 tcg_temp_free_i32(tmp);
60011498 3856 break;
486624fc 3857 }
239c20c7 3858 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
486624fc
AB
3859 {
3860 TCGv_ptr fpst = get_fpstatus_ptr(false);
3861 TCGv_i32 ahp = get_ahp_flag();
60011498
PB
3862 tmp = gen_vfp_mrs();
3863 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3864 if (dp) {
3865 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3866 fpst, ahp);
239c20c7
WN
3867 } else {
3868 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3869 fpst, ahp);
239c20c7 3870 }
7d1b0095 3871 tcg_temp_free_i32(tmp);
486624fc
AB
3872 tcg_temp_free_i32(ahp);
3873 tcg_temp_free_ptr(fpst);
60011498 3874 break;
486624fc 3875 }
239c20c7 3876 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
486624fc
AB
3877 {
3878 TCGv_ptr fpst = get_fpstatus_ptr(false);
3879 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3880 tmp = tcg_temp_new_i32();
486624fc 3881
239c20c7
WN
3882 if (dp) {
3883 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3884 fpst, ahp);
239c20c7
WN
3885 } else {
3886 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3887 fpst, ahp);
239c20c7 3888 }
486624fc
AB
3889 tcg_temp_free_i32(ahp);
3890 tcg_temp_free_ptr(fpst);
60011498
PB
3891 gen_mov_F0_vreg(0, rd);
3892 tmp2 = gen_vfp_mrs();
3893 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3894 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3895 tcg_temp_free_i32(tmp2);
60011498
PB
3896 gen_vfp_msr(tmp);
3897 break;
486624fc 3898 }
239c20c7 3899 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
486624fc
AB
3900 {
3901 TCGv_ptr fpst = get_fpstatus_ptr(false);
3902 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3903 tmp = tcg_temp_new_i32();
239c20c7
WN
3904 if (dp) {
3905 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3906 fpst, ahp);
239c20c7
WN
3907 } else {
3908 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3909 fpst, ahp);
239c20c7 3910 }
486624fc
AB
3911 tcg_temp_free_i32(ahp);
3912 tcg_temp_free_ptr(fpst);
60011498
PB
3913 tcg_gen_shli_i32(tmp, tmp, 16);
3914 gen_mov_F0_vreg(0, rd);
3915 tmp2 = gen_vfp_mrs();
3916 tcg_gen_ext16u_i32(tmp2, tmp2);
3917 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3918 tcg_temp_free_i32(tmp2);
60011498
PB
3919 gen_vfp_msr(tmp);
3920 break;
486624fc 3921 }
b7bcbe95
FB
3922 case 8: /* cmp */
3923 gen_vfp_cmp(dp);
3924 break;
3925 case 9: /* cmpe */
3926 gen_vfp_cmpe(dp);
3927 break;
3928 case 10: /* cmpz */
3929 gen_vfp_cmp(dp);
3930 break;
3931 case 11: /* cmpez */
3932 gen_vfp_F1_ld0(dp);
3933 gen_vfp_cmpe(dp);
3934 break;
664c6733
WN
3935 case 12: /* vrintr */
3936 {
3937 TCGv_ptr fpst = get_fpstatus_ptr(0);
3938 if (dp) {
3939 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3940 } else {
3941 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3942 }
3943 tcg_temp_free_ptr(fpst);
3944 break;
3945 }
a290c62a
WN
3946 case 13: /* vrintz */
3947 {
3948 TCGv_ptr fpst = get_fpstatus_ptr(0);
3949 TCGv_i32 tcg_rmode;
3950 tcg_rmode = tcg_const_i32(float_round_to_zero);
9b049916 3951 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3952 if (dp) {
3953 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3954 } else {
3955 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3956 }
9b049916 3957 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
3958 tcg_temp_free_i32(tcg_rmode);
3959 tcg_temp_free_ptr(fpst);
3960 break;
3961 }
4e82bc01
WN
3962 case 14: /* vrintx */
3963 {
3964 TCGv_ptr fpst = get_fpstatus_ptr(0);
3965 if (dp) {
3966 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3967 } else {
3968 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3969 }
3970 tcg_temp_free_ptr(fpst);
3971 break;
3972 }
b7bcbe95
FB
3973 case 15: /* single<->double conversion */
3974 if (dp)
4373f3ce 3975 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3976 else
4373f3ce 3977 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3978 break;
3979 case 16: /* fuito */
5500b06c 3980 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3981 break;
3982 case 17: /* fsito */
5500b06c 3983 gen_vfp_sito(dp, 0);
b7bcbe95 3984 break;
9ee6e8bb 3985 case 20: /* fshto */
d614a513
PM
3986 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3987 return 1;
3988 }
5500b06c 3989 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3990 break;
3991 case 21: /* fslto */
d614a513
PM
3992 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3993 return 1;
3994 }
5500b06c 3995 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3996 break;
3997 case 22: /* fuhto */
d614a513
PM
3998 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3999 return 1;
4000 }
5500b06c 4001 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
4002 break;
4003 case 23: /* fulto */
d614a513
PM
4004 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4005 return 1;
4006 }
5500b06c 4007 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 4008 break;
b7bcbe95 4009 case 24: /* ftoui */
5500b06c 4010 gen_vfp_toui(dp, 0);
b7bcbe95
FB
4011 break;
4012 case 25: /* ftouiz */
5500b06c 4013 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
4014 break;
4015 case 26: /* ftosi */
5500b06c 4016 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
4017 break;
4018 case 27: /* ftosiz */
5500b06c 4019 gen_vfp_tosiz(dp, 0);
b7bcbe95 4020 break;
9ee6e8bb 4021 case 28: /* ftosh */
d614a513
PM
4022 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4023 return 1;
4024 }
5500b06c 4025 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
4026 break;
4027 case 29: /* ftosl */
d614a513
PM
4028 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4029 return 1;
4030 }
5500b06c 4031 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
4032 break;
4033 case 30: /* ftouh */
d614a513
PM
4034 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4035 return 1;
4036 }
5500b06c 4037 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
4038 break;
4039 case 31: /* ftoul */
d614a513
PM
4040 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4041 return 1;
4042 }
5500b06c 4043 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 4044 break;
b7bcbe95 4045 default: /* undefined */
b7bcbe95
FB
4046 return 1;
4047 }
4048 break;
4049 default: /* undefined */
b7bcbe95
FB
4050 return 1;
4051 }
4052
4053 /* Write back the result. */
239c20c7
WN
4054 if (op == 15 && (rn >= 8 && rn <= 11)) {
4055 /* Comparison, do nothing. */
4056 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4057 (rn & 0x1e) == 0x6)) {
4058 /* VCVT double to int: always integer result.
4059 * VCVT double to half precision is always a single
4060 * precision result.
4061 */
b7bcbe95 4062 gen_mov_vreg_F0(0, rd);
239c20c7 4063 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
4064 /* conversion */
4065 gen_mov_vreg_F0(!dp, rd);
239c20c7 4066 } else {
b7bcbe95 4067 gen_mov_vreg_F0(dp, rd);
239c20c7 4068 }
b7bcbe95
FB
4069
4070 /* break out of the loop if we have finished */
4071 if (veclen == 0)
4072 break;
4073
4074 if (op == 15 && delta_m == 0) {
4075 /* single source one-many */
4076 while (veclen--) {
4077 rd = ((rd + delta_d) & (bank_mask - 1))
4078 | (rd & bank_mask);
4079 gen_mov_vreg_F0(dp, rd);
4080 }
4081 break;
4082 }
4083 /* Setup the next operands. */
4084 veclen--;
4085 rd = ((rd + delta_d) & (bank_mask - 1))
4086 | (rd & bank_mask);
4087
4088 if (op == 15) {
4089 /* One source operand. */
4090 rm = ((rm + delta_m) & (bank_mask - 1))
4091 | (rm & bank_mask);
4092 gen_mov_F0_vreg(dp, rm);
4093 } else {
4094 /* Two source operands. */
4095 rn = ((rn + delta_d) & (bank_mask - 1))
4096 | (rn & bank_mask);
4097 gen_mov_F0_vreg(dp, rn);
4098 if (delta_m) {
4099 rm = ((rm + delta_m) & (bank_mask - 1))
4100 | (rm & bank_mask);
4101 gen_mov_F1_vreg(dp, rm);
4102 }
4103 }
4104 }
4105 }
4106 break;
4107 case 0xc:
4108 case 0xd:
8387da81 4109 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4110 /* two-register transfer */
4111 rn = (insn >> 16) & 0xf;
4112 rd = (insn >> 12) & 0xf;
4113 if (dp) {
9ee6e8bb
PB
4114 VFP_DREG_M(rm, insn);
4115 } else {
4116 rm = VFP_SREG_M(insn);
4117 }
b7bcbe95 4118
18c9b560 4119 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4120 /* vfp->arm */
4121 if (dp) {
4373f3ce
PB
4122 gen_mov_F0_vreg(0, rm * 2);
4123 tmp = gen_vfp_mrs();
4124 store_reg(s, rd, tmp);
4125 gen_mov_F0_vreg(0, rm * 2 + 1);
4126 tmp = gen_vfp_mrs();
4127 store_reg(s, rn, tmp);
b7bcbe95
FB
4128 } else {
4129 gen_mov_F0_vreg(0, rm);
4373f3ce 4130 tmp = gen_vfp_mrs();
8387da81 4131 store_reg(s, rd, tmp);
b7bcbe95 4132 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4133 tmp = gen_vfp_mrs();
8387da81 4134 store_reg(s, rn, tmp);
b7bcbe95
FB
4135 }
4136 } else {
4137 /* arm->vfp */
4138 if (dp) {
4373f3ce
PB
4139 tmp = load_reg(s, rd);
4140 gen_vfp_msr(tmp);
4141 gen_mov_vreg_F0(0, rm * 2);
4142 tmp = load_reg(s, rn);
4143 gen_vfp_msr(tmp);
4144 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4145 } else {
8387da81 4146 tmp = load_reg(s, rd);
4373f3ce 4147 gen_vfp_msr(tmp);
b7bcbe95 4148 gen_mov_vreg_F0(0, rm);
8387da81 4149 tmp = load_reg(s, rn);
4373f3ce 4150 gen_vfp_msr(tmp);
b7bcbe95
FB
4151 gen_mov_vreg_F0(0, rm + 1);
4152 }
4153 }
4154 } else {
4155 /* Load/store */
4156 rn = (insn >> 16) & 0xf;
4157 if (dp)
9ee6e8bb 4158 VFP_DREG_D(rd, insn);
b7bcbe95 4159 else
9ee6e8bb 4160 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4161 if ((insn & 0x01200000) == 0x01000000) {
4162 /* Single load/store */
4163 offset = (insn & 0xff) << 2;
4164 if ((insn & (1 << 23)) == 0)
4165 offset = -offset;
934814f1
PM
4166 if (s->thumb && rn == 15) {
4167 /* This is actually UNPREDICTABLE */
4168 addr = tcg_temp_new_i32();
4169 tcg_gen_movi_i32(addr, s->pc & ~2);
4170 } else {
4171 addr = load_reg(s, rn);
4172 }
312eea9f 4173 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4174 if (insn & (1 << 20)) {
312eea9f 4175 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4176 gen_mov_vreg_F0(dp, rd);
4177 } else {
4178 gen_mov_F0_vreg(dp, rd);
312eea9f 4179 gen_vfp_st(s, dp, addr);
b7bcbe95 4180 }
7d1b0095 4181 tcg_temp_free_i32(addr);
b7bcbe95
FB
4182 } else {
4183 /* load/store multiple */
934814f1 4184 int w = insn & (1 << 21);
b7bcbe95
FB
4185 if (dp)
4186 n = (insn >> 1) & 0x7f;
4187 else
4188 n = insn & 0xff;
4189
934814f1
PM
4190 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4191 /* P == U , W == 1 => UNDEF */
4192 return 1;
4193 }
4194 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4195 /* UNPREDICTABLE cases for bad immediates: we choose to
4196 * UNDEF to avoid generating huge numbers of TCG ops
4197 */
4198 return 1;
4199 }
4200 if (rn == 15 && w) {
4201 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4202 return 1;
4203 }
4204
4205 if (s->thumb && rn == 15) {
4206 /* This is actually UNPREDICTABLE */
4207 addr = tcg_temp_new_i32();
4208 tcg_gen_movi_i32(addr, s->pc & ~2);
4209 } else {
4210 addr = load_reg(s, rn);
4211 }
b7bcbe95 4212 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4213 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4214
4215 if (dp)
4216 offset = 8;
4217 else
4218 offset = 4;
4219 for (i = 0; i < n; i++) {
18c9b560 4220 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4221 /* load */
312eea9f 4222 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4223 gen_mov_vreg_F0(dp, rd + i);
4224 } else {
4225 /* store */
4226 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4227 gen_vfp_st(s, dp, addr);
b7bcbe95 4228 }
312eea9f 4229 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4230 }
934814f1 4231 if (w) {
b7bcbe95
FB
4232 /* writeback */
4233 if (insn & (1 << 24))
4234 offset = -offset * n;
4235 else if (dp && (insn & 1))
4236 offset = 4;
4237 else
4238 offset = 0;
4239
4240 if (offset != 0)
312eea9f
FN
4241 tcg_gen_addi_i32(addr, addr, offset);
4242 store_reg(s, rn, addr);
4243 } else {
7d1b0095 4244 tcg_temp_free_i32(addr);
b7bcbe95
FB
4245 }
4246 }
4247 }
4248 break;
4249 default:
4250 /* Should never happen. */
4251 return 1;
4252 }
4253 return 0;
4254}
4255
/* Return true if it is safe to chain directly to @dest with goto_tb.
 * For softmmu this requires the destination to be in the same guest
 * page as either the TB's start PC or the current instruction;
 * user-mode emulation has no such restriction.
 */
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
6e256c93 4265
/* Emit an indirect TB transition: look up the next TB from the CPU
 * state at runtime and jump to it if found, otherwise return to the
 * main loop.
 */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
4270
/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chaining: once this TB is patched, the goto_tb jump
         * bypasses the PC store and exit_tb below.
         */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Cross-page target: update PC, then use the runtime lookup. */
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
4287
8aaca4c0
FB
4288static inline void gen_jmp (DisasContext *s, uint32_t dest)
4289{
b636649f 4290 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4291 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4292 if (s->thumb)
d9ba4830
PB
4293 dest |= 1;
4294 gen_bx_im(s, dest);
8aaca4c0 4295 } else {
6e256c93 4296 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4297 }
4298}
4299
39d5492a 4300static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4301{
ee097184 4302 if (x)
d9ba4830 4303 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4304 else
d9ba4830 4305 gen_sxth(t0);
ee097184 4306 if (y)
d9ba4830 4307 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4308 else
d9ba4830
PB
4309 gen_sxth(t1);
4310 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4311}
4312
4313/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4314static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4315{
b5ff1b31
FB
4316 uint32_t mask;
4317
4318 mask = 0;
4319 if (flags & (1 << 0))
4320 mask |= 0xff;
4321 if (flags & (1 << 1))
4322 mask |= 0xff00;
4323 if (flags & (1 << 2))
4324 mask |= 0xff0000;
4325 if (flags & (1 << 3))
4326 mask |= 0xff000000;
9ee6e8bb 4327
2ae23e75 4328 /* Mask out undefined bits. */
9ee6e8bb 4329 mask &= ~CPSR_RESERVED;
d614a513 4330 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4331 mask &= ~CPSR_T;
d614a513
PM
4332 }
4333 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4334 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4335 }
4336 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4337 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4338 }
4339 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4340 mask &= ~CPSR_IT;
d614a513 4341 }
4051e12c
PM
4342 /* Mask out execution state and reserved bits. */
4343 if (!spsr) {
4344 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4345 }
b5ff1b31
FB
4346 /* Mask out privileged bits. */
4347 if (IS_USER(s))
9ee6e8bb 4348 mask &= CPSR_USER;
b5ff1b31
FB
4349 return mask;
4350}
4351
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep the SPSR bits outside @mask, merge in
         * the masked new value from t0.  Note load_cpu_field(spsr) uses
         * the token "spsr" as the CPUARMState field name.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR writes go through the helper so side effects are applied. */
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* PSR writes may change execution state; force a TB lookup. */
    gen_lookup_tb(s);
    return 0;
}
4373
2fbac54b
FN
4374/* Returns nonzero if access to the PSR is not permitted. */
4375static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4376{
39d5492a 4377 TCGv_i32 tmp;
7d1b0095 4378 tmp = tcg_temp_new_i32();
2fbac54b
FN
4379 tcg_gen_movi_i32(tmp, val);
4380 return gen_set_psr(s, mask, spsr, tmp);
4381}
4382
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     * (NOTE: *tgtmode/*regno may also have been written on some failure
     * paths, since the inaccessible-register checks run after decode.)
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4527
4528static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4529{
4530 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4531 int tgtmode = 0, regno = 0;
4532
4533 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4534 return;
4535 }
4536
4537 /* Sync state because msr_banked() can raise exceptions */
4538 gen_set_condexec(s);
4539 gen_set_pc_im(s, s->pc - 4);
4540 tcg_reg = load_reg(s, rn);
4541 tcg_tgtmode = tcg_const_i32(tgtmode);
4542 tcg_regno = tcg_const_i32(regno);
4543 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4544 tcg_temp_free_i32(tcg_tgtmode);
4545 tcg_temp_free_i32(tcg_regno);
4546 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4547 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4548}
4549
4550static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4551{
4552 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4553 int tgtmode = 0, regno = 0;
4554
4555 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4556 return;
4557 }
4558
4559 /* Sync state because mrs_banked() can raise exceptions */
4560 gen_set_condexec(s);
4561 gen_set_pc_im(s, s->pc - 4);
4562 tcg_reg = tcg_temp_new_i32();
4563 tcg_tgtmode = tcg_const_i32(tgtmode);
4564 tcg_regno = tcg_const_i32(regno);
4565 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4566 tcg_temp_free_i32(tcg_tgtmode);
4567 tcg_temp_free_i32(tcg_regno);
4568 store_reg(s, rn, tcg_reg);
dcba3a8d 4569 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4570}
4571
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc); /* consumes @pc: caller must not reuse it */
}
4581
/* Generate a v6 exception return.  Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        /* Bracket the helper with io_start/io_end when icount is
         * enabled, as for other instructions with I/O-visible effects.
         */
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 4601
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* Pre-v6 form: CPSR is restored from the current mode's SPSR. */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4607
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        /* WFI is implemented in all modes: update PC and end the TB. */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
    default: /* nop */
        break;
    }
}
99c475ab 4647
/* Shorthand operand triple: destination V0, sources V0 and V1. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4649
39d5492a 4650static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4651{
4652 switch (size) {
dd8fbd78
FN
4653 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4654 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4655 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4656 default: abort();
9ee6e8bb 4657 }
9ee6e8bb
PB
4658}
4659
39d5492a 4660static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4661{
4662 switch (size) {
dd8fbd78
FN
4663 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4664 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4665 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4666 default: return;
4667 }
4668}
4669
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Invoke gen_helper_neon_<name>_{s8,u8,s16,u16,s32,u32}(tmp, cpu_env,
 * tmp, tmp2), choosing the variant from the (size, u) pair; expands to
 * "return 1" (UNDEF the insn) for combinations with no helper.
 * Requires tmp, tmp2, size and u to be in scope at the expansion site.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4721
39d5492a 4722static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4723{
39d5492a 4724 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4725 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4726 return tmp;
9ee6e8bb
PB
4727}
4728
/* Store @var to VFP scratch slot @scratch; frees @var (caller must
 * not reuse it).
 */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4734
39d5492a 4735static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4736{
39d5492a 4737 TCGv_i32 tmp;
9ee6e8bb 4738 if (size == 1) {
0fad6efc
PM
4739 tmp = neon_load_reg(reg & 7, reg >> 4);
4740 if (reg & 8) {
dd8fbd78 4741 gen_neon_dup_high16(tmp);
0fad6efc
PM
4742 } else {
4743 gen_neon_dup_low16(tmp);
dd8fbd78 4744 }
0fad6efc
PM
4745 } else {
4746 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4747 }
dd8fbd78 4748 return tmp;
9ee6e8bb
PB
4749}
4750
02acedf9 4751static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4752{
b13708bb
RH
4753 TCGv_ptr pd, pm;
4754
600b828c 4755 if (!q && size == 2) {
02acedf9
PM
4756 return 1;
4757 }
b13708bb
RH
4758 pd = vfp_reg_ptr(true, rd);
4759 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
4760 if (q) {
4761 switch (size) {
4762 case 0:
b13708bb 4763 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
4764 break;
4765 case 1:
b13708bb 4766 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
4767 break;
4768 case 2:
b13708bb 4769 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
4770 break;
4771 default:
4772 abort();
4773 }
4774 } else {
4775 switch (size) {
4776 case 0:
b13708bb 4777 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
4778 break;
4779 case 1:
b13708bb 4780 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
4781 break;
4782 default:
4783 abort();
4784 }
4785 }
b13708bb
RH
4786 tcg_temp_free_ptr(pd);
4787 tcg_temp_free_ptr(pm);
02acedf9 4788 return 0;
19457615
FN
4789}
4790
d68a6f3a 4791static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4792{
b13708bb
RH
4793 TCGv_ptr pd, pm;
4794
600b828c 4795 if (!q && size == 2) {
d68a6f3a
PM
4796 return 1;
4797 }
b13708bb
RH
4798 pd = vfp_reg_ptr(true, rd);
4799 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4800 if (q) {
4801 switch (size) {
4802 case 0:
b13708bb 4803 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4804 break;
4805 case 1:
b13708bb 4806 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4807 break;
4808 case 2:
b13708bb 4809 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4810 break;
4811 default:
4812 abort();
4813 }
4814 } else {
4815 switch (size) {
4816 case 0:
b13708bb 4817 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4818 break;
4819 case 1:
b13708bb 4820 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4821 break;
4822 default:
4823 abort();
4824 }
4825 }
b13708bb
RH
4826 tcg_temp_free_ptr(pd);
4827 tcg_temp_free_ptr(pm);
d68a6f3a 4828 return 0;
19457615
FN
4829}
4830
39d5492a 4831static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4832{
39d5492a 4833 TCGv_i32 rd, tmp;
19457615 4834
7d1b0095
PM
4835 rd = tcg_temp_new_i32();
4836 tmp = tcg_temp_new_i32();
19457615
FN
4837
4838 tcg_gen_shli_i32(rd, t0, 8);
4839 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4840 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4841 tcg_gen_or_i32(rd, rd, tmp);
4842
4843 tcg_gen_shri_i32(t1, t1, 8);
4844 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4845 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4846 tcg_gen_or_i32(t1, t1, tmp);
4847 tcg_gen_mov_i32(t0, rd);
4848
7d1b0095
PM
4849 tcg_temp_free_i32(tmp);
4850 tcg_temp_free_i32(rd);
19457615
FN
4851}
4852
39d5492a 4853static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4854{
39d5492a 4855 TCGv_i32 rd, tmp;
19457615 4856
7d1b0095
PM
4857 rd = tcg_temp_new_i32();
4858 tmp = tcg_temp_new_i32();
19457615
FN
4859
4860 tcg_gen_shli_i32(rd, t0, 16);
4861 tcg_gen_andi_i32(tmp, t1, 0xffff);
4862 tcg_gen_or_i32(rd, rd, tmp);
4863 tcg_gen_shri_i32(t1, t1, 16);
4864 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4865 tcg_gen_or_i32(t1, t1, tmp);
4866 tcg_gen_mov_i32(t0, rd);
4867
7d1b0095
PM
4868 tcg_temp_free_i32(tmp);
4869 tcg_temp_free_i32(rd);
19457615
FN
4870}
4871
4872
/* Per-opcode layout for the Neon "load/store multiple structures"
 * instructions: how many D registers take part, the element
 * interleave factor, and the register spacing (1 = consecutive
 * D registers, 2 = every other one).  Indexed by the op field.
 * The table is only ever read, so it is const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4890
4891/* Translate a NEON load/store element instruction. Return nonzero if the
4892 instruction is invalid. */
7dcc1f89 4893static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4894{
4895 int rd, rn, rm;
4896 int op;
4897 int nregs;
4898 int interleave;
84496233 4899 int spacing;
9ee6e8bb
PB
4900 int stride;
4901 int size;
4902 int reg;
4903 int pass;
4904 int load;
4905 int shift;
9ee6e8bb 4906 int n;
39d5492a
PM
4907 TCGv_i32 addr;
4908 TCGv_i32 tmp;
4909 TCGv_i32 tmp2;
84496233 4910 TCGv_i64 tmp64;
9ee6e8bb 4911
2c7ffc41
PM
4912 /* FIXME: this access check should not take precedence over UNDEF
4913 * for invalid encodings; we will generate incorrect syndrome information
4914 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4915 */
9dbbc748 4916 if (s->fp_excp_el) {
2c7ffc41 4917 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4918 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4919 return 0;
4920 }
4921
5df8bac1 4922 if (!s->vfp_enabled)
9ee6e8bb
PB
4923 return 1;
4924 VFP_DREG_D(rd, insn);
4925 rn = (insn >> 16) & 0xf;
4926 rm = insn & 0xf;
4927 load = (insn & (1 << 21)) != 0;
4928 if ((insn & (1 << 23)) == 0) {
4929 /* Load store all elements. */
4930 op = (insn >> 8) & 0xf;
4931 size = (insn >> 6) & 3;
84496233 4932 if (op > 10)
9ee6e8bb 4933 return 1;
f2dd89d0
PM
4934 /* Catch UNDEF cases for bad values of align field */
4935 switch (op & 0xc) {
4936 case 4:
4937 if (((insn >> 5) & 1) == 1) {
4938 return 1;
4939 }
4940 break;
4941 case 8:
4942 if (((insn >> 4) & 3) == 3) {
4943 return 1;
4944 }
4945 break;
4946 default:
4947 break;
4948 }
9ee6e8bb
PB
4949 nregs = neon_ls_element_type[op].nregs;
4950 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4951 spacing = neon_ls_element_type[op].spacing;
4952 if (size == 3 && (interleave | spacing) != 1)
4953 return 1;
e318a60b 4954 addr = tcg_temp_new_i32();
dcc65026 4955 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4956 stride = (1 << size) * interleave;
4957 for (reg = 0; reg < nregs; reg++) {
4958 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4959 load_reg_var(s, addr, rn);
4960 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4961 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4962 load_reg_var(s, addr, rn);
4963 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4964 }
84496233 4965 if (size == 3) {
8ed1237d 4966 tmp64 = tcg_temp_new_i64();
84496233 4967 if (load) {
12dcc321 4968 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 4969 neon_store_reg64(tmp64, rd);
84496233 4970 } else {
84496233 4971 neon_load_reg64(tmp64, rd);
12dcc321 4972 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 4973 }
8ed1237d 4974 tcg_temp_free_i64(tmp64);
84496233
JR
4975 tcg_gen_addi_i32(addr, addr, stride);
4976 } else {
4977 for (pass = 0; pass < 2; pass++) {
4978 if (size == 2) {
4979 if (load) {
58ab8e96 4980 tmp = tcg_temp_new_i32();
12dcc321 4981 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
4982 neon_store_reg(rd, pass, tmp);
4983 } else {
4984 tmp = neon_load_reg(rd, pass);
12dcc321 4985 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 4986 tcg_temp_free_i32(tmp);
84496233 4987 }
1b2b1e54 4988 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4989 } else if (size == 1) {
4990 if (load) {
58ab8e96 4991 tmp = tcg_temp_new_i32();
12dcc321 4992 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 4993 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 4994 tmp2 = tcg_temp_new_i32();
12dcc321 4995 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 4996 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4997 tcg_gen_shli_i32(tmp2, tmp2, 16);
4998 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4999 tcg_temp_free_i32(tmp2);
84496233
JR
5000 neon_store_reg(rd, pass, tmp);
5001 } else {
5002 tmp = neon_load_reg(rd, pass);
7d1b0095 5003 tmp2 = tcg_temp_new_i32();
84496233 5004 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 5005 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 5006 tcg_temp_free_i32(tmp);
84496233 5007 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 5008 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 5009 tcg_temp_free_i32(tmp2);
1b2b1e54 5010 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 5011 }
84496233
JR
5012 } else /* size == 0 */ {
5013 if (load) {
f764718d 5014 tmp2 = NULL;
84496233 5015 for (n = 0; n < 4; n++) {
58ab8e96 5016 tmp = tcg_temp_new_i32();
12dcc321 5017 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
5018 tcg_gen_addi_i32(addr, addr, stride);
5019 if (n == 0) {
5020 tmp2 = tmp;
5021 } else {
41ba8341
PB
5022 tcg_gen_shli_i32(tmp, tmp, n * 8);
5023 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 5024 tcg_temp_free_i32(tmp);
84496233 5025 }
9ee6e8bb 5026 }
84496233
JR
5027 neon_store_reg(rd, pass, tmp2);
5028 } else {
5029 tmp2 = neon_load_reg(rd, pass);
5030 for (n = 0; n < 4; n++) {
7d1b0095 5031 tmp = tcg_temp_new_i32();
84496233
JR
5032 if (n == 0) {
5033 tcg_gen_mov_i32(tmp, tmp2);
5034 } else {
5035 tcg_gen_shri_i32(tmp, tmp2, n * 8);
5036 }
12dcc321 5037 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 5038 tcg_temp_free_i32(tmp);
84496233
JR
5039 tcg_gen_addi_i32(addr, addr, stride);
5040 }
7d1b0095 5041 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5042 }
5043 }
5044 }
5045 }
84496233 5046 rd += spacing;
9ee6e8bb 5047 }
e318a60b 5048 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5049 stride = nregs * 8;
5050 } else {
5051 size = (insn >> 10) & 3;
5052 if (size == 3) {
5053 /* Load single element to all lanes. */
8e18cde3
PM
5054 int a = (insn >> 4) & 1;
5055 if (!load) {
9ee6e8bb 5056 return 1;
8e18cde3 5057 }
9ee6e8bb
PB
5058 size = (insn >> 6) & 3;
5059 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5060
5061 if (size == 3) {
5062 if (nregs != 4 || a == 0) {
9ee6e8bb 5063 return 1;
99c475ab 5064 }
8e18cde3
PM
5065 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5066 size = 2;
5067 }
5068 if (nregs == 1 && a == 1 && size == 0) {
5069 return 1;
5070 }
5071 if (nregs == 3 && a == 1) {
5072 return 1;
5073 }
e318a60b 5074 addr = tcg_temp_new_i32();
8e18cde3
PM
5075 load_reg_var(s, addr, rn);
5076 if (nregs == 1) {
5077 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5078 tmp = gen_load_and_replicate(s, addr, size);
5079 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5080 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5081 if (insn & (1 << 5)) {
5082 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5083 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5084 }
5085 tcg_temp_free_i32(tmp);
5086 } else {
5087 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5088 stride = (insn & (1 << 5)) ? 2 : 1;
5089 for (reg = 0; reg < nregs; reg++) {
5090 tmp = gen_load_and_replicate(s, addr, size);
5091 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5092 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5093 tcg_temp_free_i32(tmp);
5094 tcg_gen_addi_i32(addr, addr, 1 << size);
5095 rd += stride;
5096 }
9ee6e8bb 5097 }
e318a60b 5098 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5099 stride = (1 << size) * nregs;
5100 } else {
5101 /* Single element. */
93262b16 5102 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5103 pass = (insn >> 7) & 1;
5104 switch (size) {
5105 case 0:
5106 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5107 stride = 1;
5108 break;
5109 case 1:
5110 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5111 stride = (insn & (1 << 5)) ? 2 : 1;
5112 break;
5113 case 2:
5114 shift = 0;
9ee6e8bb
PB
5115 stride = (insn & (1 << 6)) ? 2 : 1;
5116 break;
5117 default:
5118 abort();
5119 }
5120 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5121 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5122 switch (nregs) {
5123 case 1:
5124 if (((idx & (1 << size)) != 0) ||
5125 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5126 return 1;
5127 }
5128 break;
5129 case 3:
5130 if ((idx & 1) != 0) {
5131 return 1;
5132 }
5133 /* fall through */
5134 case 2:
5135 if (size == 2 && (idx & 2) != 0) {
5136 return 1;
5137 }
5138 break;
5139 case 4:
5140 if ((size == 2) && ((idx & 3) == 3)) {
5141 return 1;
5142 }
5143 break;
5144 default:
5145 abort();
5146 }
5147 if ((rd + stride * (nregs - 1)) > 31) {
5148 /* Attempts to write off the end of the register file
5149 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5150 * the neon_load_reg() would write off the end of the array.
5151 */
5152 return 1;
5153 }
e318a60b 5154 addr = tcg_temp_new_i32();
dcc65026 5155 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5156 for (reg = 0; reg < nregs; reg++) {
5157 if (load) {
58ab8e96 5158 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5159 switch (size) {
5160 case 0:
12dcc321 5161 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5162 break;
5163 case 1:
12dcc321 5164 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5165 break;
5166 case 2:
12dcc321 5167 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5168 break;
a50f5b91
PB
5169 default: /* Avoid compiler warnings. */
5170 abort();
9ee6e8bb
PB
5171 }
5172 if (size != 2) {
8f8e3aa4 5173 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5174 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5175 shift, size ? 16 : 8);
7d1b0095 5176 tcg_temp_free_i32(tmp2);
9ee6e8bb 5177 }
8f8e3aa4 5178 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5179 } else { /* Store */
8f8e3aa4
PB
5180 tmp = neon_load_reg(rd, pass);
5181 if (shift)
5182 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5183 switch (size) {
5184 case 0:
12dcc321 5185 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5186 break;
5187 case 1:
12dcc321 5188 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5189 break;
5190 case 2:
12dcc321 5191 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5192 break;
99c475ab 5193 }
58ab8e96 5194 tcg_temp_free_i32(tmp);
99c475ab 5195 }
9ee6e8bb 5196 rd += stride;
1b2b1e54 5197 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5198 }
e318a60b 5199 tcg_temp_free_i32(addr);
9ee6e8bb 5200 stride = nregs * (1 << size);
99c475ab 5201 }
9ee6e8bb
PB
5202 }
5203 if (rm != 15) {
39d5492a 5204 TCGv_i32 base;
b26eefb6
PB
5205
5206 base = load_reg(s, rn);
9ee6e8bb 5207 if (rm == 13) {
b26eefb6 5208 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5209 } else {
39d5492a 5210 TCGv_i32 index;
b26eefb6
PB
5211 index = load_reg(s, rm);
5212 tcg_gen_add_i32(base, base, index);
7d1b0095 5213 tcg_temp_free_i32(index);
9ee6e8bb 5214 }
b26eefb6 5215 store_reg(s, rn, base);
9ee6e8bb
PB
5216 }
5217 return 0;
5218}
3b46e624 5219
8f8e3aa4 5220/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5221static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5222{
5223 tcg_gen_and_i32(t, t, c);
f669df27 5224 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5225 tcg_gen_or_i32(dest, t, f);
5226}
5227
39d5492a 5228static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5229{
5230 switch (size) {
5231 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5232 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5233 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5234 default: abort();
5235 }
5236}
5237
39d5492a 5238static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5239{
5240 switch (size) {
02da0b2d
PM
5241 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5242 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5243 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5244 default: abort();
5245 }
5246}
5247
39d5492a 5248static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5249{
5250 switch (size) {
02da0b2d
PM
5251 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5252 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5253 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5254 default: abort();
5255 }
5256}
5257
39d5492a 5258static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5259{
5260 switch (size) {
02da0b2d
PM
5261 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5262 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5263 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5264 default: abort();
5265 }
5266}
5267
39d5492a 5268static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5269 int q, int u)
5270{
5271 if (q) {
5272 if (u) {
5273 switch (size) {
5274 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5275 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5276 default: abort();
5277 }
5278 } else {
5279 switch (size) {
5280 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5281 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5282 default: abort();
5283 }
5284 }
5285 } else {
5286 if (u) {
5287 switch (size) {
b408a9b0
CL
5288 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5289 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5290 default: abort();
5291 }
5292 } else {
5293 switch (size) {
5294 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5295 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5296 default: abort();
5297 }
5298 }
5299 }
5300}
5301
39d5492a 5302static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5303{
5304 if (u) {
5305 switch (size) {
5306 case 0: gen_helper_neon_widen_u8(dest, src); break;
5307 case 1: gen_helper_neon_widen_u16(dest, src); break;
5308 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5309 default: abort();
5310 }
5311 } else {
5312 switch (size) {
5313 case 0: gen_helper_neon_widen_s8(dest, src); break;
5314 case 1: gen_helper_neon_widen_s16(dest, src); break;
5315 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5316 default: abort();
5317 }
5318 }
7d1b0095 5319 tcg_temp_free_i32(src);
ad69471c
PB
5320}
5321
5322static inline void gen_neon_addl(int size)
5323{
5324 switch (size) {
5325 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5326 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5327 case 2: tcg_gen_add_i64(CPU_V001); break;
5328 default: abort();
5329 }
5330}
5331
5332static inline void gen_neon_subl(int size)
5333{
5334 switch (size) {
5335 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5336 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5337 case 2: tcg_gen_sub_i64(CPU_V001); break;
5338 default: abort();
5339 }
5340}
5341
a7812ae4 5342static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5343{
5344 switch (size) {
5345 case 0: gen_helper_neon_negl_u16(var, var); break;
5346 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5347 case 2:
5348 tcg_gen_neg_i64(var, var);
5349 break;
ad69471c
PB
5350 default: abort();
5351 }
5352}
5353
a7812ae4 5354static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5355{
5356 switch (size) {
02da0b2d
PM
5357 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5358 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5359 default: abort();
5360 }
5361}
5362
39d5492a
PM
5363static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5364 int size, int u)
ad69471c 5365{
a7812ae4 5366 TCGv_i64 tmp;
ad69471c
PB
5367
5368 switch ((size << 1) | u) {
5369 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5370 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5371 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5372 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5373 case 4:
5374 tmp = gen_muls_i64_i32(a, b);
5375 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5376 tcg_temp_free_i64(tmp);
ad69471c
PB
5377 break;
5378 case 5:
5379 tmp = gen_mulu_i64_i32(a, b);
5380 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5381 tcg_temp_free_i64(tmp);
ad69471c
PB
5382 break;
5383 default: abort();
5384 }
c6067f04
CL
5385
5386 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5387 Don't forget to clean them now. */
5388 if (size < 2) {
7d1b0095
PM
5389 tcg_temp_free_i32(a);
5390 tcg_temp_free_i32(b);
c6067f04 5391 }
ad69471c
PB
5392}
5393
39d5492a
PM
5394static void gen_neon_narrow_op(int op, int u, int size,
5395 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5396{
5397 if (op) {
5398 if (u) {
5399 gen_neon_unarrow_sats(size, dest, src);
5400 } else {
5401 gen_neon_narrow(size, dest, src);
5402 }
5403 } else {
5404 if (u) {
5405 gen_neon_narrow_satu(size, dest, src);
5406 } else {
5407 gen_neon_narrow_sats(size, dest, src);
5408 }
5409 }
5410}
5411
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Each entry in this array has bit n set if the 3-reg-same insn with
 * that op value accepts size value n (otherwise it will UNDEF); the
 * decoder tests (neon_3r_sizes[op] & (1 << size)).  A value of 0xf
 * additionally permits size == 3 (the 64-bit element forms).
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5483
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Op values with no symbolic name here (e.g. 3 and 29)
 * are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5550
5551static int neon_2rm_is_float_op(int op)
5552{
5553 /* Return true if this neon 2reg-misc op is float-to-float */
5554 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5555 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5556 op == NEON_2RM_VRINTM ||
5557 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5558 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5559}
5560
fe8fcf3d
PM
5561static bool neon_2rm_is_v8_op(int op)
5562{
5563 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5564 switch (op) {
5565 case NEON_2RM_VRINTN:
5566 case NEON_2RM_VRINTA:
5567 case NEON_2RM_VRINTM:
5568 case NEON_2RM_VRINTP:
5569 case NEON_2RM_VRINTZ:
5570 case NEON_2RM_VRINTX:
5571 case NEON_2RM_VCVTAU:
5572 case NEON_2RM_VCVTAS:
5573 case NEON_2RM_VCVTNU:
5574 case NEON_2RM_VCVTNS:
5575 case NEON_2RM_VCVTPU:
5576 case NEON_2RM_VCVTPS:
5577 case NEON_2RM_VCVTMU:
5578 case NEON_2RM_VCVTMS:
5579 return true;
5580 default:
5581 return false;
5582 }
5583}
5584
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (So 0x7 allows sizes 0-2, 0x4 allows only size 2, etc.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5653
36a71934
RH
5654
5655/* Expand v8.1 simd helper. */
5656static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5657 int q, int rd, int rn, int rm)
5658{
5659 if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
5660 int opr_sz = (1 + q) * 8;
5661 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5662 vfp_reg_offset(1, rn),
5663 vfp_reg_offset(1, rm), cpu_env,
5664 opr_sz, opr_sz, 0, fn);
5665 return 0;
5666 }
5667 return 1;
5668}
5669
9ee6e8bb
PB
5670/* Translate a NEON data processing instruction. Return nonzero if the
5671 instruction is invalid.
ad69471c
PB
5672 We process data in a mixture of 32-bit and 64-bit chunks.
5673 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5674
7dcc1f89 5675static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5676{
5677 int op;
5678 int q;
5679 int rd, rn, rm;
5680 int size;
5681 int shift;
5682 int pass;
5683 int count;
5684 int pairwise;
5685 int u;
ca9a32e4 5686 uint32_t imm, mask;
39d5492a 5687 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 5688 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 5689 TCGv_i64 tmp64;
9ee6e8bb 5690
2c7ffc41
PM
5691 /* FIXME: this access check should not take precedence over UNDEF
5692 * for invalid encodings; we will generate incorrect syndrome information
5693 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5694 */
9dbbc748 5695 if (s->fp_excp_el) {
2c7ffc41 5696 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5697 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5698 return 0;
5699 }
5700
5df8bac1 5701 if (!s->vfp_enabled)
9ee6e8bb
PB
5702 return 1;
5703 q = (insn & (1 << 6)) != 0;
5704 u = (insn >> 24) & 1;
5705 VFP_DREG_D(rd, insn);
5706 VFP_DREG_N(rn, insn);
5707 VFP_DREG_M(rm, insn);
5708 size = (insn >> 20) & 3;
5709 if ((insn & (1 << 23)) == 0) {
5710 /* Three register same length. */
5711 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5712 /* Catch invalid op and bad size combinations: UNDEF */
5713 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5714 return 1;
5715 }
25f84f79
PM
5716 /* All insns of this form UNDEF for either this condition or the
5717 * superset of cases "Q==1"; we catch the latter later.
5718 */
5719 if (q && ((rd | rn | rm) & 1)) {
5720 return 1;
5721 }
36a71934
RH
5722 switch (op) {
5723 case NEON_3R_SHA:
5724 /* The SHA-1/SHA-256 3-register instructions require special
5725 * treatment here, as their size field is overloaded as an
5726 * op type selector, and they all consume their input in a
5727 * single pass.
5728 */
f1ecb913
AB
5729 if (!q) {
5730 return 1;
5731 }
5732 if (!u) { /* SHA-1 */
d614a513 5733 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5734 return 1;
5735 }
1a66ac61
RH
5736 ptr1 = vfp_reg_ptr(true, rd);
5737 ptr2 = vfp_reg_ptr(true, rn);
5738 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 5739 tmp4 = tcg_const_i32(size);
1a66ac61 5740 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
5741 tcg_temp_free_i32(tmp4);
5742 } else { /* SHA-256 */
d614a513 5743 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5744 return 1;
5745 }
1a66ac61
RH
5746 ptr1 = vfp_reg_ptr(true, rd);
5747 ptr2 = vfp_reg_ptr(true, rn);
5748 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
5749 switch (size) {
5750 case 0:
1a66ac61 5751 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
5752 break;
5753 case 1:
1a66ac61 5754 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
5755 break;
5756 case 2:
1a66ac61 5757 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
5758 break;
5759 }
5760 }
1a66ac61
RH
5761 tcg_temp_free_ptr(ptr1);
5762 tcg_temp_free_ptr(ptr2);
5763 tcg_temp_free_ptr(ptr3);
f1ecb913 5764 return 0;
36a71934
RH
5765
5766 case NEON_3R_VPADD_VQRDMLAH:
5767 if (!u) {
5768 break; /* VPADD */
5769 }
5770 /* VQRDMLAH */
5771 switch (size) {
5772 case 1:
5773 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
5774 q, rd, rn, rm);
5775 case 2:
5776 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
5777 q, rd, rn, rm);
5778 }
5779 return 1;
5780
5781 case NEON_3R_VFM_VQRDMLSH:
5782 if (!u) {
5783 /* VFM, VFMS */
5784 if (size == 1) {
5785 return 1;
5786 }
5787 break;
5788 }
5789 /* VQRDMLSH */
5790 switch (size) {
5791 case 1:
5792 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
5793 q, rd, rn, rm);
5794 case 2:
5795 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
5796 q, rd, rn, rm);
5797 }
5798 return 1;
f1ecb913 5799 }
62698be3
PM
5800 if (size == 3 && op != NEON_3R_LOGIC) {
5801 /* 64-bit element instructions. */
9ee6e8bb 5802 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5803 neon_load_reg64(cpu_V0, rn + pass);
5804 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5805 switch (op) {
62698be3 5806 case NEON_3R_VQADD:
9ee6e8bb 5807 if (u) {
02da0b2d
PM
5808 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5809 cpu_V0, cpu_V1);
2c0262af 5810 } else {
02da0b2d
PM
5811 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5812 cpu_V0, cpu_V1);
2c0262af 5813 }
9ee6e8bb 5814 break;
62698be3 5815 case NEON_3R_VQSUB:
9ee6e8bb 5816 if (u) {
02da0b2d
PM
5817 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5818 cpu_V0, cpu_V1);
ad69471c 5819 } else {
02da0b2d
PM
5820 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5821 cpu_V0, cpu_V1);
ad69471c
PB
5822 }
5823 break;
62698be3 5824 case NEON_3R_VSHL:
ad69471c
PB
5825 if (u) {
5826 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5827 } else {
5828 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5829 }
5830 break;
62698be3 5831 case NEON_3R_VQSHL:
ad69471c 5832 if (u) {
02da0b2d
PM
5833 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5834 cpu_V1, cpu_V0);
ad69471c 5835 } else {
02da0b2d
PM
5836 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5837 cpu_V1, cpu_V0);
ad69471c
PB
5838 }
5839 break;
62698be3 5840 case NEON_3R_VRSHL:
ad69471c
PB
5841 if (u) {
5842 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5843 } else {
ad69471c
PB
5844 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5845 }
5846 break;
62698be3 5847 case NEON_3R_VQRSHL:
ad69471c 5848 if (u) {
02da0b2d
PM
5849 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5850 cpu_V1, cpu_V0);
ad69471c 5851 } else {
02da0b2d
PM
5852 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5853 cpu_V1, cpu_V0);
1e8d4eec 5854 }
9ee6e8bb 5855 break;
62698be3 5856 case NEON_3R_VADD_VSUB:
9ee6e8bb 5857 if (u) {
ad69471c 5858 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5859 } else {
ad69471c 5860 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5861 }
5862 break;
5863 default:
5864 abort();
2c0262af 5865 }
ad69471c 5866 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5867 }
9ee6e8bb 5868 return 0;
2c0262af 5869 }
25f84f79 5870 pairwise = 0;
9ee6e8bb 5871 switch (op) {
62698be3
PM
5872 case NEON_3R_VSHL:
5873 case NEON_3R_VQSHL:
5874 case NEON_3R_VRSHL:
5875 case NEON_3R_VQRSHL:
9ee6e8bb 5876 {
ad69471c
PB
5877 int rtmp;
5878 /* Shift instruction operands are reversed. */
5879 rtmp = rn;
9ee6e8bb 5880 rn = rm;
ad69471c 5881 rm = rtmp;
9ee6e8bb 5882 }
2c0262af 5883 break;
36a71934 5884 case NEON_3R_VPADD_VQRDMLAH:
62698be3
PM
5885 case NEON_3R_VPMAX:
5886 case NEON_3R_VPMIN:
9ee6e8bb 5887 pairwise = 1;
2c0262af 5888 break;
25f84f79
PM
5889 case NEON_3R_FLOAT_ARITH:
5890 pairwise = (u && size < 2); /* if VPADD (float) */
5891 break;
5892 case NEON_3R_FLOAT_MINMAX:
5893 pairwise = u; /* if VPMIN/VPMAX (float) */
5894 break;
5895 case NEON_3R_FLOAT_CMP:
5896 if (!u && size) {
5897 /* no encoding for U=0 C=1x */
5898 return 1;
5899 }
5900 break;
5901 case NEON_3R_FLOAT_ACMP:
5902 if (!u) {
5903 return 1;
5904 }
5905 break;
505935fc
WN
5906 case NEON_3R_FLOAT_MISC:
5907 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5908 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5909 return 1;
5910 }
2c0262af 5911 break;
25f84f79
PM
5912 case NEON_3R_VMUL:
5913 if (u && (size != 0)) {
5914 /* UNDEF on invalid size for polynomial subcase */
5915 return 1;
5916 }
2c0262af 5917 break;
36a71934
RH
5918 case NEON_3R_VFM_VQRDMLSH:
5919 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
5920 return 1;
5921 }
5922 break;
9ee6e8bb 5923 default:
2c0262af 5924 break;
9ee6e8bb 5925 }
dd8fbd78 5926
25f84f79
PM
5927 if (pairwise && q) {
5928 /* All the pairwise insns UNDEF if Q is set */
5929 return 1;
5930 }
5931
9ee6e8bb
PB
5932 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5933
5934 if (pairwise) {
5935 /* Pairwise. */
a5a14945
JR
5936 if (pass < 1) {
5937 tmp = neon_load_reg(rn, 0);
5938 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5939 } else {
a5a14945
JR
5940 tmp = neon_load_reg(rm, 0);
5941 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5942 }
5943 } else {
5944 /* Elementwise. */
dd8fbd78
FN
5945 tmp = neon_load_reg(rn, pass);
5946 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5947 }
5948 switch (op) {
62698be3 5949 case NEON_3R_VHADD:
9ee6e8bb
PB
5950 GEN_NEON_INTEGER_OP(hadd);
5951 break;
62698be3 5952 case NEON_3R_VQADD:
02da0b2d 5953 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5954 break;
62698be3 5955 case NEON_3R_VRHADD:
9ee6e8bb 5956 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5957 break;
62698be3 5958 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5959 switch ((u << 2) | size) {
5960 case 0: /* VAND */
dd8fbd78 5961 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5962 break;
5963 case 1: /* BIC */
f669df27 5964 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5965 break;
5966 case 2: /* VORR */
dd8fbd78 5967 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5968 break;
5969 case 3: /* VORN */
f669df27 5970 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5971 break;
5972 case 4: /* VEOR */
dd8fbd78 5973 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5974 break;
5975 case 5: /* VBSL */
dd8fbd78
FN
5976 tmp3 = neon_load_reg(rd, pass);
5977 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5978 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5979 break;
5980 case 6: /* VBIT */
dd8fbd78
FN
5981 tmp3 = neon_load_reg(rd, pass);
5982 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5983 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5984 break;
5985 case 7: /* VBIF */
dd8fbd78
FN
5986 tmp3 = neon_load_reg(rd, pass);
5987 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5988 tcg_temp_free_i32(tmp3);
9ee6e8bb 5989 break;
2c0262af
FB
5990 }
5991 break;
62698be3 5992 case NEON_3R_VHSUB:
9ee6e8bb
PB
5993 GEN_NEON_INTEGER_OP(hsub);
5994 break;
62698be3 5995 case NEON_3R_VQSUB:
02da0b2d 5996 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5997 break;
62698be3 5998 case NEON_3R_VCGT:
9ee6e8bb
PB
5999 GEN_NEON_INTEGER_OP(cgt);
6000 break;
62698be3 6001 case NEON_3R_VCGE:
9ee6e8bb
PB
6002 GEN_NEON_INTEGER_OP(cge);
6003 break;
62698be3 6004 case NEON_3R_VSHL:
ad69471c 6005 GEN_NEON_INTEGER_OP(shl);
2c0262af 6006 break;
62698be3 6007 case NEON_3R_VQSHL:
02da0b2d 6008 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 6009 break;
62698be3 6010 case NEON_3R_VRSHL:
ad69471c 6011 GEN_NEON_INTEGER_OP(rshl);
2c0262af 6012 break;
62698be3 6013 case NEON_3R_VQRSHL:
02da0b2d 6014 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 6015 break;
62698be3 6016 case NEON_3R_VMAX:
9ee6e8bb
PB
6017 GEN_NEON_INTEGER_OP(max);
6018 break;
62698be3 6019 case NEON_3R_VMIN:
9ee6e8bb
PB
6020 GEN_NEON_INTEGER_OP(min);
6021 break;
62698be3 6022 case NEON_3R_VABD:
9ee6e8bb
PB
6023 GEN_NEON_INTEGER_OP(abd);
6024 break;
62698be3 6025 case NEON_3R_VABA:
9ee6e8bb 6026 GEN_NEON_INTEGER_OP(abd);
7d1b0095 6027 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
6028 tmp2 = neon_load_reg(rd, pass);
6029 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 6030 break;
62698be3 6031 case NEON_3R_VADD_VSUB:
9ee6e8bb 6032 if (!u) { /* VADD */
62698be3 6033 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6034 } else { /* VSUB */
6035 switch (size) {
dd8fbd78
FN
6036 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
6037 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
6038 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 6039 default: abort();
9ee6e8bb
PB
6040 }
6041 }
6042 break;
62698be3 6043 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
6044 if (!u) { /* VTST */
6045 switch (size) {
dd8fbd78
FN
6046 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
6047 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
6048 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 6049 default: abort();
9ee6e8bb
PB
6050 }
6051 } else { /* VCEQ */
6052 switch (size) {
dd8fbd78
FN
6053 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6054 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6055 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 6056 default: abort();
9ee6e8bb
PB
6057 }
6058 }
6059 break;
62698be3 6060 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 6061 switch (size) {
dd8fbd78
FN
6062 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6063 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6064 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6065 default: abort();
9ee6e8bb 6066 }
7d1b0095 6067 tcg_temp_free_i32(tmp2);
dd8fbd78 6068 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6069 if (u) { /* VMLS */
dd8fbd78 6070 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 6071 } else { /* VMLA */
dd8fbd78 6072 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6073 }
6074 break;
62698be3 6075 case NEON_3R_VMUL:
9ee6e8bb 6076 if (u) { /* polynomial */
dd8fbd78 6077 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
6078 } else { /* Integer */
6079 switch (size) {
dd8fbd78
FN
6080 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6081 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6082 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 6083 default: abort();
9ee6e8bb
PB
6084 }
6085 }
6086 break;
62698be3 6087 case NEON_3R_VPMAX:
9ee6e8bb
PB
6088 GEN_NEON_INTEGER_OP(pmax);
6089 break;
62698be3 6090 case NEON_3R_VPMIN:
9ee6e8bb
PB
6091 GEN_NEON_INTEGER_OP(pmin);
6092 break;
62698be3 6093 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
6094 if (!u) { /* VQDMULH */
6095 switch (size) {
02da0b2d
PM
6096 case 1:
6097 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6098 break;
6099 case 2:
6100 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6101 break;
62698be3 6102 default: abort();
9ee6e8bb 6103 }
62698be3 6104 } else { /* VQRDMULH */
9ee6e8bb 6105 switch (size) {
02da0b2d
PM
6106 case 1:
6107 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6108 break;
6109 case 2:
6110 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6111 break;
62698be3 6112 default: abort();
9ee6e8bb
PB
6113 }
6114 }
6115 break;
36a71934 6116 case NEON_3R_VPADD_VQRDMLAH:
9ee6e8bb 6117 switch (size) {
dd8fbd78
FN
6118 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6119 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6120 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 6121 default: abort();
9ee6e8bb
PB
6122 }
6123 break;
62698be3 6124 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6125 {
6126 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6127 switch ((u << 2) | size) {
6128 case 0: /* VADD */
aa47cfdd
PM
6129 case 4: /* VPADD */
6130 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6131 break;
6132 case 2: /* VSUB */
aa47cfdd 6133 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6134 break;
6135 case 6: /* VABD */
aa47cfdd 6136 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6137 break;
6138 default:
62698be3 6139 abort();
9ee6e8bb 6140 }
aa47cfdd 6141 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6142 break;
aa47cfdd 6143 }
62698be3 6144 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6145 {
6146 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6147 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6148 if (!u) {
7d1b0095 6149 tcg_temp_free_i32(tmp2);
dd8fbd78 6150 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6151 if (size == 0) {
aa47cfdd 6152 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6153 } else {
aa47cfdd 6154 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6155 }
6156 }
aa47cfdd 6157 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6158 break;
aa47cfdd 6159 }
62698be3 6160 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6161 {
6162 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6163 if (!u) {
aa47cfdd 6164 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6165 } else {
aa47cfdd
PM
6166 if (size == 0) {
6167 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6168 } else {
6169 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6170 }
b5ff1b31 6171 }
aa47cfdd 6172 tcg_temp_free_ptr(fpstatus);
2c0262af 6173 break;
aa47cfdd 6174 }
62698be3 6175 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6176 {
6177 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6178 if (size == 0) {
6179 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6180 } else {
6181 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6182 }
6183 tcg_temp_free_ptr(fpstatus);
2c0262af 6184 break;
aa47cfdd 6185 }
62698be3 6186 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6187 {
6188 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6189 if (size == 0) {
f71a2ae5 6190 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6191 } else {
f71a2ae5 6192 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6193 }
6194 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6195 break;
aa47cfdd 6196 }
505935fc
WN
6197 case NEON_3R_FLOAT_MISC:
6198 if (u) {
6199 /* VMAXNM/VMINNM */
6200 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6201 if (size == 0) {
f71a2ae5 6202 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6203 } else {
f71a2ae5 6204 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6205 }
6206 tcg_temp_free_ptr(fpstatus);
6207 } else {
6208 if (size == 0) {
6209 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6210 } else {
6211 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6212 }
6213 }
2c0262af 6214 break;
36a71934 6215 case NEON_3R_VFM_VQRDMLSH:
da97f52c
PM
6216 {
6217 /* VFMA, VFMS: fused multiply-add */
6218 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6219 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6220 if (size) {
6221 /* VFMS */
6222 gen_helper_vfp_negs(tmp, tmp);
6223 }
6224 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6225 tcg_temp_free_i32(tmp3);
6226 tcg_temp_free_ptr(fpstatus);
6227 break;
6228 }
9ee6e8bb
PB
6229 default:
6230 abort();
2c0262af 6231 }
7d1b0095 6232 tcg_temp_free_i32(tmp2);
dd8fbd78 6233
9ee6e8bb
PB
6234 /* Save the result. For elementwise operations we can put it
6235 straight into the destination register. For pairwise operations
6236 we have to be careful to avoid clobbering the source operands. */
6237 if (pairwise && rd == rm) {
dd8fbd78 6238 neon_store_scratch(pass, tmp);
9ee6e8bb 6239 } else {
dd8fbd78 6240 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6241 }
6242
6243 } /* for pass */
6244 if (pairwise && rd == rm) {
6245 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6246 tmp = neon_load_scratch(pass);
6247 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6248 }
6249 }
ad69471c 6250 /* End of 3 register same size operations. */
9ee6e8bb
PB
6251 } else if (insn & (1 << 4)) {
6252 if ((insn & 0x00380080) != 0) {
6253 /* Two registers and shift. */
6254 op = (insn >> 8) & 0xf;
6255 if (insn & (1 << 7)) {
cc13115b
PM
6256 /* 64-bit shift. */
6257 if (op > 7) {
6258 return 1;
6259 }
9ee6e8bb
PB
6260 size = 3;
6261 } else {
6262 size = 2;
6263 while ((insn & (1 << (size + 19))) == 0)
6264 size--;
6265 }
6266 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6267 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6268 by immediate using the variable shift operations. */
6269 if (op < 8) {
6270 /* Shift by immediate:
6271 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6272 if (q && ((rd | rm) & 1)) {
6273 return 1;
6274 }
6275 if (!u && (op == 4 || op == 6)) {
6276 return 1;
6277 }
9ee6e8bb
PB
6278 /* Right shifts are encoded as N - shift, where N is the
6279 element size in bits. */
6280 if (op <= 4)
6281 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6282 if (size == 3) {
6283 count = q + 1;
6284 } else {
6285 count = q ? 4: 2;
6286 }
6287 switch (size) {
6288 case 0:
6289 imm = (uint8_t) shift;
6290 imm |= imm << 8;
6291 imm |= imm << 16;
6292 break;
6293 case 1:
6294 imm = (uint16_t) shift;
6295 imm |= imm << 16;
6296 break;
6297 case 2:
6298 case 3:
6299 imm = shift;
6300 break;
6301 default:
6302 abort();
6303 }
6304
6305 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6306 if (size == 3) {
6307 neon_load_reg64(cpu_V0, rm + pass);
6308 tcg_gen_movi_i64(cpu_V1, imm);
6309 switch (op) {
6310 case 0: /* VSHR */
6311 case 1: /* VSRA */
6312 if (u)
6313 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6314 else
ad69471c 6315 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6316 break;
ad69471c
PB
6317 case 2: /* VRSHR */
6318 case 3: /* VRSRA */
6319 if (u)
6320 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6321 else
ad69471c 6322 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6323 break;
ad69471c 6324 case 4: /* VSRI */
ad69471c
PB
6325 case 5: /* VSHL, VSLI */
6326 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6327 break;
0322b26e 6328 case 6: /* VQSHLU */
02da0b2d
PM
6329 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6330 cpu_V0, cpu_V1);
ad69471c 6331 break;
0322b26e
PM
6332 case 7: /* VQSHL */
6333 if (u) {
02da0b2d 6334 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6335 cpu_V0, cpu_V1);
6336 } else {
02da0b2d 6337 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6338 cpu_V0, cpu_V1);
6339 }
9ee6e8bb 6340 break;
9ee6e8bb 6341 }
ad69471c
PB
6342 if (op == 1 || op == 3) {
6343 /* Accumulate. */
5371cb81 6344 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6345 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6346 } else if (op == 4 || (op == 5 && u)) {
6347 /* Insert */
923e6509
CL
6348 neon_load_reg64(cpu_V1, rd + pass);
6349 uint64_t mask;
6350 if (shift < -63 || shift > 63) {
6351 mask = 0;
6352 } else {
6353 if (op == 4) {
6354 mask = 0xffffffffffffffffull >> -shift;
6355 } else {
6356 mask = 0xffffffffffffffffull << shift;
6357 }
6358 }
6359 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6360 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6361 }
6362 neon_store_reg64(cpu_V0, rd + pass);
6363 } else { /* size < 3 */
6364 /* Operands in T0 and T1. */
dd8fbd78 6365 tmp = neon_load_reg(rm, pass);
7d1b0095 6366 tmp2 = tcg_temp_new_i32();
dd8fbd78 6367 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6368 switch (op) {
6369 case 0: /* VSHR */
6370 case 1: /* VSRA */
6371 GEN_NEON_INTEGER_OP(shl);
6372 break;
6373 case 2: /* VRSHR */
6374 case 3: /* VRSRA */
6375 GEN_NEON_INTEGER_OP(rshl);
6376 break;
6377 case 4: /* VSRI */
ad69471c
PB
6378 case 5: /* VSHL, VSLI */
6379 switch (size) {
dd8fbd78
FN
6380 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6381 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6382 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6383 default: abort();
ad69471c
PB
6384 }
6385 break;
0322b26e 6386 case 6: /* VQSHLU */
ad69471c 6387 switch (size) {
0322b26e 6388 case 0:
02da0b2d
PM
6389 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6390 tmp, tmp2);
0322b26e
PM
6391 break;
6392 case 1:
02da0b2d
PM
6393 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6394 tmp, tmp2);
0322b26e
PM
6395 break;
6396 case 2:
02da0b2d
PM
6397 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6398 tmp, tmp2);
0322b26e
PM
6399 break;
6400 default:
cc13115b 6401 abort();
ad69471c
PB
6402 }
6403 break;
0322b26e 6404 case 7: /* VQSHL */
02da0b2d 6405 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6406 break;
ad69471c 6407 }
7d1b0095 6408 tcg_temp_free_i32(tmp2);
ad69471c
PB
6409
6410 if (op == 1 || op == 3) {
6411 /* Accumulate. */
dd8fbd78 6412 tmp2 = neon_load_reg(rd, pass);
5371cb81 6413 gen_neon_add(size, tmp, tmp2);
7d1b0095 6414 tcg_temp_free_i32(tmp2);
ad69471c
PB
6415 } else if (op == 4 || (op == 5 && u)) {
6416 /* Insert */
6417 switch (size) {
6418 case 0:
6419 if (op == 4)
ca9a32e4 6420 mask = 0xff >> -shift;
ad69471c 6421 else
ca9a32e4
JR
6422 mask = (uint8_t)(0xff << shift);
6423 mask |= mask << 8;
6424 mask |= mask << 16;
ad69471c
PB
6425 break;
6426 case 1:
6427 if (op == 4)
ca9a32e4 6428 mask = 0xffff >> -shift;
ad69471c 6429 else
ca9a32e4
JR
6430 mask = (uint16_t)(0xffff << shift);
6431 mask |= mask << 16;
ad69471c
PB
6432 break;
6433 case 2:
ca9a32e4
JR
6434 if (shift < -31 || shift > 31) {
6435 mask = 0;
6436 } else {
6437 if (op == 4)
6438 mask = 0xffffffffu >> -shift;
6439 else
6440 mask = 0xffffffffu << shift;
6441 }
ad69471c
PB
6442 break;
6443 default:
6444 abort();
6445 }
dd8fbd78 6446 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6447 tcg_gen_andi_i32(tmp, tmp, mask);
6448 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6449 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6450 tcg_temp_free_i32(tmp2);
ad69471c 6451 }
dd8fbd78 6452 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6453 }
6454 } /* for pass */
6455 } else if (op < 10) {
ad69471c 6456 /* Shift by immediate and narrow:
9ee6e8bb 6457 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6458 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6459 if (rm & 1) {
6460 return 1;
6461 }
9ee6e8bb
PB
6462 shift = shift - (1 << (size + 3));
6463 size++;
92cdfaeb 6464 if (size == 3) {
a7812ae4 6465 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6466 neon_load_reg64(cpu_V0, rm);
6467 neon_load_reg64(cpu_V1, rm + 1);
6468 for (pass = 0; pass < 2; pass++) {
6469 TCGv_i64 in;
6470 if (pass == 0) {
6471 in = cpu_V0;
6472 } else {
6473 in = cpu_V1;
6474 }
ad69471c 6475 if (q) {
0b36f4cd 6476 if (input_unsigned) {
92cdfaeb 6477 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6478 } else {
92cdfaeb 6479 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6480 }
ad69471c 6481 } else {
0b36f4cd 6482 if (input_unsigned) {
92cdfaeb 6483 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6484 } else {
92cdfaeb 6485 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6486 }
ad69471c 6487 }
7d1b0095 6488 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6489 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6490 neon_store_reg(rd, pass, tmp);
6491 } /* for pass */
6492 tcg_temp_free_i64(tmp64);
6493 } else {
6494 if (size == 1) {
6495 imm = (uint16_t)shift;
6496 imm |= imm << 16;
2c0262af 6497 } else {
92cdfaeb
PM
6498 /* size == 2 */
6499 imm = (uint32_t)shift;
6500 }
6501 tmp2 = tcg_const_i32(imm);
6502 tmp4 = neon_load_reg(rm + 1, 0);
6503 tmp5 = neon_load_reg(rm + 1, 1);
6504 for (pass = 0; pass < 2; pass++) {
6505 if (pass == 0) {
6506 tmp = neon_load_reg(rm, 0);
6507 } else {
6508 tmp = tmp4;
6509 }
0b36f4cd
CL
6510 gen_neon_shift_narrow(size, tmp, tmp2, q,
6511 input_unsigned);
92cdfaeb
PM
6512 if (pass == 0) {
6513 tmp3 = neon_load_reg(rm, 1);
6514 } else {
6515 tmp3 = tmp5;
6516 }
0b36f4cd
CL
6517 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6518 input_unsigned);
36aa55dc 6519 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6520 tcg_temp_free_i32(tmp);
6521 tcg_temp_free_i32(tmp3);
6522 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6523 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6524 neon_store_reg(rd, pass, tmp);
6525 } /* for pass */
c6067f04 6526 tcg_temp_free_i32(tmp2);
b75263d6 6527 }
9ee6e8bb 6528 } else if (op == 10) {
cc13115b
PM
6529 /* VSHLL, VMOVL */
6530 if (q || (rd & 1)) {
9ee6e8bb 6531 return 1;
cc13115b 6532 }
ad69471c
PB
6533 tmp = neon_load_reg(rm, 0);
6534 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6535 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6536 if (pass == 1)
6537 tmp = tmp2;
6538
6539 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6540
9ee6e8bb
PB
6541 if (shift != 0) {
6542 /* The shift is less than the width of the source
ad69471c
PB
6543 type, so we can just shift the whole register. */
6544 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6545 /* Widen the result of shift: we need to clear
6546 * the potential overflow bits resulting from
6547 * left bits of the narrow input appearing as
6548 * right bits of left the neighbour narrow
6549 * input. */
ad69471c
PB
6550 if (size < 2 || !u) {
6551 uint64_t imm64;
6552 if (size == 0) {
6553 imm = (0xffu >> (8 - shift));
6554 imm |= imm << 16;
acdf01ef 6555 } else if (size == 1) {
ad69471c 6556 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6557 } else {
6558 /* size == 2 */
6559 imm = 0xffffffff >> (32 - shift);
6560 }
6561 if (size < 2) {
6562 imm64 = imm | (((uint64_t)imm) << 32);
6563 } else {
6564 imm64 = imm;
9ee6e8bb 6565 }
acdf01ef 6566 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6567 }
6568 }
ad69471c 6569 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6570 }
f73534a5 6571 } else if (op >= 14) {
9ee6e8bb 6572 /* VCVT fixed-point. */
cc13115b
PM
6573 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6574 return 1;
6575 }
f73534a5
PM
6576 /* We have already masked out the must-be-1 top bit of imm6,
6577 * hence this 32-shift where the ARM ARM has 64-imm6.
6578 */
6579 shift = 32 - shift;
9ee6e8bb 6580 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6581 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6582 if (!(op & 1)) {
9ee6e8bb 6583 if (u)
5500b06c 6584 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6585 else
5500b06c 6586 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6587 } else {
6588 if (u)
5500b06c 6589 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6590 else
5500b06c 6591 gen_vfp_tosl(0, shift, 1);
2c0262af 6592 }
4373f3ce 6593 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6594 }
6595 } else {
9ee6e8bb
PB
6596 return 1;
6597 }
6598 } else { /* (insn & 0x00380080) == 0 */
6599 int invert;
7d80fee5
PM
6600 if (q && (rd & 1)) {
6601 return 1;
6602 }
9ee6e8bb
PB
6603
6604 op = (insn >> 8) & 0xf;
6605 /* One register and immediate. */
6606 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6607 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6608 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6609 * We choose to not special-case this and will behave as if a
6610 * valid constant encoding of 0 had been given.
6611 */
9ee6e8bb
PB
6612 switch (op) {
6613 case 0: case 1:
6614 /* no-op */
6615 break;
6616 case 2: case 3:
6617 imm <<= 8;
6618 break;
6619 case 4: case 5:
6620 imm <<= 16;
6621 break;
6622 case 6: case 7:
6623 imm <<= 24;
6624 break;
6625 case 8: case 9:
6626 imm |= imm << 16;
6627 break;
6628 case 10: case 11:
6629 imm = (imm << 8) | (imm << 24);
6630 break;
6631 case 12:
8e31209e 6632 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6633 break;
6634 case 13:
6635 imm = (imm << 16) | 0xffff;
6636 break;
6637 case 14:
6638 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6639 if (invert)
6640 imm = ~imm;
6641 break;
6642 case 15:
7d80fee5
PM
6643 if (invert) {
6644 return 1;
6645 }
9ee6e8bb
PB
6646 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6647 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6648 break;
6649 }
6650 if (invert)
6651 imm = ~imm;
6652
9ee6e8bb
PB
6653 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6654 if (op & 1 && op < 12) {
ad69471c 6655 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6656 if (invert) {
6657 /* The immediate value has already been inverted, so
6658 BIC becomes AND. */
ad69471c 6659 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6660 } else {
ad69471c 6661 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6662 }
9ee6e8bb 6663 } else {
ad69471c 6664 /* VMOV, VMVN. */
7d1b0095 6665 tmp = tcg_temp_new_i32();
9ee6e8bb 6666 if (op == 14 && invert) {
a5a14945 6667 int n;
ad69471c
PB
6668 uint32_t val;
6669 val = 0;
9ee6e8bb
PB
6670 for (n = 0; n < 4; n++) {
6671 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6672 val |= 0xff << (n * 8);
9ee6e8bb 6673 }
ad69471c
PB
6674 tcg_gen_movi_i32(tmp, val);
6675 } else {
6676 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6677 }
9ee6e8bb 6678 }
ad69471c 6679 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6680 }
6681 }
e4b3861d 6682 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6683 if (size != 3) {
6684 op = (insn >> 8) & 0xf;
6685 if ((insn & (1 << 6)) == 0) {
6686 /* Three registers of different lengths. */
6687 int src1_wide;
6688 int src2_wide;
6689 int prewiden;
526d0096
PM
6690 /* undefreq: bit 0 : UNDEF if size == 0
6691 * bit 1 : UNDEF if size == 1
6692 * bit 2 : UNDEF if size == 2
6693 * bit 3 : UNDEF if U == 1
6694 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6695 */
6696 int undefreq;
6697 /* prewiden, src1_wide, src2_wide, undefreq */
6698 static const int neon_3reg_wide[16][4] = {
6699 {1, 0, 0, 0}, /* VADDL */
6700 {1, 1, 0, 0}, /* VADDW */
6701 {1, 0, 0, 0}, /* VSUBL */
6702 {1, 1, 0, 0}, /* VSUBW */
6703 {0, 1, 1, 0}, /* VADDHN */
6704 {0, 0, 0, 0}, /* VABAL */
6705 {0, 1, 1, 0}, /* VSUBHN */
6706 {0, 0, 0, 0}, /* VABDL */
6707 {0, 0, 0, 0}, /* VMLAL */
526d0096 6708 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6709 {0, 0, 0, 0}, /* VMLSL */
526d0096 6710 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6711 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6712 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6713 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6714 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6715 };
6716
6717 prewiden = neon_3reg_wide[op][0];
6718 src1_wide = neon_3reg_wide[op][1];
6719 src2_wide = neon_3reg_wide[op][2];
695272dc 6720 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6721
526d0096
PM
6722 if ((undefreq & (1 << size)) ||
6723 ((undefreq & 8) && u)) {
695272dc
PM
6724 return 1;
6725 }
6726 if ((src1_wide && (rn & 1)) ||
6727 (src2_wide && (rm & 1)) ||
6728 (!src2_wide && (rd & 1))) {
ad69471c 6729 return 1;
695272dc 6730 }
ad69471c 6731
4e624eda
PM
6732 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6733 * outside the loop below as it only performs a single pass.
6734 */
6735 if (op == 14 && size == 2) {
6736 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6737
d614a513 6738 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6739 return 1;
6740 }
6741 tcg_rn = tcg_temp_new_i64();
6742 tcg_rm = tcg_temp_new_i64();
6743 tcg_rd = tcg_temp_new_i64();
6744 neon_load_reg64(tcg_rn, rn);
6745 neon_load_reg64(tcg_rm, rm);
6746 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6747 neon_store_reg64(tcg_rd, rd);
6748 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6749 neon_store_reg64(tcg_rd, rd + 1);
6750 tcg_temp_free_i64(tcg_rn);
6751 tcg_temp_free_i64(tcg_rm);
6752 tcg_temp_free_i64(tcg_rd);
6753 return 0;
6754 }
6755
9ee6e8bb
PB
6756 /* Avoid overlapping operands. Wide source operands are
6757 always aligned so will never overlap with wide
6758 destinations in problematic ways. */
8f8e3aa4 6759 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6760 tmp = neon_load_reg(rm, 1);
6761 neon_store_scratch(2, tmp);
8f8e3aa4 6762 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6763 tmp = neon_load_reg(rn, 1);
6764 neon_store_scratch(2, tmp);
9ee6e8bb 6765 }
f764718d 6766 tmp3 = NULL;
9ee6e8bb 6767 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6768 if (src1_wide) {
6769 neon_load_reg64(cpu_V0, rn + pass);
f764718d 6770 tmp = NULL;
9ee6e8bb 6771 } else {
ad69471c 6772 if (pass == 1 && rd == rn) {
dd8fbd78 6773 tmp = neon_load_scratch(2);
9ee6e8bb 6774 } else {
ad69471c
PB
6775 tmp = neon_load_reg(rn, pass);
6776 }
6777 if (prewiden) {
6778 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6779 }
6780 }
ad69471c
PB
6781 if (src2_wide) {
6782 neon_load_reg64(cpu_V1, rm + pass);
f764718d 6783 tmp2 = NULL;
9ee6e8bb 6784 } else {
ad69471c 6785 if (pass == 1 && rd == rm) {
dd8fbd78 6786 tmp2 = neon_load_scratch(2);
9ee6e8bb 6787 } else {
ad69471c
PB
6788 tmp2 = neon_load_reg(rm, pass);
6789 }
6790 if (prewiden) {
6791 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6792 }
9ee6e8bb
PB
6793 }
6794 switch (op) {
6795 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6796 gen_neon_addl(size);
9ee6e8bb 6797 break;
79b0e534 6798 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6799 gen_neon_subl(size);
9ee6e8bb
PB
6800 break;
6801 case 5: case 7: /* VABAL, VABDL */
6802 switch ((size << 1) | u) {
ad69471c
PB
6803 case 0:
6804 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6805 break;
6806 case 1:
6807 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6808 break;
6809 case 2:
6810 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6811 break;
6812 case 3:
6813 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6814 break;
6815 case 4:
6816 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6817 break;
6818 case 5:
6819 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6820 break;
9ee6e8bb
PB
6821 default: abort();
6822 }
7d1b0095
PM
6823 tcg_temp_free_i32(tmp2);
6824 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6825 break;
6826 case 8: case 9: case 10: case 11: case 12: case 13:
6827 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6828 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6829 break;
6830 case 14: /* Polynomial VMULL */
e5ca24cb 6831 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6832 tcg_temp_free_i32(tmp2);
6833 tcg_temp_free_i32(tmp);
e5ca24cb 6834 break;
695272dc
PM
6835 default: /* 15 is RESERVED: caught earlier */
6836 abort();
9ee6e8bb 6837 }
ebcd88ce
PM
6838 if (op == 13) {
6839 /* VQDMULL */
6840 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6841 neon_store_reg64(cpu_V0, rd + pass);
6842 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6843 /* Accumulate. */
ebcd88ce 6844 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6845 switch (op) {
4dc064e6
PM
6846 case 10: /* VMLSL */
6847 gen_neon_negl(cpu_V0, size);
6848 /* Fall through */
6849 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6850 gen_neon_addl(size);
9ee6e8bb
PB
6851 break;
6852 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6853 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6854 if (op == 11) {
6855 gen_neon_negl(cpu_V0, size);
6856 }
ad69471c
PB
6857 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6858 break;
9ee6e8bb
PB
6859 default:
6860 abort();
6861 }
ad69471c 6862 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6863 } else if (op == 4 || op == 6) {
6864 /* Narrowing operation. */
7d1b0095 6865 tmp = tcg_temp_new_i32();
79b0e534 6866 if (!u) {
9ee6e8bb 6867 switch (size) {
ad69471c
PB
6868 case 0:
6869 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6870 break;
6871 case 1:
6872 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6873 break;
6874 case 2:
6875 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6876 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6877 break;
9ee6e8bb
PB
6878 default: abort();
6879 }
6880 } else {
6881 switch (size) {
ad69471c
PB
6882 case 0:
6883 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6884 break;
6885 case 1:
6886 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6887 break;
6888 case 2:
6889 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6890 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6891 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6892 break;
9ee6e8bb
PB
6893 default: abort();
6894 }
6895 }
ad69471c
PB
6896 if (pass == 0) {
6897 tmp3 = tmp;
6898 } else {
6899 neon_store_reg(rd, 0, tmp3);
6900 neon_store_reg(rd, 1, tmp);
6901 }
9ee6e8bb
PB
6902 } else {
6903 /* Write back the result. */
ad69471c 6904 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6905 }
6906 }
6907 } else {
3e3326df
PM
6908 /* Two registers and a scalar. NB that for ops of this form
6909 * the ARM ARM labels bit 24 as Q, but it is in our variable
6910 * 'u', not 'q'.
6911 */
6912 if (size == 0) {
6913 return 1;
6914 }
9ee6e8bb 6915 switch (op) {
9ee6e8bb 6916 case 1: /* Float VMLA scalar */
9ee6e8bb 6917 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6918 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6919 if (size == 1) {
6920 return 1;
6921 }
6922 /* fall through */
6923 case 0: /* Integer VMLA scalar */
6924 case 4: /* Integer VMLS scalar */
6925 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6926 case 12: /* VQDMULH scalar */
6927 case 13: /* VQRDMULH scalar */
3e3326df
PM
6928 if (u && ((rd | rn) & 1)) {
6929 return 1;
6930 }
dd8fbd78
FN
6931 tmp = neon_get_scalar(size, rm);
6932 neon_store_scratch(0, tmp);
9ee6e8bb 6933 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6934 tmp = neon_load_scratch(0);
6935 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6936 if (op == 12) {
6937 if (size == 1) {
02da0b2d 6938 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6939 } else {
02da0b2d 6940 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6941 }
6942 } else if (op == 13) {
6943 if (size == 1) {
02da0b2d 6944 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6945 } else {
02da0b2d 6946 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6947 }
6948 } else if (op & 1) {
aa47cfdd
PM
6949 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6950 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6951 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6952 } else {
6953 switch (size) {
dd8fbd78
FN
6954 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6955 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6956 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6957 default: abort();
9ee6e8bb
PB
6958 }
6959 }
7d1b0095 6960 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6961 if (op < 8) {
6962 /* Accumulate. */
dd8fbd78 6963 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6964 switch (op) {
6965 case 0:
dd8fbd78 6966 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6967 break;
6968 case 1:
aa47cfdd
PM
6969 {
6970 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6971 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6972 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6973 break;
aa47cfdd 6974 }
9ee6e8bb 6975 case 4:
dd8fbd78 6976 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6977 break;
6978 case 5:
aa47cfdd
PM
6979 {
6980 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6981 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6982 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6983 break;
aa47cfdd 6984 }
9ee6e8bb
PB
6985 default:
6986 abort();
6987 }
7d1b0095 6988 tcg_temp_free_i32(tmp2);
9ee6e8bb 6989 }
dd8fbd78 6990 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6991 }
6992 break;
9ee6e8bb 6993 case 3: /* VQDMLAL scalar */
9ee6e8bb 6994 case 7: /* VQDMLSL scalar */
9ee6e8bb 6995 case 11: /* VQDMULL scalar */
3e3326df 6996 if (u == 1) {
ad69471c 6997 return 1;
3e3326df
PM
6998 }
6999 /* fall through */
7000 case 2: /* VMLAL sclar */
7001 case 6: /* VMLSL scalar */
7002 case 10: /* VMULL scalar */
7003 if (rd & 1) {
7004 return 1;
7005 }
dd8fbd78 7006 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7007 /* We need a copy of tmp2 because gen_neon_mull
7008 * deletes it during pass 0. */
7d1b0095 7009 tmp4 = tcg_temp_new_i32();
c6067f04 7010 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7011 tmp3 = neon_load_reg(rn, 1);
ad69471c 7012
9ee6e8bb 7013 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7014 if (pass == 0) {
7015 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7016 } else {
dd8fbd78 7017 tmp = tmp3;
c6067f04 7018 tmp2 = tmp4;
9ee6e8bb 7019 }
ad69471c 7020 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7021 if (op != 11) {
7022 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7023 }
9ee6e8bb 7024 switch (op) {
4dc064e6
PM
7025 case 6:
7026 gen_neon_negl(cpu_V0, size);
7027 /* Fall through */
7028 case 2:
ad69471c 7029 gen_neon_addl(size);
9ee6e8bb
PB
7030 break;
7031 case 3: case 7:
ad69471c 7032 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7033 if (op == 7) {
7034 gen_neon_negl(cpu_V0, size);
7035 }
ad69471c 7036 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7037 break;
7038 case 10:
7039 /* no-op */
7040 break;
7041 case 11:
ad69471c 7042 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7043 break;
7044 default:
7045 abort();
7046 }
ad69471c 7047 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7048 }
61adacc8
RH
7049 break;
7050 case 14: /* VQRDMLAH scalar */
7051 case 15: /* VQRDMLSH scalar */
7052 {
7053 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7054
61adacc8
RH
7055 if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
7056 return 1;
7057 }
7058 if (u && ((rd | rn) & 1)) {
7059 return 1;
7060 }
7061 if (op == 14) {
7062 if (size == 1) {
7063 fn = gen_helper_neon_qrdmlah_s16;
7064 } else {
7065 fn = gen_helper_neon_qrdmlah_s32;
7066 }
7067 } else {
7068 if (size == 1) {
7069 fn = gen_helper_neon_qrdmlsh_s16;
7070 } else {
7071 fn = gen_helper_neon_qrdmlsh_s32;
7072 }
7073 }
dd8fbd78 7074
61adacc8
RH
7075 tmp2 = neon_get_scalar(size, rm);
7076 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7077 tmp = neon_load_reg(rn, pass);
7078 tmp3 = neon_load_reg(rd, pass);
7079 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7080 tcg_temp_free_i32(tmp3);
7081 neon_store_reg(rd, pass, tmp);
7082 }
7083 tcg_temp_free_i32(tmp2);
7084 }
9ee6e8bb 7085 break;
61adacc8
RH
7086 default:
7087 g_assert_not_reached();
9ee6e8bb
PB
7088 }
7089 }
7090 } else { /* size == 3 */
7091 if (!u) {
7092 /* Extract. */
9ee6e8bb 7093 imm = (insn >> 8) & 0xf;
ad69471c
PB
7094
7095 if (imm > 7 && !q)
7096 return 1;
7097
52579ea1
PM
7098 if (q && ((rd | rn | rm) & 1)) {
7099 return 1;
7100 }
7101
ad69471c
PB
7102 if (imm == 0) {
7103 neon_load_reg64(cpu_V0, rn);
7104 if (q) {
7105 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7106 }
ad69471c
PB
7107 } else if (imm == 8) {
7108 neon_load_reg64(cpu_V0, rn + 1);
7109 if (q) {
7110 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7111 }
ad69471c 7112 } else if (q) {
a7812ae4 7113 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7114 if (imm < 8) {
7115 neon_load_reg64(cpu_V0, rn);
a7812ae4 7116 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7117 } else {
7118 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7119 neon_load_reg64(tmp64, rm);
ad69471c
PB
7120 }
7121 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7122 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7123 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7124 if (imm < 8) {
7125 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7126 } else {
ad69471c
PB
7127 neon_load_reg64(cpu_V1, rm + 1);
7128 imm -= 8;
9ee6e8bb 7129 }
ad69471c 7130 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7131 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7132 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7133 tcg_temp_free_i64(tmp64);
ad69471c 7134 } else {
a7812ae4 7135 /* BUGFIX */
ad69471c 7136 neon_load_reg64(cpu_V0, rn);
a7812ae4 7137 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7138 neon_load_reg64(cpu_V1, rm);
a7812ae4 7139 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7140 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7141 }
7142 neon_store_reg64(cpu_V0, rd);
7143 if (q) {
7144 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7145 }
7146 } else if ((insn & (1 << 11)) == 0) {
7147 /* Two register misc. */
7148 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7149 size = (insn >> 18) & 3;
600b828c
PM
7150 /* UNDEF for unknown op values and bad op-size combinations */
7151 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7152 return 1;
7153 }
fe8fcf3d
PM
7154 if (neon_2rm_is_v8_op(op) &&
7155 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7156 return 1;
7157 }
fc2a9b37
PM
7158 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7159 q && ((rm | rd) & 1)) {
7160 return 1;
7161 }
9ee6e8bb 7162 switch (op) {
600b828c 7163 case NEON_2RM_VREV64:
9ee6e8bb 7164 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7165 tmp = neon_load_reg(rm, pass * 2);
7166 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7167 switch (size) {
dd8fbd78
FN
7168 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7169 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7170 case 2: /* no-op */ break;
7171 default: abort();
7172 }
dd8fbd78 7173 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7174 if (size == 2) {
dd8fbd78 7175 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7176 } else {
9ee6e8bb 7177 switch (size) {
dd8fbd78
FN
7178 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7179 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7180 default: abort();
7181 }
dd8fbd78 7182 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7183 }
7184 }
7185 break;
600b828c
PM
7186 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7187 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7188 for (pass = 0; pass < q + 1; pass++) {
7189 tmp = neon_load_reg(rm, pass * 2);
7190 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7191 tmp = neon_load_reg(rm, pass * 2 + 1);
7192 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7193 switch (size) {
7194 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7195 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7196 case 2: tcg_gen_add_i64(CPU_V001); break;
7197 default: abort();
7198 }
600b828c 7199 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7200 /* Accumulate. */
ad69471c
PB
7201 neon_load_reg64(cpu_V1, rd + pass);
7202 gen_neon_addl(size);
9ee6e8bb 7203 }
ad69471c 7204 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7205 }
7206 break;
600b828c 7207 case NEON_2RM_VTRN:
9ee6e8bb 7208 if (size == 2) {
a5a14945 7209 int n;
9ee6e8bb 7210 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7211 tmp = neon_load_reg(rm, n);
7212 tmp2 = neon_load_reg(rd, n + 1);
7213 neon_store_reg(rm, n, tmp2);
7214 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7215 }
7216 } else {
7217 goto elementwise;
7218 }
7219 break;
600b828c 7220 case NEON_2RM_VUZP:
02acedf9 7221 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7222 return 1;
9ee6e8bb
PB
7223 }
7224 break;
600b828c 7225 case NEON_2RM_VZIP:
d68a6f3a 7226 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7227 return 1;
9ee6e8bb
PB
7228 }
7229 break;
600b828c
PM
7230 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7231 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7232 if (rm & 1) {
7233 return 1;
7234 }
f764718d 7235 tmp2 = NULL;
9ee6e8bb 7236 for (pass = 0; pass < 2; pass++) {
ad69471c 7237 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7238 tmp = tcg_temp_new_i32();
600b828c
PM
7239 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7240 tmp, cpu_V0);
ad69471c
PB
7241 if (pass == 0) {
7242 tmp2 = tmp;
7243 } else {
7244 neon_store_reg(rd, 0, tmp2);
7245 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7246 }
9ee6e8bb
PB
7247 }
7248 break;
600b828c 7249 case NEON_2RM_VSHLL:
fc2a9b37 7250 if (q || (rd & 1)) {
9ee6e8bb 7251 return 1;
600b828c 7252 }
ad69471c
PB
7253 tmp = neon_load_reg(rm, 0);
7254 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7255 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7256 if (pass == 1)
7257 tmp = tmp2;
7258 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7259 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7260 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7261 }
7262 break;
600b828c 7263 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7264 {
7265 TCGv_ptr fpst;
7266 TCGv_i32 ahp;
7267
d614a513 7268 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7269 q || (rm & 1)) {
7270 return 1;
7271 }
7d1b0095
PM
7272 tmp = tcg_temp_new_i32();
7273 tmp2 = tcg_temp_new_i32();
486624fc
AB
7274 fpst = get_fpstatus_ptr(true);
7275 ahp = get_ahp_flag();
60011498 7276 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7277 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7278 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7279 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7280 tcg_gen_shli_i32(tmp2, tmp2, 16);
7281 tcg_gen_or_i32(tmp2, tmp2, tmp);
7282 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7283 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7284 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7285 neon_store_reg(rd, 0, tmp2);
7d1b0095 7286 tmp2 = tcg_temp_new_i32();
486624fc 7287 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7288 tcg_gen_shli_i32(tmp2, tmp2, 16);
7289 tcg_gen_or_i32(tmp2, tmp2, tmp);
7290 neon_store_reg(rd, 1, tmp2);
7d1b0095 7291 tcg_temp_free_i32(tmp);
486624fc
AB
7292 tcg_temp_free_i32(ahp);
7293 tcg_temp_free_ptr(fpst);
60011498 7294 break;
486624fc 7295 }
600b828c 7296 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7297 {
7298 TCGv_ptr fpst;
7299 TCGv_i32 ahp;
d614a513 7300 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7301 q || (rd & 1)) {
7302 return 1;
7303 }
486624fc
AB
7304 fpst = get_fpstatus_ptr(true);
7305 ahp = get_ahp_flag();
7d1b0095 7306 tmp3 = tcg_temp_new_i32();
60011498
PB
7307 tmp = neon_load_reg(rm, 0);
7308 tmp2 = neon_load_reg(rm, 1);
7309 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7310 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7311 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7312 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7313 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7314 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7315 tcg_temp_free_i32(tmp);
60011498 7316 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7317 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7318 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7319 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7320 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7321 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7322 tcg_temp_free_i32(tmp2);
7323 tcg_temp_free_i32(tmp3);
486624fc
AB
7324 tcg_temp_free_i32(ahp);
7325 tcg_temp_free_ptr(fpst);
60011498 7326 break;
486624fc 7327 }
9d935509 7328 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7329 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7330 || ((rm | rd) & 1)) {
7331 return 1;
7332 }
1a66ac61
RH
7333 ptr1 = vfp_reg_ptr(true, rd);
7334 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7335
7336 /* Bit 6 is the lowest opcode bit; it distinguishes between
7337 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7338 */
7339 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7340
7341 if (op == NEON_2RM_AESE) {
1a66ac61 7342 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7343 } else {
1a66ac61 7344 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7345 }
1a66ac61
RH
7346 tcg_temp_free_ptr(ptr1);
7347 tcg_temp_free_ptr(ptr2);
9d935509
AB
7348 tcg_temp_free_i32(tmp3);
7349 break;
f1ecb913 7350 case NEON_2RM_SHA1H:
d614a513 7351 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7352 || ((rm | rd) & 1)) {
7353 return 1;
7354 }
1a66ac61
RH
7355 ptr1 = vfp_reg_ptr(true, rd);
7356 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7357
1a66ac61 7358 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7359
1a66ac61
RH
7360 tcg_temp_free_ptr(ptr1);
7361 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7362 break;
7363 case NEON_2RM_SHA1SU1:
7364 if ((rm | rd) & 1) {
7365 return 1;
7366 }
7367 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7368 if (q) {
d614a513 7369 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7370 return 1;
7371 }
d614a513 7372 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7373 return 1;
7374 }
1a66ac61
RH
7375 ptr1 = vfp_reg_ptr(true, rd);
7376 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7377 if (q) {
1a66ac61 7378 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7379 } else {
1a66ac61 7380 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7381 }
1a66ac61
RH
7382 tcg_temp_free_ptr(ptr1);
7383 tcg_temp_free_ptr(ptr2);
f1ecb913 7384 break;
9ee6e8bb
PB
7385 default:
7386 elementwise:
7387 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7388 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7389 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7390 neon_reg_offset(rm, pass));
f764718d 7391 tmp = NULL;
9ee6e8bb 7392 } else {
dd8fbd78 7393 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7394 }
7395 switch (op) {
600b828c 7396 case NEON_2RM_VREV32:
9ee6e8bb 7397 switch (size) {
dd8fbd78
FN
7398 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7399 case 1: gen_swap_half(tmp); break;
600b828c 7400 default: abort();
9ee6e8bb
PB
7401 }
7402 break;
600b828c 7403 case NEON_2RM_VREV16:
dd8fbd78 7404 gen_rev16(tmp);
9ee6e8bb 7405 break;
600b828c 7406 case NEON_2RM_VCLS:
9ee6e8bb 7407 switch (size) {
dd8fbd78
FN
7408 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7409 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7410 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7411 default: abort();
9ee6e8bb
PB
7412 }
7413 break;
600b828c 7414 case NEON_2RM_VCLZ:
9ee6e8bb 7415 switch (size) {
dd8fbd78
FN
7416 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7417 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7418 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7419 default: abort();
9ee6e8bb
PB
7420 }
7421 break;
600b828c 7422 case NEON_2RM_VCNT:
dd8fbd78 7423 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7424 break;
600b828c 7425 case NEON_2RM_VMVN:
dd8fbd78 7426 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7427 break;
600b828c 7428 case NEON_2RM_VQABS:
9ee6e8bb 7429 switch (size) {
02da0b2d
PM
7430 case 0:
7431 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7432 break;
7433 case 1:
7434 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7435 break;
7436 case 2:
7437 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7438 break;
600b828c 7439 default: abort();
9ee6e8bb
PB
7440 }
7441 break;
600b828c 7442 case NEON_2RM_VQNEG:
9ee6e8bb 7443 switch (size) {
02da0b2d
PM
7444 case 0:
7445 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7446 break;
7447 case 1:
7448 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7449 break;
7450 case 2:
7451 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7452 break;
600b828c 7453 default: abort();
9ee6e8bb
PB
7454 }
7455 break;
600b828c 7456 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7457 tmp2 = tcg_const_i32(0);
9ee6e8bb 7458 switch(size) {
dd8fbd78
FN
7459 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7460 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7461 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7462 default: abort();
9ee6e8bb 7463 }
39d5492a 7464 tcg_temp_free_i32(tmp2);
600b828c 7465 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7466 tcg_gen_not_i32(tmp, tmp);
600b828c 7467 }
9ee6e8bb 7468 break;
600b828c 7469 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7470 tmp2 = tcg_const_i32(0);
9ee6e8bb 7471 switch(size) {
dd8fbd78
FN
7472 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7473 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7474 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7475 default: abort();
9ee6e8bb 7476 }
39d5492a 7477 tcg_temp_free_i32(tmp2);
600b828c 7478 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7479 tcg_gen_not_i32(tmp, tmp);
600b828c 7480 }
9ee6e8bb 7481 break;
600b828c 7482 case NEON_2RM_VCEQ0:
dd8fbd78 7483 tmp2 = tcg_const_i32(0);
9ee6e8bb 7484 switch(size) {
dd8fbd78
FN
7485 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7486 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7487 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7488 default: abort();
9ee6e8bb 7489 }
39d5492a 7490 tcg_temp_free_i32(tmp2);
9ee6e8bb 7491 break;
600b828c 7492 case NEON_2RM_VABS:
9ee6e8bb 7493 switch(size) {
dd8fbd78
FN
7494 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7495 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7496 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7497 default: abort();
9ee6e8bb
PB
7498 }
7499 break;
600b828c 7500 case NEON_2RM_VNEG:
dd8fbd78
FN
7501 tmp2 = tcg_const_i32(0);
7502 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7503 tcg_temp_free_i32(tmp2);
9ee6e8bb 7504 break;
600b828c 7505 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7506 {
7507 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7508 tmp2 = tcg_const_i32(0);
aa47cfdd 7509 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7510 tcg_temp_free_i32(tmp2);
aa47cfdd 7511 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7512 break;
aa47cfdd 7513 }
600b828c 7514 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7515 {
7516 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7517 tmp2 = tcg_const_i32(0);
aa47cfdd 7518 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7519 tcg_temp_free_i32(tmp2);
aa47cfdd 7520 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7521 break;
aa47cfdd 7522 }
600b828c 7523 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7524 {
7525 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7526 tmp2 = tcg_const_i32(0);
aa47cfdd 7527 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7528 tcg_temp_free_i32(tmp2);
aa47cfdd 7529 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7530 break;
aa47cfdd 7531 }
600b828c 7532 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7533 {
7534 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7535 tmp2 = tcg_const_i32(0);
aa47cfdd 7536 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7537 tcg_temp_free_i32(tmp2);
aa47cfdd 7538 tcg_temp_free_ptr(fpstatus);
0e326109 7539 break;
aa47cfdd 7540 }
600b828c 7541 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7542 {
7543 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7544 tmp2 = tcg_const_i32(0);
aa47cfdd 7545 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7546 tcg_temp_free_i32(tmp2);
aa47cfdd 7547 tcg_temp_free_ptr(fpstatus);
0e326109 7548 break;
aa47cfdd 7549 }
600b828c 7550 case NEON_2RM_VABS_F:
4373f3ce 7551 gen_vfp_abs(0);
9ee6e8bb 7552 break;
600b828c 7553 case NEON_2RM_VNEG_F:
4373f3ce 7554 gen_vfp_neg(0);
9ee6e8bb 7555 break;
600b828c 7556 case NEON_2RM_VSWP:
dd8fbd78
FN
7557 tmp2 = neon_load_reg(rd, pass);
7558 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7559 break;
600b828c 7560 case NEON_2RM_VTRN:
dd8fbd78 7561 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7562 switch (size) {
dd8fbd78
FN
7563 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7564 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7565 default: abort();
9ee6e8bb 7566 }
dd8fbd78 7567 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7568 break;
34f7b0a2
WN
7569 case NEON_2RM_VRINTN:
7570 case NEON_2RM_VRINTA:
7571 case NEON_2RM_VRINTM:
7572 case NEON_2RM_VRINTP:
7573 case NEON_2RM_VRINTZ:
7574 {
7575 TCGv_i32 tcg_rmode;
7576 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7577 int rmode;
7578
7579 if (op == NEON_2RM_VRINTZ) {
7580 rmode = FPROUNDING_ZERO;
7581 } else {
7582 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7583 }
7584
7585 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7586 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7587 cpu_env);
7588 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7589 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7590 cpu_env);
7591 tcg_temp_free_ptr(fpstatus);
7592 tcg_temp_free_i32(tcg_rmode);
7593 break;
7594 }
2ce70625
WN
7595 case NEON_2RM_VRINTX:
7596 {
7597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7598 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7599 tcg_temp_free_ptr(fpstatus);
7600 break;
7601 }
901ad525
WN
7602 case NEON_2RM_VCVTAU:
7603 case NEON_2RM_VCVTAS:
7604 case NEON_2RM_VCVTNU:
7605 case NEON_2RM_VCVTNS:
7606 case NEON_2RM_VCVTPU:
7607 case NEON_2RM_VCVTPS:
7608 case NEON_2RM_VCVTMU:
7609 case NEON_2RM_VCVTMS:
7610 {
7611 bool is_signed = !extract32(insn, 7, 1);
7612 TCGv_ptr fpst = get_fpstatus_ptr(1);
7613 TCGv_i32 tcg_rmode, tcg_shift;
7614 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7615
7616 tcg_shift = tcg_const_i32(0);
7617 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7618 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7619 cpu_env);
7620
7621 if (is_signed) {
7622 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7623 tcg_shift, fpst);
7624 } else {
7625 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7626 tcg_shift, fpst);
7627 }
7628
7629 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7630 cpu_env);
7631 tcg_temp_free_i32(tcg_rmode);
7632 tcg_temp_free_i32(tcg_shift);
7633 tcg_temp_free_ptr(fpst);
7634 break;
7635 }
600b828c 7636 case NEON_2RM_VRECPE:
b6d4443a
AB
7637 {
7638 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7639 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7640 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7641 break;
b6d4443a 7642 }
600b828c 7643 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7644 {
7645 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7646 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7647 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7648 break;
c2fb418e 7649 }
600b828c 7650 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7651 {
7652 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7653 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7654 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7655 break;
b6d4443a 7656 }
600b828c 7657 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7658 {
7659 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7660 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7661 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7662 break;
c2fb418e 7663 }
600b828c 7664 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7665 gen_vfp_sito(0, 1);
9ee6e8bb 7666 break;
600b828c 7667 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7668 gen_vfp_uito(0, 1);
9ee6e8bb 7669 break;
600b828c 7670 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7671 gen_vfp_tosiz(0, 1);
9ee6e8bb 7672 break;
600b828c 7673 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7674 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7675 break;
7676 default:
600b828c
PM
7677 /* Reserved op values were caught by the
7678 * neon_2rm_sizes[] check earlier.
7679 */
7680 abort();
9ee6e8bb 7681 }
600b828c 7682 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7683 tcg_gen_st_f32(cpu_F0s, cpu_env,
7684 neon_reg_offset(rd, pass));
9ee6e8bb 7685 } else {
dd8fbd78 7686 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7687 }
7688 }
7689 break;
7690 }
7691 } else if ((insn & (1 << 10)) == 0) {
7692 /* VTBL, VTBX. */
56907d77
PM
7693 int n = ((insn >> 8) & 3) + 1;
7694 if ((rn + n) > 32) {
7695 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7696 * helper function running off the end of the register file.
7697 */
7698 return 1;
7699 }
7700 n <<= 3;
9ee6e8bb 7701 if (insn & (1 << 6)) {
8f8e3aa4 7702 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7703 } else {
7d1b0095 7704 tmp = tcg_temp_new_i32();
8f8e3aa4 7705 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7706 }
8f8e3aa4 7707 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 7708 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 7709 tmp5 = tcg_const_i32(n);
e7c06c4e 7710 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 7711 tcg_temp_free_i32(tmp);
9ee6e8bb 7712 if (insn & (1 << 6)) {
8f8e3aa4 7713 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7714 } else {
7d1b0095 7715 tmp = tcg_temp_new_i32();
8f8e3aa4 7716 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7717 }
8f8e3aa4 7718 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 7719 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 7720 tcg_temp_free_i32(tmp5);
e7c06c4e 7721 tcg_temp_free_ptr(ptr1);
8f8e3aa4 7722 neon_store_reg(rd, 0, tmp2);
3018f259 7723 neon_store_reg(rd, 1, tmp3);
7d1b0095 7724 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7725 } else if ((insn & 0x380) == 0) {
7726 /* VDUP */
133da6aa
JR
7727 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7728 return 1;
7729 }
9ee6e8bb 7730 if (insn & (1 << 19)) {
dd8fbd78 7731 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7732 } else {
dd8fbd78 7733 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7734 }
7735 if (insn & (1 << 16)) {
dd8fbd78 7736 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7737 } else if (insn & (1 << 17)) {
7738 if ((insn >> 18) & 1)
dd8fbd78 7739 gen_neon_dup_high16(tmp);
9ee6e8bb 7740 else
dd8fbd78 7741 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7742 }
7743 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7744 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7745 tcg_gen_mov_i32(tmp2, tmp);
7746 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7747 }
7d1b0095 7748 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7749 } else {
7750 return 1;
7751 }
7752 }
7753 }
7754 return 0;
7755}
7756
8b7209fa
RH
7757/* Advanced SIMD three registers of the same length extension.
7758 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7759 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7760 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7761 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7762 */
7763static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7764{
26c470a7
RH
7765 gen_helper_gvec_3 *fn_gvec = NULL;
7766 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7767 int rd, rn, rm, opr_sz;
7768 int data = 0;
8b7209fa
RH
7769 bool q;
7770
7771 q = extract32(insn, 6, 1);
7772 VFP_DREG_D(rd, insn);
7773 VFP_DREG_N(rn, insn);
7774 VFP_DREG_M(rm, insn);
7775 if ((rd | rn | rm) & q) {
7776 return 1;
7777 }
7778
7779 if ((insn & 0xfe200f10) == 0xfc200800) {
7780 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
7781 int size = extract32(insn, 20, 1);
7782 data = extract32(insn, 23, 2); /* rot */
8b7209fa
RH
7783 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7784 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7785 return 1;
7786 }
7787 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7788 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7789 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
7790 int size = extract32(insn, 20, 1);
7791 data = extract32(insn, 24, 1); /* rot */
8b7209fa
RH
7792 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7793 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7794 return 1;
7795 }
7796 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
7797 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
7798 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
7799 bool u = extract32(insn, 4, 1);
7800 if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
7801 return 1;
7802 }
7803 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8b7209fa
RH
7804 } else {
7805 return 1;
7806 }
7807
7808 if (s->fp_excp_el) {
7809 gen_exception_insn(s, 4, EXCP_UDEF,
7810 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7811 return 0;
7812 }
7813 if (!s->vfp_enabled) {
7814 return 1;
7815 }
7816
7817 opr_sz = (1 + q) * 8;
26c470a7
RH
7818 if (fn_gvec_ptr) {
7819 TCGv_ptr fpst = get_fpstatus_ptr(1);
7820 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7821 vfp_reg_offset(1, rn),
7822 vfp_reg_offset(1, rm), fpst,
7823 opr_sz, opr_sz, data, fn_gvec_ptr);
7824 tcg_temp_free_ptr(fpst);
7825 } else {
7826 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7827 vfp_reg_offset(1, rn),
7828 vfp_reg_offset(1, rm),
7829 opr_sz, opr_sz, data, fn_gvec);
7830 }
8b7209fa
RH
7831 return 0;
7832}
7833
638808ff
RH
/* Advanced SIMD two registers and a scalar extension.
 *  31             24   23  22   20   16   12  11   10   9    8        3     0
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 *
 * Decodes VCMLA (indexed) and V[US]DOT (indexed).
 * Returns 0 on success, 1 to signal an UNDEF.
 */

static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz, data;
    bool q;

    q = extract32(insn, 6, 1);
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    /* Q (quad) operations require even Vd/Vn.  Vm is checked per-case
     * below because its encoding differs between the two insn groups.
     */
    if ((rd | rn) & q) {
        return 1;
    }

    if ((insn & 0xff000f10) == 0xfe000800) {
        /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
        int rot = extract32(insn, 20, 2);
        int size = extract32(insn, 23, 1);
        int index;

        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
            return 1;
        }
        if (size == 0) {
            /* fp16 form additionally requires the FP16 feature.  */
            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                return 1;
            }
            /* For fp16, rm is just Vm, and index is M. */
            rm = extract32(insn, 0, 4);
            index = extract32(insn, 5, 1);
        } else {
            /* For fp32, rm is the usual M:Vm, and index is 0. */
            VFP_DREG_M(rm, insn);
            index = 0;
        }
        /* Pack index and rotation into the helper's immediate operand.  */
        data = (index << 2) | rot;
        fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
                       : gen_helper_gvec_fcmlah_idx);
    } else if ((insn & 0xffb00f00) == 0xfe200d00) {
        /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
        int u = extract32(insn, 4, 1);
        if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
        /* rm is just Vm, and index is M. */
        data = extract32(insn, 5, 1); /* index */
        rm = extract32(insn, 0, 4);
    } else {
        /* Unrecognized encoding.  */
        return 1;
    }

    /* FP access checks are performed after the UNDEF checks above.  */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    /* Operation size in bytes: one D register, or two if Q is set.  */
    opr_sz = (1 + q) * 8;
    if (fn_gvec_ptr) {
        /* FP helper: also needs the fp status pointer.  */
        TCGv_ptr fpst = get_fpstatus_ptr(1);
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), fpst,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        tcg_temp_free_ptr(fpst);
    } else {
        /* Integer dot product: plain out-of-line gvec helper.  */
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm),
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
7919
7dcc1f89 7920static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7921{
4b6a83fb
PM
7922 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7923 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7924
7925 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7926
7927 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7928 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7929 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7930 return 1;
7931 }
d614a513 7932 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7933 return disas_iwmmxt_insn(s, insn);
d614a513 7934 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7935 return disas_dsp_insn(s, insn);
c0f4af17
PM
7936 }
7937 return 1;
4b6a83fb
PM
7938 }
7939
7940 /* Otherwise treat as a generic register access */
7941 is64 = (insn & (1 << 25)) == 0;
7942 if (!is64 && ((insn & (1 << 4)) == 0)) {
7943 /* cdp */
7944 return 1;
7945 }
7946
7947 crm = insn & 0xf;
7948 if (is64) {
7949 crn = 0;
7950 opc1 = (insn >> 4) & 0xf;
7951 opc2 = 0;
7952 rt2 = (insn >> 16) & 0xf;
7953 } else {
7954 crn = (insn >> 16) & 0xf;
7955 opc1 = (insn >> 21) & 7;
7956 opc2 = (insn >> 5) & 7;
7957 rt2 = 0;
7958 }
7959 isread = (insn >> 20) & 1;
7960 rt = (insn >> 12) & 0xf;
7961
60322b39 7962 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7963 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7964 if (ri) {
7965 /* Check access permissions */
dcbff19b 7966 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7967 return 1;
7968 }
7969
c0f4af17 7970 if (ri->accessfn ||
d614a513 7971 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7972 /* Emit code to perform further access permissions checks at
7973 * runtime; this may result in an exception.
c0f4af17
PM
7974 * Note that on XScale all cp0..c13 registers do an access check
7975 * call in order to handle c15_cpar.
f59df3f2
PM
7976 */
7977 TCGv_ptr tmpptr;
3f208fd7 7978 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7979 uint32_t syndrome;
7980
7981 /* Note that since we are an implementation which takes an
7982 * exception on a trapped conditional instruction only if the
7983 * instruction passes its condition code check, we can take
7984 * advantage of the clause in the ARM ARM that allows us to set
7985 * the COND field in the instruction to 0xE in all cases.
7986 * We could fish the actual condition out of the insn (ARM)
7987 * or the condexec bits (Thumb) but it isn't necessary.
7988 */
7989 switch (cpnum) {
7990 case 14:
7991 if (is64) {
7992 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7993 isread, false);
8bcbf37c
PM
7994 } else {
7995 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7996 rt, isread, false);
8bcbf37c
PM
7997 }
7998 break;
7999 case 15:
8000 if (is64) {
8001 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8002 isread, false);
8bcbf37c
PM
8003 } else {
8004 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8005 rt, isread, false);
8bcbf37c
PM
8006 }
8007 break;
8008 default:
8009 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8010 * so this can only happen if this is an ARMv7 or earlier CPU,
8011 * in which case the syndrome information won't actually be
8012 * guest visible.
8013 */
d614a513 8014 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8015 syndrome = syn_uncategorized();
8016 break;
8017 }
8018
43bfa4a1 8019 gen_set_condexec(s);
3977ee5d 8020 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8021 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8022 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8023 tcg_isread = tcg_const_i32(isread);
8024 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8025 tcg_isread);
f59df3f2 8026 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8027 tcg_temp_free_i32(tcg_syn);
3f208fd7 8028 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8029 }
8030
4b6a83fb
PM
8031 /* Handle special cases first */
8032 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8033 case ARM_CP_NOP:
8034 return 0;
8035 case ARM_CP_WFI:
8036 if (isread) {
8037 return 1;
8038 }
eaed129d 8039 gen_set_pc_im(s, s->pc);
dcba3a8d 8040 s->base.is_jmp = DISAS_WFI;
2bee5105 8041 return 0;
4b6a83fb
PM
8042 default:
8043 break;
8044 }
8045
c5a49c63 8046 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8047 gen_io_start();
8048 }
8049
4b6a83fb
PM
8050 if (isread) {
8051 /* Read */
8052 if (is64) {
8053 TCGv_i64 tmp64;
8054 TCGv_i32 tmp;
8055 if (ri->type & ARM_CP_CONST) {
8056 tmp64 = tcg_const_i64(ri->resetvalue);
8057 } else if (ri->readfn) {
8058 TCGv_ptr tmpptr;
4b6a83fb
PM
8059 tmp64 = tcg_temp_new_i64();
8060 tmpptr = tcg_const_ptr(ri);
8061 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8062 tcg_temp_free_ptr(tmpptr);
8063 } else {
8064 tmp64 = tcg_temp_new_i64();
8065 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8066 }
8067 tmp = tcg_temp_new_i32();
ecc7b3aa 8068 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8069 store_reg(s, rt, tmp);
8070 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8071 tmp = tcg_temp_new_i32();
ecc7b3aa 8072 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8073 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8074 store_reg(s, rt2, tmp);
8075 } else {
39d5492a 8076 TCGv_i32 tmp;
4b6a83fb
PM
8077 if (ri->type & ARM_CP_CONST) {
8078 tmp = tcg_const_i32(ri->resetvalue);
8079 } else if (ri->readfn) {
8080 TCGv_ptr tmpptr;
4b6a83fb
PM
8081 tmp = tcg_temp_new_i32();
8082 tmpptr = tcg_const_ptr(ri);
8083 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8084 tcg_temp_free_ptr(tmpptr);
8085 } else {
8086 tmp = load_cpu_offset(ri->fieldoffset);
8087 }
8088 if (rt == 15) {
8089 /* Destination register of r15 for 32 bit loads sets
8090 * the condition codes from the high 4 bits of the value
8091 */
8092 gen_set_nzcv(tmp);
8093 tcg_temp_free_i32(tmp);
8094 } else {
8095 store_reg(s, rt, tmp);
8096 }
8097 }
8098 } else {
8099 /* Write */
8100 if (ri->type & ARM_CP_CONST) {
8101 /* If not forbidden by access permissions, treat as WI */
8102 return 0;
8103 }
8104
8105 if (is64) {
39d5492a 8106 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8107 TCGv_i64 tmp64 = tcg_temp_new_i64();
8108 tmplo = load_reg(s, rt);
8109 tmphi = load_reg(s, rt2);
8110 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8111 tcg_temp_free_i32(tmplo);
8112 tcg_temp_free_i32(tmphi);
8113 if (ri->writefn) {
8114 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8115 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8116 tcg_temp_free_ptr(tmpptr);
8117 } else {
8118 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8119 }
8120 tcg_temp_free_i64(tmp64);
8121 } else {
8122 if (ri->writefn) {
39d5492a 8123 TCGv_i32 tmp;
4b6a83fb 8124 TCGv_ptr tmpptr;
4b6a83fb
PM
8125 tmp = load_reg(s, rt);
8126 tmpptr = tcg_const_ptr(ri);
8127 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8128 tcg_temp_free_ptr(tmpptr);
8129 tcg_temp_free_i32(tmp);
8130 } else {
39d5492a 8131 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8132 store_cpu_offset(tmp, ri->fieldoffset);
8133 }
8134 }
2452731c
PM
8135 }
8136
c5a49c63 8137 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8138 /* I/O operations must end the TB here (whether read or write) */
8139 gen_io_end();
8140 gen_lookup_tb(s);
8141 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8142 /* We default to ending the TB on a coprocessor register write,
8143 * but allow this to be suppressed by the register definition
8144 * (usually only necessary to work around guest bugs).
8145 */
2452731c 8146 gen_lookup_tb(s);
4b6a83fb 8147 }
2452731c 8148
4b6a83fb
PM
8149 return 0;
8150 }
8151
626187d8
PM
8152 /* Unknown register; this might be a guest error or a QEMU
8153 * unimplemented feature.
8154 */
8155 if (is64) {
8156 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8157 "64 bit system register cp:%d opc1: %d crm:%d "
8158 "(%s)\n",
8159 isread ? "read" : "write", cpnum, opc1, crm,
8160 s->ns ? "non-secure" : "secure");
626187d8
PM
8161 } else {
8162 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8163 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8164 "(%s)\n",
8165 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8166 s->ns ? "non-secure" : "secure");
626187d8
PM
8167 }
8168
4a9a539f 8169 return 1;
9ee6e8bb
PB
8170}
8171
5e3f878a
PB
8172
8173/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8174static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8175{
39d5492a 8176 TCGv_i32 tmp;
7d1b0095 8177 tmp = tcg_temp_new_i32();
ecc7b3aa 8178 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8179 store_reg(s, rlow, tmp);
7d1b0095 8180 tmp = tcg_temp_new_i32();
5e3f878a 8181 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8182 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8183 store_reg(s, rhigh, tmp);
8184}
8185
8186/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8187static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8188{
a7812ae4 8189 TCGv_i64 tmp;
39d5492a 8190 TCGv_i32 tmp2;
5e3f878a 8191
36aa55dc 8192 /* Load value and extend to 64 bits. */
a7812ae4 8193 tmp = tcg_temp_new_i64();
5e3f878a
PB
8194 tmp2 = load_reg(s, rlow);
8195 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8196 tcg_temp_free_i32(tmp2);
5e3f878a 8197 tcg_gen_add_i64(val, val, tmp);
b75263d6 8198 tcg_temp_free_i64(tmp);
5e3f878a
PB
8199}
8200
8201/* load and add a 64-bit value from a register pair. */
a7812ae4 8202static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8203{
a7812ae4 8204 TCGv_i64 tmp;
39d5492a
PM
8205 TCGv_i32 tmpl;
8206 TCGv_i32 tmph;
5e3f878a
PB
8207
8208 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8209 tmpl = load_reg(s, rlow);
8210 tmph = load_reg(s, rhigh);
a7812ae4 8211 tmp = tcg_temp_new_i64();
36aa55dc 8212 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8213 tcg_temp_free_i32(tmpl);
8214 tcg_temp_free_i32(tmph);
5e3f878a 8215 tcg_gen_add_i64(val, val, tmp);
b75263d6 8216 tcg_temp_free_i64(tmp);
5e3f878a
PB
8217}
8218
/* Set N and Z flags from hi|lo.
 * N is the sign bit of the high word; cpu_ZF holds a value that is zero
 * iff the result is zero, so OR-ing both halves together gives the
 * correct 64-bit Z flag.
 */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
8225
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* log2 of the access size, plus alignment and guest-endianness bits */
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        /* Doubleword case: one aligned 64-bit load split into Rt:Rt2 */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        /* Remember the loaded value for the later store-exclusive check */
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        /* Byte/halfword/word load; remember it zero-extended to 64 bits */
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the exclusive monitor for this address */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
8276
/* CLREX: clear the local exclusive monitor. Setting cpu_exclusive_addr
 * to -1 guarantees the address comparison in gen_store_exclusive() fails.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
8281
/* Generate code for STREX/STREXB/STREXH/STREXD: store Rt (and Rt2 when
 * size == 3) to [addr] only if the exclusive monitor still covers addr
 * and memory still holds the remembered value; Rd receives 0 on success
 * and 1 on failure. The monitor is cleared in either case.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address check is done manually (widened to 64 bits to match
     * cpu_exclusive_addr); the value check happens inside the cmpxchg.
     */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        /* Atomically store n64 iff memory still holds exclusive_val;
         * o64 receives the value that was actually in memory.
         */
        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* t0 = 0 if the cmpxchg succeeded (old value matched), else 1 */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        /* 32-bit-or-narrower case: same cmpxchg dance on i32 values */
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Clear the monitor whether the store succeeded or not */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 8355
81465888
PM
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 * SRS stores LR and SPSR of the current mode to the stack of the mode
 * named by @mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap to EL3 (target EL argument is 3) */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        /* User/System (EL0) and Hyp (EL2) cases both UNDEF */
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        /* Unallocated mode number */
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Initial offset from the banked SP to the first word stored */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR at [addr] ... */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    /* ... and SPSR at [addr + 4] */
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust addr (currently base + first-offset + 4) back to the
         * final SP value mandated for each addressing mode.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    /* The helpers may have changed CPU state; end the TB */
    s->base.is_jmp = DISAS_UPDATE;
}
8482
f4df2210 8483static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8484{
f4df2210 8485 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8486 TCGv_i32 tmp;
8487 TCGv_i32 tmp2;
8488 TCGv_i32 tmp3;
8489 TCGv_i32 addr;
a7812ae4 8490 TCGv_i64 tmp64;
9ee6e8bb 8491
e13886e3
PM
8492 /* M variants do not implement ARM mode; this must raise the INVSTATE
8493 * UsageFault exception.
8494 */
b53d8923 8495 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8496 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8497 default_exception_el(s));
8498 return;
b53d8923 8499 }
9ee6e8bb
PB
8500 cond = insn >> 28;
8501 if (cond == 0xf){
be5e7a76
DES
8502 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8503 * choose to UNDEF. In ARMv5 and above the space is used
8504 * for miscellaneous unconditional instructions.
8505 */
8506 ARCH(5);
8507
9ee6e8bb
PB
8508 /* Unconditional instructions. */
8509 if (((insn >> 25) & 7) == 1) {
8510 /* NEON Data processing. */
d614a513 8511 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8512 goto illegal_op;
d614a513 8513 }
9ee6e8bb 8514
7dcc1f89 8515 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8516 goto illegal_op;
7dcc1f89 8517 }
9ee6e8bb
PB
8518 return;
8519 }
8520 if ((insn & 0x0f100000) == 0x04000000) {
8521 /* NEON load/store. */
d614a513 8522 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8523 goto illegal_op;
d614a513 8524 }
9ee6e8bb 8525
7dcc1f89 8526 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8527 goto illegal_op;
7dcc1f89 8528 }
9ee6e8bb
PB
8529 return;
8530 }
6a57f3eb
WN
8531 if ((insn & 0x0f000e10) == 0x0e000a00) {
8532 /* VFP. */
7dcc1f89 8533 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8534 goto illegal_op;
8535 }
8536 return;
8537 }
3d185e5d
PM
8538 if (((insn & 0x0f30f000) == 0x0510f000) ||
8539 ((insn & 0x0f30f010) == 0x0710f000)) {
8540 if ((insn & (1 << 22)) == 0) {
8541 /* PLDW; v7MP */
d614a513 8542 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8543 goto illegal_op;
8544 }
8545 }
8546 /* Otherwise PLD; v5TE+ */
be5e7a76 8547 ARCH(5TE);
3d185e5d
PM
8548 return;
8549 }
8550 if (((insn & 0x0f70f000) == 0x0450f000) ||
8551 ((insn & 0x0f70f010) == 0x0650f000)) {
8552 ARCH(7);
8553 return; /* PLI; V7 */
8554 }
8555 if (((insn & 0x0f700000) == 0x04100000) ||
8556 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8557 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8558 goto illegal_op;
8559 }
8560 return; /* v7MP: Unallocated memory hint: must NOP */
8561 }
8562
8563 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8564 ARCH(6);
8565 /* setend */
9886ecdf
PB
8566 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8567 gen_helper_setend(cpu_env);
dcba3a8d 8568 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8569 }
8570 return;
8571 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8572 switch ((insn >> 4) & 0xf) {
8573 case 1: /* clrex */
8574 ARCH(6K);
426f5abc 8575 gen_clrex(s);
9ee6e8bb
PB
8576 return;
8577 case 4: /* dsb */
8578 case 5: /* dmb */
9ee6e8bb 8579 ARCH(7);
61e4c432 8580 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8581 return;
6df99dec
SS
8582 case 6: /* isb */
8583 /* We need to break the TB after this insn to execute
8584 * self-modifying code correctly and also to take
8585 * any pending interrupts immediately.
8586 */
0b609cc1 8587 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8588 return;
9ee6e8bb
PB
8589 default:
8590 goto illegal_op;
8591 }
8592 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8593 /* srs */
81465888
PM
8594 ARCH(6);
8595 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8596 return;
ea825eee 8597 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8598 /* rfe */
c67b6b71 8599 int32_t offset;
9ee6e8bb
PB
8600 if (IS_USER(s))
8601 goto illegal_op;
8602 ARCH(6);
8603 rn = (insn >> 16) & 0xf;
b0109805 8604 addr = load_reg(s, rn);
9ee6e8bb
PB
8605 i = (insn >> 23) & 3;
8606 switch (i) {
b0109805 8607 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8608 case 1: offset = 0; break; /* IA */
8609 case 2: offset = -8; break; /* DB */
b0109805 8610 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8611 default: abort();
8612 }
8613 if (offset)
b0109805
PB
8614 tcg_gen_addi_i32(addr, addr, offset);
8615 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8616 tmp = tcg_temp_new_i32();
12dcc321 8617 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8618 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8619 tmp2 = tcg_temp_new_i32();
12dcc321 8620 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8621 if (insn & (1 << 21)) {
8622 /* Base writeback. */
8623 switch (i) {
b0109805 8624 case 0: offset = -8; break;
c67b6b71
FN
8625 case 1: offset = 4; break;
8626 case 2: offset = -4; break;
b0109805 8627 case 3: offset = 0; break;
9ee6e8bb
PB
8628 default: abort();
8629 }
8630 if (offset)
b0109805
PB
8631 tcg_gen_addi_i32(addr, addr, offset);
8632 store_reg(s, rn, addr);
8633 } else {
7d1b0095 8634 tcg_temp_free_i32(addr);
9ee6e8bb 8635 }
b0109805 8636 gen_rfe(s, tmp, tmp2);
c67b6b71 8637 return;
9ee6e8bb
PB
8638 } else if ((insn & 0x0e000000) == 0x0a000000) {
8639 /* branch link and change to thumb (blx <offset>) */
8640 int32_t offset;
8641
8642 val = (uint32_t)s->pc;
7d1b0095 8643 tmp = tcg_temp_new_i32();
d9ba4830
PB
8644 tcg_gen_movi_i32(tmp, val);
8645 store_reg(s, 14, tmp);
9ee6e8bb
PB
8646 /* Sign-extend the 24-bit offset */
8647 offset = (((int32_t)insn) << 8) >> 8;
8648 /* offset * 4 + bit24 * 2 + (thumb bit) */
8649 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8650 /* pipeline offset */
8651 val += 4;
be5e7a76 8652 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8653 gen_bx_im(s, val);
9ee6e8bb
PB
8654 return;
8655 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8656 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8657 /* iWMMXt register transfer. */
c0f4af17 8658 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8659 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8660 return;
c0f4af17
PM
8661 }
8662 }
9ee6e8bb 8663 }
8b7209fa
RH
8664 } else if ((insn & 0x0e000a00) == 0x0c000800
8665 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8666 if (disas_neon_insn_3same_ext(s, insn)) {
8667 goto illegal_op;
8668 }
8669 return;
638808ff
RH
8670 } else if ((insn & 0x0f000a00) == 0x0e000800
8671 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8672 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8673 goto illegal_op;
8674 }
8675 return;
9ee6e8bb
PB
8676 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8677 /* Coprocessor double register transfer. */
be5e7a76 8678 ARCH(5TE);
9ee6e8bb
PB
8679 } else if ((insn & 0x0f000010) == 0x0e000010) {
8680 /* Additional coprocessor register transfer. */
7997d92f 8681 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8682 uint32_t mask;
8683 uint32_t val;
8684 /* cps (privileged) */
8685 if (IS_USER(s))
8686 return;
8687 mask = val = 0;
8688 if (insn & (1 << 19)) {
8689 if (insn & (1 << 8))
8690 mask |= CPSR_A;
8691 if (insn & (1 << 7))
8692 mask |= CPSR_I;
8693 if (insn & (1 << 6))
8694 mask |= CPSR_F;
8695 if (insn & (1 << 18))
8696 val |= mask;
8697 }
7997d92f 8698 if (insn & (1 << 17)) {
9ee6e8bb
PB
8699 mask |= CPSR_M;
8700 val |= (insn & 0x1f);
8701 }
8702 if (mask) {
2fbac54b 8703 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8704 }
8705 return;
8706 }
8707 goto illegal_op;
8708 }
8709 if (cond != 0xe) {
8710 /* if not always execute, we generate a conditional jump to
8711 next instruction */
8712 s->condlabel = gen_new_label();
39fb730a 8713 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8714 s->condjmp = 1;
8715 }
8716 if ((insn & 0x0f900000) == 0x03000000) {
8717 if ((insn & (1 << 21)) == 0) {
8718 ARCH(6T2);
8719 rd = (insn >> 12) & 0xf;
8720 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8721 if ((insn & (1 << 22)) == 0) {
8722 /* MOVW */
7d1b0095 8723 tmp = tcg_temp_new_i32();
5e3f878a 8724 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8725 } else {
8726 /* MOVT */
5e3f878a 8727 tmp = load_reg(s, rd);
86831435 8728 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8729 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8730 }
5e3f878a 8731 store_reg(s, rd, tmp);
9ee6e8bb
PB
8732 } else {
8733 if (((insn >> 12) & 0xf) != 0xf)
8734 goto illegal_op;
8735 if (((insn >> 16) & 0xf) == 0) {
8736 gen_nop_hint(s, insn & 0xff);
8737 } else {
8738 /* CPSR = immediate */
8739 val = insn & 0xff;
8740 shift = ((insn >> 8) & 0xf) * 2;
8741 if (shift)
8742 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8743 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8744 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8745 i, val)) {
9ee6e8bb 8746 goto illegal_op;
7dcc1f89 8747 }
9ee6e8bb
PB
8748 }
8749 }
8750 } else if ((insn & 0x0f900000) == 0x01000000
8751 && (insn & 0x00000090) != 0x00000090) {
8752 /* miscellaneous instructions */
8753 op1 = (insn >> 21) & 3;
8754 sh = (insn >> 4) & 0xf;
8755 rm = insn & 0xf;
8756 switch (sh) {
8bfd0550
PM
8757 case 0x0: /* MSR, MRS */
8758 if (insn & (1 << 9)) {
8759 /* MSR (banked) and MRS (banked) */
8760 int sysm = extract32(insn, 16, 4) |
8761 (extract32(insn, 8, 1) << 4);
8762 int r = extract32(insn, 22, 1);
8763
8764 if (op1 & 1) {
8765 /* MSR (banked) */
8766 gen_msr_banked(s, r, sysm, rm);
8767 } else {
8768 /* MRS (banked) */
8769 int rd = extract32(insn, 12, 4);
8770
8771 gen_mrs_banked(s, r, sysm, rd);
8772 }
8773 break;
8774 }
8775
8776 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8777 if (op1 & 1) {
8778 /* PSR = reg */
2fbac54b 8779 tmp = load_reg(s, rm);
9ee6e8bb 8780 i = ((op1 & 2) != 0);
7dcc1f89 8781 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8782 goto illegal_op;
8783 } else {
8784 /* reg = PSR */
8785 rd = (insn >> 12) & 0xf;
8786 if (op1 & 2) {
8787 if (IS_USER(s))
8788 goto illegal_op;
d9ba4830 8789 tmp = load_cpu_field(spsr);
9ee6e8bb 8790 } else {
7d1b0095 8791 tmp = tcg_temp_new_i32();
9ef39277 8792 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8793 }
d9ba4830 8794 store_reg(s, rd, tmp);
9ee6e8bb
PB
8795 }
8796 break;
8797 case 0x1:
8798 if (op1 == 1) {
8799 /* branch/exchange thumb (bx). */
be5e7a76 8800 ARCH(4T);
d9ba4830
PB
8801 tmp = load_reg(s, rm);
8802 gen_bx(s, tmp);
9ee6e8bb
PB
8803 } else if (op1 == 3) {
8804 /* clz */
be5e7a76 8805 ARCH(5);
9ee6e8bb 8806 rd = (insn >> 12) & 0xf;
1497c961 8807 tmp = load_reg(s, rm);
7539a012 8808 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8809 store_reg(s, rd, tmp);
9ee6e8bb
PB
8810 } else {
8811 goto illegal_op;
8812 }
8813 break;
8814 case 0x2:
8815 if (op1 == 1) {
8816 ARCH(5J); /* bxj */
8817 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8818 tmp = load_reg(s, rm);
8819 gen_bx(s, tmp);
9ee6e8bb
PB
8820 } else {
8821 goto illegal_op;
8822 }
8823 break;
8824 case 0x3:
8825 if (op1 != 1)
8826 goto illegal_op;
8827
be5e7a76 8828 ARCH(5);
9ee6e8bb 8829 /* branch link/exchange thumb (blx) */
d9ba4830 8830 tmp = load_reg(s, rm);
7d1b0095 8831 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8832 tcg_gen_movi_i32(tmp2, s->pc);
8833 store_reg(s, 14, tmp2);
8834 gen_bx(s, tmp);
9ee6e8bb 8835 break;
eb0ecd5a
WN
8836 case 0x4:
8837 {
8838 /* crc32/crc32c */
8839 uint32_t c = extract32(insn, 8, 4);
8840
8841 /* Check this CPU supports ARMv8 CRC instructions.
8842 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8843 * Bits 8, 10 and 11 should be zero.
8844 */
d614a513 8845 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8846 (c & 0xd) != 0) {
8847 goto illegal_op;
8848 }
8849
8850 rn = extract32(insn, 16, 4);
8851 rd = extract32(insn, 12, 4);
8852
8853 tmp = load_reg(s, rn);
8854 tmp2 = load_reg(s, rm);
aa633469
PM
8855 if (op1 == 0) {
8856 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8857 } else if (op1 == 1) {
8858 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8859 }
eb0ecd5a
WN
8860 tmp3 = tcg_const_i32(1 << op1);
8861 if (c & 0x2) {
8862 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8863 } else {
8864 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8865 }
8866 tcg_temp_free_i32(tmp2);
8867 tcg_temp_free_i32(tmp3);
8868 store_reg(s, rd, tmp);
8869 break;
8870 }
9ee6e8bb 8871 case 0x5: /* saturating add/subtract */
be5e7a76 8872 ARCH(5TE);
9ee6e8bb
PB
8873 rd = (insn >> 12) & 0xf;
8874 rn = (insn >> 16) & 0xf;
b40d0353 8875 tmp = load_reg(s, rm);
5e3f878a 8876 tmp2 = load_reg(s, rn);
9ee6e8bb 8877 if (op1 & 2)
9ef39277 8878 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8879 if (op1 & 1)
9ef39277 8880 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8881 else
9ef39277 8882 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8883 tcg_temp_free_i32(tmp2);
5e3f878a 8884 store_reg(s, rd, tmp);
9ee6e8bb 8885 break;
49e14940 8886 case 7:
d4a2dc67
PM
8887 {
8888 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8889 switch (op1) {
19a6e31c
PM
8890 case 0:
8891 /* HLT */
8892 gen_hlt(s, imm16);
8893 break;
37e6456e
PM
8894 case 1:
8895 /* bkpt */
8896 ARCH(5);
c900a2e6 8897 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
8898 break;
8899 case 2:
8900 /* Hypervisor call (v7) */
8901 ARCH(7);
8902 if (IS_USER(s)) {
8903 goto illegal_op;
8904 }
8905 gen_hvc(s, imm16);
8906 break;
8907 case 3:
8908 /* Secure monitor call (v6+) */
8909 ARCH(6K);
8910 if (IS_USER(s)) {
8911 goto illegal_op;
8912 }
8913 gen_smc(s);
8914 break;
8915 default:
19a6e31c 8916 g_assert_not_reached();
49e14940 8917 }
9ee6e8bb 8918 break;
d4a2dc67 8919 }
9ee6e8bb
PB
8920 case 0x8: /* signed multiply */
8921 case 0xa:
8922 case 0xc:
8923 case 0xe:
be5e7a76 8924 ARCH(5TE);
9ee6e8bb
PB
8925 rs = (insn >> 8) & 0xf;
8926 rn = (insn >> 12) & 0xf;
8927 rd = (insn >> 16) & 0xf;
8928 if (op1 == 1) {
8929 /* (32 * 16) >> 16 */
5e3f878a
PB
8930 tmp = load_reg(s, rm);
8931 tmp2 = load_reg(s, rs);
9ee6e8bb 8932 if (sh & 4)
5e3f878a 8933 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8934 else
5e3f878a 8935 gen_sxth(tmp2);
a7812ae4
PB
8936 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8937 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8938 tmp = tcg_temp_new_i32();
ecc7b3aa 8939 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8940 tcg_temp_free_i64(tmp64);
9ee6e8bb 8941 if ((sh & 2) == 0) {
5e3f878a 8942 tmp2 = load_reg(s, rn);
9ef39277 8943 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8944 tcg_temp_free_i32(tmp2);
9ee6e8bb 8945 }
5e3f878a 8946 store_reg(s, rd, tmp);
9ee6e8bb
PB
8947 } else {
8948 /* 16 * 16 */
5e3f878a
PB
8949 tmp = load_reg(s, rm);
8950 tmp2 = load_reg(s, rs);
8951 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8952 tcg_temp_free_i32(tmp2);
9ee6e8bb 8953 if (op1 == 2) {
a7812ae4
PB
8954 tmp64 = tcg_temp_new_i64();
8955 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8956 tcg_temp_free_i32(tmp);
a7812ae4
PB
8957 gen_addq(s, tmp64, rn, rd);
8958 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8959 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8960 } else {
8961 if (op1 == 0) {
5e3f878a 8962 tmp2 = load_reg(s, rn);
9ef39277 8963 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8964 tcg_temp_free_i32(tmp2);
9ee6e8bb 8965 }
5e3f878a 8966 store_reg(s, rd, tmp);
9ee6e8bb
PB
8967 }
8968 }
8969 break;
8970 default:
8971 goto illegal_op;
8972 }
8973 } else if (((insn & 0x0e000000) == 0 &&
8974 (insn & 0x00000090) != 0x90) ||
8975 ((insn & 0x0e000000) == (1 << 25))) {
8976 int set_cc, logic_cc, shiftop;
8977
8978 op1 = (insn >> 21) & 0xf;
8979 set_cc = (insn >> 20) & 1;
8980 logic_cc = table_logic_cc[op1] & set_cc;
8981
8982 /* data processing instruction */
8983 if (insn & (1 << 25)) {
8984 /* immediate operand */
8985 val = insn & 0xff;
8986 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8987 if (shift) {
9ee6e8bb 8988 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8989 }
7d1b0095 8990 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8991 tcg_gen_movi_i32(tmp2, val);
8992 if (logic_cc && shift) {
8993 gen_set_CF_bit31(tmp2);
8994 }
9ee6e8bb
PB
8995 } else {
8996 /* register */
8997 rm = (insn) & 0xf;
e9bb4aa9 8998 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8999 shiftop = (insn >> 5) & 3;
9000 if (!(insn & (1 << 4))) {
9001 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9002 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9003 } else {
9004 rs = (insn >> 8) & 0xf;
8984bd2e 9005 tmp = load_reg(s, rs);
e9bb4aa9 9006 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9007 }
9008 }
9009 if (op1 != 0x0f && op1 != 0x0d) {
9010 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9011 tmp = load_reg(s, rn);
9012 } else {
f764718d 9013 tmp = NULL;
9ee6e8bb
PB
9014 }
9015 rd = (insn >> 12) & 0xf;
9016 switch(op1) {
9017 case 0x00:
e9bb4aa9
JR
9018 tcg_gen_and_i32(tmp, tmp, tmp2);
9019 if (logic_cc) {
9020 gen_logic_CC(tmp);
9021 }
7dcc1f89 9022 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9023 break;
9024 case 0x01:
e9bb4aa9
JR
9025 tcg_gen_xor_i32(tmp, tmp, tmp2);
9026 if (logic_cc) {
9027 gen_logic_CC(tmp);
9028 }
7dcc1f89 9029 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9030 break;
9031 case 0x02:
9032 if (set_cc && rd == 15) {
9033 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9034 if (IS_USER(s)) {
9ee6e8bb 9035 goto illegal_op;
e9bb4aa9 9036 }
72485ec4 9037 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9038 gen_exception_return(s, tmp);
9ee6e8bb 9039 } else {
e9bb4aa9 9040 if (set_cc) {
72485ec4 9041 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9042 } else {
9043 tcg_gen_sub_i32(tmp, tmp, tmp2);
9044 }
7dcc1f89 9045 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9046 }
9047 break;
9048 case 0x03:
e9bb4aa9 9049 if (set_cc) {
72485ec4 9050 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9051 } else {
9052 tcg_gen_sub_i32(tmp, tmp2, tmp);
9053 }
7dcc1f89 9054 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9055 break;
9056 case 0x04:
e9bb4aa9 9057 if (set_cc) {
72485ec4 9058 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9059 } else {
9060 tcg_gen_add_i32(tmp, tmp, tmp2);
9061 }
7dcc1f89 9062 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9063 break;
9064 case 0x05:
e9bb4aa9 9065 if (set_cc) {
49b4c31e 9066 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9067 } else {
9068 gen_add_carry(tmp, tmp, tmp2);
9069 }
7dcc1f89 9070 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9071 break;
9072 case 0x06:
e9bb4aa9 9073 if (set_cc) {
2de68a49 9074 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9075 } else {
9076 gen_sub_carry(tmp, tmp, tmp2);
9077 }
7dcc1f89 9078 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9079 break;
9080 case 0x07:
e9bb4aa9 9081 if (set_cc) {
2de68a49 9082 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9083 } else {
9084 gen_sub_carry(tmp, tmp2, tmp);
9085 }
7dcc1f89 9086 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9087 break;
9088 case 0x08:
9089 if (set_cc) {
e9bb4aa9
JR
9090 tcg_gen_and_i32(tmp, tmp, tmp2);
9091 gen_logic_CC(tmp);
9ee6e8bb 9092 }
7d1b0095 9093 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9094 break;
9095 case 0x09:
9096 if (set_cc) {
e9bb4aa9
JR
9097 tcg_gen_xor_i32(tmp, tmp, tmp2);
9098 gen_logic_CC(tmp);
9ee6e8bb 9099 }
7d1b0095 9100 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9101 break;
9102 case 0x0a:
9103 if (set_cc) {
72485ec4 9104 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9105 }
7d1b0095 9106 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9107 break;
9108 case 0x0b:
9109 if (set_cc) {
72485ec4 9110 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9111 }
7d1b0095 9112 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9113 break;
9114 case 0x0c:
e9bb4aa9
JR
9115 tcg_gen_or_i32(tmp, tmp, tmp2);
9116 if (logic_cc) {
9117 gen_logic_CC(tmp);
9118 }
7dcc1f89 9119 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9120 break;
9121 case 0x0d:
9122 if (logic_cc && rd == 15) {
9123 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9124 if (IS_USER(s)) {
9ee6e8bb 9125 goto illegal_op;
e9bb4aa9
JR
9126 }
9127 gen_exception_return(s, tmp2);
9ee6e8bb 9128 } else {
e9bb4aa9
JR
9129 if (logic_cc) {
9130 gen_logic_CC(tmp2);
9131 }
7dcc1f89 9132 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9133 }
9134 break;
9135 case 0x0e:
f669df27 9136 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9137 if (logic_cc) {
9138 gen_logic_CC(tmp);
9139 }
7dcc1f89 9140 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9141 break;
9142 default:
9143 case 0x0f:
e9bb4aa9
JR
9144 tcg_gen_not_i32(tmp2, tmp2);
9145 if (logic_cc) {
9146 gen_logic_CC(tmp2);
9147 }
7dcc1f89 9148 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9149 break;
9150 }
e9bb4aa9 9151 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9152 tcg_temp_free_i32(tmp2);
e9bb4aa9 9153 }
9ee6e8bb
PB
9154 } else {
9155 /* other instructions */
9156 op1 = (insn >> 24) & 0xf;
9157 switch(op1) {
9158 case 0x0:
9159 case 0x1:
9160 /* multiplies, extra load/stores */
9161 sh = (insn >> 5) & 3;
9162 if (sh == 0) {
9163 if (op1 == 0x0) {
9164 rd = (insn >> 16) & 0xf;
9165 rn = (insn >> 12) & 0xf;
9166 rs = (insn >> 8) & 0xf;
9167 rm = (insn) & 0xf;
9168 op1 = (insn >> 20) & 0xf;
9169 switch (op1) {
9170 case 0: case 1: case 2: case 3: case 6:
9171 /* 32 bit mul */
5e3f878a
PB
9172 tmp = load_reg(s, rs);
9173 tmp2 = load_reg(s, rm);
9174 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9175 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9176 if (insn & (1 << 22)) {
9177 /* Subtract (mls) */
9178 ARCH(6T2);
5e3f878a
PB
9179 tmp2 = load_reg(s, rn);
9180 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9181 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9182 } else if (insn & (1 << 21)) {
9183 /* Add */
5e3f878a
PB
9184 tmp2 = load_reg(s, rn);
9185 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9186 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9187 }
9188 if (insn & (1 << 20))
5e3f878a
PB
9189 gen_logic_CC(tmp);
9190 store_reg(s, rd, tmp);
9ee6e8bb 9191 break;
8aac08b1
AJ
9192 case 4:
9193 /* 64 bit mul double accumulate (UMAAL) */
9194 ARCH(6);
9195 tmp = load_reg(s, rs);
9196 tmp2 = load_reg(s, rm);
9197 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9198 gen_addq_lo(s, tmp64, rn);
9199 gen_addq_lo(s, tmp64, rd);
9200 gen_storeq_reg(s, rn, rd, tmp64);
9201 tcg_temp_free_i64(tmp64);
9202 break;
9203 case 8: case 9: case 10: case 11:
9204 case 12: case 13: case 14: case 15:
9205 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9206 tmp = load_reg(s, rs);
9207 tmp2 = load_reg(s, rm);
8aac08b1 9208 if (insn & (1 << 22)) {
c9f10124 9209 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9210 } else {
c9f10124 9211 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9212 }
9213 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9214 TCGv_i32 al = load_reg(s, rn);
9215 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9216 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9217 tcg_temp_free_i32(al);
9218 tcg_temp_free_i32(ah);
9ee6e8bb 9219 }
8aac08b1 9220 if (insn & (1 << 20)) {
c9f10124 9221 gen_logicq_cc(tmp, tmp2);
8aac08b1 9222 }
c9f10124
RH
9223 store_reg(s, rn, tmp);
9224 store_reg(s, rd, tmp2);
9ee6e8bb 9225 break;
8aac08b1
AJ
9226 default:
9227 goto illegal_op;
9ee6e8bb
PB
9228 }
9229 } else {
9230 rn = (insn >> 16) & 0xf;
9231 rd = (insn >> 12) & 0xf;
9232 if (insn & (1 << 23)) {
9233 /* load/store exclusive */
2359bf80 9234 int op2 = (insn >> 8) & 3;
86753403 9235 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9236
9237 switch (op2) {
9238 case 0: /* lda/stl */
9239 if (op1 == 1) {
9240 goto illegal_op;
9241 }
9242 ARCH(8);
9243 break;
9244 case 1: /* reserved */
9245 goto illegal_op;
9246 case 2: /* ldaex/stlex */
9247 ARCH(8);
9248 break;
9249 case 3: /* ldrex/strex */
9250 if (op1) {
9251 ARCH(6K);
9252 } else {
9253 ARCH(6);
9254 }
9255 break;
9256 }
9257
3174f8e9 9258 addr = tcg_temp_local_new_i32();
98a46317 9259 load_reg_var(s, addr, rn);
2359bf80
MR
9260
9261 /* Since the emulation does not have barriers,
9262 the acquire/release semantics need no special
9263 handling */
9264 if (op2 == 0) {
9265 if (insn & (1 << 20)) {
9266 tmp = tcg_temp_new_i32();
9267 switch (op1) {
9268 case 0: /* lda */
9bb6558a
PM
9269 gen_aa32_ld32u_iss(s, tmp, addr,
9270 get_mem_index(s),
9271 rd | ISSIsAcqRel);
2359bf80
MR
9272 break;
9273 case 2: /* ldab */
9bb6558a
PM
9274 gen_aa32_ld8u_iss(s, tmp, addr,
9275 get_mem_index(s),
9276 rd | ISSIsAcqRel);
2359bf80
MR
9277 break;
9278 case 3: /* ldah */
9bb6558a
PM
9279 gen_aa32_ld16u_iss(s, tmp, addr,
9280 get_mem_index(s),
9281 rd | ISSIsAcqRel);
2359bf80
MR
9282 break;
9283 default:
9284 abort();
9285 }
9286 store_reg(s, rd, tmp);
9287 } else {
9288 rm = insn & 0xf;
9289 tmp = load_reg(s, rm);
9290 switch (op1) {
9291 case 0: /* stl */
9bb6558a
PM
9292 gen_aa32_st32_iss(s, tmp, addr,
9293 get_mem_index(s),
9294 rm | ISSIsAcqRel);
2359bf80
MR
9295 break;
9296 case 2: /* stlb */
9bb6558a
PM
9297 gen_aa32_st8_iss(s, tmp, addr,
9298 get_mem_index(s),
9299 rm | ISSIsAcqRel);
2359bf80
MR
9300 break;
9301 case 3: /* stlh */
9bb6558a
PM
9302 gen_aa32_st16_iss(s, tmp, addr,
9303 get_mem_index(s),
9304 rm | ISSIsAcqRel);
2359bf80
MR
9305 break;
9306 default:
9307 abort();
9308 }
9309 tcg_temp_free_i32(tmp);
9310 }
9311 } else if (insn & (1 << 20)) {
86753403
PB
9312 switch (op1) {
9313 case 0: /* ldrex */
426f5abc 9314 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
9315 break;
9316 case 1: /* ldrexd */
426f5abc 9317 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
9318 break;
9319 case 2: /* ldrexb */
426f5abc 9320 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
9321 break;
9322 case 3: /* ldrexh */
426f5abc 9323 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
9324 break;
9325 default:
9326 abort();
9327 }
9ee6e8bb
PB
9328 } else {
9329 rm = insn & 0xf;
86753403
PB
9330 switch (op1) {
9331 case 0: /* strex */
426f5abc 9332 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
9333 break;
9334 case 1: /* strexd */
502e64fe 9335 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9336 break;
9337 case 2: /* strexb */
426f5abc 9338 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9339 break;
9340 case 3: /* strexh */
426f5abc 9341 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9342 break;
9343 default:
9344 abort();
9345 }
9ee6e8bb 9346 }
39d5492a 9347 tcg_temp_free_i32(addr);
c4869ca6
OS
9348 } else if ((insn & 0x00300f00) == 0) {
9349 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9350 * - SWP, SWPB
9351 */
9352
cf12bce0
EC
9353 TCGv taddr;
9354 TCGMemOp opc = s->be_data;
9355
9ee6e8bb
PB
9356 rm = (insn) & 0xf;
9357
9ee6e8bb 9358 if (insn & (1 << 22)) {
cf12bce0 9359 opc |= MO_UB;
9ee6e8bb 9360 } else {
cf12bce0 9361 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9362 }
cf12bce0
EC
9363
9364 addr = load_reg(s, rn);
9365 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9366 tcg_temp_free_i32(addr);
cf12bce0
EC
9367
9368 tmp = load_reg(s, rm);
9369 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9370 get_mem_index(s), opc);
9371 tcg_temp_free(taddr);
9372 store_reg(s, rd, tmp);
c4869ca6
OS
9373 } else {
9374 goto illegal_op;
9ee6e8bb
PB
9375 }
9376 }
9377 } else {
9378 int address_offset;
3960c336 9379 bool load = insn & (1 << 20);
63f26fcf
PM
9380 bool wbit = insn & (1 << 21);
9381 bool pbit = insn & (1 << 24);
3960c336 9382 bool doubleword = false;
9bb6558a
PM
9383 ISSInfo issinfo;
9384
9ee6e8bb
PB
9385 /* Misc load/store */
9386 rn = (insn >> 16) & 0xf;
9387 rd = (insn >> 12) & 0xf;
3960c336 9388
9bb6558a
PM
9389 /* ISS not valid if writeback */
9390 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9391
3960c336
PM
9392 if (!load && (sh & 2)) {
9393 /* doubleword */
9394 ARCH(5TE);
9395 if (rd & 1) {
9396 /* UNPREDICTABLE; we choose to UNDEF */
9397 goto illegal_op;
9398 }
9399 load = (sh & 1) == 0;
9400 doubleword = true;
9401 }
9402
b0109805 9403 addr = load_reg(s, rn);
63f26fcf 9404 if (pbit) {
b0109805 9405 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9406 }
9ee6e8bb 9407 address_offset = 0;
3960c336
PM
9408
9409 if (doubleword) {
9410 if (!load) {
9ee6e8bb 9411 /* store */
b0109805 9412 tmp = load_reg(s, rd);
12dcc321 9413 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9414 tcg_temp_free_i32(tmp);
b0109805
PB
9415 tcg_gen_addi_i32(addr, addr, 4);
9416 tmp = load_reg(s, rd + 1);
12dcc321 9417 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9418 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9419 } else {
9420 /* load */
5a839c0d 9421 tmp = tcg_temp_new_i32();
12dcc321 9422 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9423 store_reg(s, rd, tmp);
9424 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9425 tmp = tcg_temp_new_i32();
12dcc321 9426 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9427 rd++;
9ee6e8bb
PB
9428 }
9429 address_offset = -4;
3960c336
PM
9430 } else if (load) {
9431 /* load */
9432 tmp = tcg_temp_new_i32();
9433 switch (sh) {
9434 case 1:
9bb6558a
PM
9435 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9436 issinfo);
3960c336
PM
9437 break;
9438 case 2:
9bb6558a
PM
9439 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9440 issinfo);
3960c336
PM
9441 break;
9442 default:
9443 case 3:
9bb6558a
PM
9444 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9445 issinfo);
3960c336
PM
9446 break;
9447 }
9ee6e8bb
PB
9448 } else {
9449 /* store */
b0109805 9450 tmp = load_reg(s, rd);
9bb6558a 9451 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9452 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9453 }
9454 /* Perform base writeback before the loaded value to
9455 ensure correct behavior with overlapping index registers.
b6af0975 9456 ldrd with base writeback is undefined if the
9ee6e8bb 9457 destination and index registers overlap. */
63f26fcf 9458 if (!pbit) {
b0109805
PB
9459 gen_add_datah_offset(s, insn, address_offset, addr);
9460 store_reg(s, rn, addr);
63f26fcf 9461 } else if (wbit) {
9ee6e8bb 9462 if (address_offset)
b0109805
PB
9463 tcg_gen_addi_i32(addr, addr, address_offset);
9464 store_reg(s, rn, addr);
9465 } else {
7d1b0095 9466 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9467 }
9468 if (load) {
9469 /* Complete the load. */
b0109805 9470 store_reg(s, rd, tmp);
9ee6e8bb
PB
9471 }
9472 }
9473 break;
9474 case 0x4:
9475 case 0x5:
9476 goto do_ldst;
9477 case 0x6:
9478 case 0x7:
9479 if (insn & (1 << 4)) {
9480 ARCH(6);
9481 /* Armv6 Media instructions. */
9482 rm = insn & 0xf;
9483 rn = (insn >> 16) & 0xf;
2c0262af 9484 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9485 rs = (insn >> 8) & 0xf;
9486 switch ((insn >> 23) & 3) {
9487 case 0: /* Parallel add/subtract. */
9488 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9489 tmp = load_reg(s, rn);
9490 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9491 sh = (insn >> 5) & 7;
9492 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9493 goto illegal_op;
6ddbc6e4 9494 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9495 tcg_temp_free_i32(tmp2);
6ddbc6e4 9496 store_reg(s, rd, tmp);
9ee6e8bb
PB
9497 break;
9498 case 1:
9499 if ((insn & 0x00700020) == 0) {
6c95676b 9500 /* Halfword pack. */
3670669c
PB
9501 tmp = load_reg(s, rn);
9502 tmp2 = load_reg(s, rm);
9ee6e8bb 9503 shift = (insn >> 7) & 0x1f;
3670669c
PB
9504 if (insn & (1 << 6)) {
9505 /* pkhtb */
22478e79
AZ
9506 if (shift == 0)
9507 shift = 31;
9508 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9509 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9510 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9511 } else {
9512 /* pkhbt */
22478e79
AZ
9513 if (shift)
9514 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9515 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9516 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9517 }
9518 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9519 tcg_temp_free_i32(tmp2);
3670669c 9520 store_reg(s, rd, tmp);
9ee6e8bb
PB
9521 } else if ((insn & 0x00200020) == 0x00200000) {
9522 /* [us]sat */
6ddbc6e4 9523 tmp = load_reg(s, rm);
9ee6e8bb
PB
9524 shift = (insn >> 7) & 0x1f;
9525 if (insn & (1 << 6)) {
9526 if (shift == 0)
9527 shift = 31;
6ddbc6e4 9528 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9529 } else {
6ddbc6e4 9530 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9531 }
9532 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9533 tmp2 = tcg_const_i32(sh);
9534 if (insn & (1 << 22))
9ef39277 9535 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9536 else
9ef39277 9537 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9538 tcg_temp_free_i32(tmp2);
6ddbc6e4 9539 store_reg(s, rd, tmp);
9ee6e8bb
PB
9540 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9541 /* [us]sat16 */
6ddbc6e4 9542 tmp = load_reg(s, rm);
9ee6e8bb 9543 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9544 tmp2 = tcg_const_i32(sh);
9545 if (insn & (1 << 22))
9ef39277 9546 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9547 else
9ef39277 9548 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9549 tcg_temp_free_i32(tmp2);
6ddbc6e4 9550 store_reg(s, rd, tmp);
9ee6e8bb
PB
9551 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9552 /* Select bytes. */
6ddbc6e4
PB
9553 tmp = load_reg(s, rn);
9554 tmp2 = load_reg(s, rm);
7d1b0095 9555 tmp3 = tcg_temp_new_i32();
0ecb72a5 9556 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9557 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9558 tcg_temp_free_i32(tmp3);
9559 tcg_temp_free_i32(tmp2);
6ddbc6e4 9560 store_reg(s, rd, tmp);
9ee6e8bb 9561 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9562 tmp = load_reg(s, rm);
9ee6e8bb 9563 shift = (insn >> 10) & 3;
1301f322 9564 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9565 rotate, a shift is sufficient. */
9566 if (shift != 0)
f669df27 9567 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9568 op1 = (insn >> 20) & 7;
9569 switch (op1) {
5e3f878a
PB
9570 case 0: gen_sxtb16(tmp); break;
9571 case 2: gen_sxtb(tmp); break;
9572 case 3: gen_sxth(tmp); break;
9573 case 4: gen_uxtb16(tmp); break;
9574 case 6: gen_uxtb(tmp); break;
9575 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9576 default: goto illegal_op;
9577 }
9578 if (rn != 15) {
5e3f878a 9579 tmp2 = load_reg(s, rn);
9ee6e8bb 9580 if ((op1 & 3) == 0) {
5e3f878a 9581 gen_add16(tmp, tmp2);
9ee6e8bb 9582 } else {
5e3f878a 9583 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9584 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9585 }
9586 }
6c95676b 9587 store_reg(s, rd, tmp);
9ee6e8bb
PB
9588 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9589 /* rev */
b0109805 9590 tmp = load_reg(s, rm);
9ee6e8bb
PB
9591 if (insn & (1 << 22)) {
9592 if (insn & (1 << 7)) {
b0109805 9593 gen_revsh(tmp);
9ee6e8bb
PB
9594 } else {
9595 ARCH(6T2);
b0109805 9596 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9597 }
9598 } else {
9599 if (insn & (1 << 7))
b0109805 9600 gen_rev16(tmp);
9ee6e8bb 9601 else
66896cb8 9602 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9603 }
b0109805 9604 store_reg(s, rd, tmp);
9ee6e8bb
PB
9605 } else {
9606 goto illegal_op;
9607 }
9608 break;
9609 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9610 switch ((insn >> 20) & 0x7) {
9611 case 5:
9612 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9613 /* op2 not 00x or 11x : UNDEF */
9614 goto illegal_op;
9615 }
838fa72d
AJ
9616 /* Signed multiply most significant [accumulate].
9617 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9618 tmp = load_reg(s, rm);
9619 tmp2 = load_reg(s, rs);
a7812ae4 9620 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9621
955a7dd5 9622 if (rd != 15) {
838fa72d 9623 tmp = load_reg(s, rd);
9ee6e8bb 9624 if (insn & (1 << 6)) {
838fa72d 9625 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9626 } else {
838fa72d 9627 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9628 }
9629 }
838fa72d
AJ
9630 if (insn & (1 << 5)) {
9631 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9632 }
9633 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9634 tmp = tcg_temp_new_i32();
ecc7b3aa 9635 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9636 tcg_temp_free_i64(tmp64);
955a7dd5 9637 store_reg(s, rn, tmp);
41e9564d
PM
9638 break;
9639 case 0:
9640 case 4:
9641 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9642 if (insn & (1 << 7)) {
9643 goto illegal_op;
9644 }
9645 tmp = load_reg(s, rm);
9646 tmp2 = load_reg(s, rs);
9ee6e8bb 9647 if (insn & (1 << 5))
5e3f878a
PB
9648 gen_swap_half(tmp2);
9649 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9650 if (insn & (1 << 22)) {
5e3f878a 9651 /* smlald, smlsld */
33bbd75a
PC
9652 TCGv_i64 tmp64_2;
9653
a7812ae4 9654 tmp64 = tcg_temp_new_i64();
33bbd75a 9655 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9656 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9657 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9658 tcg_temp_free_i32(tmp);
33bbd75a
PC
9659 tcg_temp_free_i32(tmp2);
9660 if (insn & (1 << 6)) {
9661 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9662 } else {
9663 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9664 }
9665 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9666 gen_addq(s, tmp64, rd, rn);
9667 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9668 tcg_temp_free_i64(tmp64);
9ee6e8bb 9669 } else {
5e3f878a 9670 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9671 if (insn & (1 << 6)) {
9672 /* This subtraction cannot overflow. */
9673 tcg_gen_sub_i32(tmp, tmp, tmp2);
9674 } else {
9675 /* This addition cannot overflow 32 bits;
9676 * however it may overflow considered as a
9677 * signed operation, in which case we must set
9678 * the Q flag.
9679 */
9680 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9681 }
9682 tcg_temp_free_i32(tmp2);
22478e79 9683 if (rd != 15)
9ee6e8bb 9684 {
22478e79 9685 tmp2 = load_reg(s, rd);
9ef39277 9686 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9687 tcg_temp_free_i32(tmp2);
9ee6e8bb 9688 }
22478e79 9689 store_reg(s, rn, tmp);
9ee6e8bb 9690 }
41e9564d 9691 break;
b8b8ea05
PM
9692 case 1:
9693 case 3:
9694 /* SDIV, UDIV */
d614a513 9695 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9696 goto illegal_op;
9697 }
9698 if (((insn >> 5) & 7) || (rd != 15)) {
9699 goto illegal_op;
9700 }
9701 tmp = load_reg(s, rm);
9702 tmp2 = load_reg(s, rs);
9703 if (insn & (1 << 21)) {
9704 gen_helper_udiv(tmp, tmp, tmp2);
9705 } else {
9706 gen_helper_sdiv(tmp, tmp, tmp2);
9707 }
9708 tcg_temp_free_i32(tmp2);
9709 store_reg(s, rn, tmp);
9710 break;
41e9564d
PM
9711 default:
9712 goto illegal_op;
9ee6e8bb
PB
9713 }
9714 break;
9715 case 3:
9716 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9717 switch (op1) {
9718 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9719 ARCH(6);
9720 tmp = load_reg(s, rm);
9721 tmp2 = load_reg(s, rs);
9722 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9723 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9724 if (rd != 15) {
9725 tmp2 = load_reg(s, rd);
6ddbc6e4 9726 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9727 tcg_temp_free_i32(tmp2);
9ee6e8bb 9728 }
ded9d295 9729 store_reg(s, rn, tmp);
9ee6e8bb
PB
9730 break;
9731 case 0x20: case 0x24: case 0x28: case 0x2c:
9732 /* Bitfield insert/clear. */
9733 ARCH(6T2);
9734 shift = (insn >> 7) & 0x1f;
9735 i = (insn >> 16) & 0x1f;
45140a57
KB
9736 if (i < shift) {
9737 /* UNPREDICTABLE; we choose to UNDEF */
9738 goto illegal_op;
9739 }
9ee6e8bb
PB
9740 i = i + 1 - shift;
9741 if (rm == 15) {
7d1b0095 9742 tmp = tcg_temp_new_i32();
5e3f878a 9743 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9744 } else {
5e3f878a 9745 tmp = load_reg(s, rm);
9ee6e8bb
PB
9746 }
9747 if (i != 32) {
5e3f878a 9748 tmp2 = load_reg(s, rd);
d593c48e 9749 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9750 tcg_temp_free_i32(tmp2);
9ee6e8bb 9751 }
5e3f878a 9752 store_reg(s, rd, tmp);
9ee6e8bb
PB
9753 break;
9754 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9755 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9756 ARCH(6T2);
5e3f878a 9757 tmp = load_reg(s, rm);
9ee6e8bb
PB
9758 shift = (insn >> 7) & 0x1f;
9759 i = ((insn >> 16) & 0x1f) + 1;
9760 if (shift + i > 32)
9761 goto illegal_op;
9762 if (i < 32) {
9763 if (op1 & 0x20) {
59a71b4c 9764 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9765 } else {
59a71b4c 9766 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9767 }
9768 }
5e3f878a 9769 store_reg(s, rd, tmp);
9ee6e8bb
PB
9770 break;
9771 default:
9772 goto illegal_op;
9773 }
9774 break;
9775 }
9776 break;
9777 }
9778 do_ldst:
9779 /* Check for undefined extension instructions
9780 * per the ARM Bible IE:
9781 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9782 */
9783 sh = (0xf << 20) | (0xf << 4);
9784 if (op1 == 0x7 && ((insn & sh) == sh))
9785 {
9786 goto illegal_op;
9787 }
9788 /* load/store byte/word */
9789 rn = (insn >> 16) & 0xf;
9790 rd = (insn >> 12) & 0xf;
b0109805 9791 tmp2 = load_reg(s, rn);
a99caa48
PM
9792 if ((insn & 0x01200000) == 0x00200000) {
9793 /* ldrt/strt */
579d21cc 9794 i = get_a32_user_mem_index(s);
a99caa48
PM
9795 } else {
9796 i = get_mem_index(s);
9797 }
9ee6e8bb 9798 if (insn & (1 << 24))
b0109805 9799 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9800 if (insn & (1 << 20)) {
9801 /* load */
5a839c0d 9802 tmp = tcg_temp_new_i32();
9ee6e8bb 9803 if (insn & (1 << 22)) {
9bb6558a 9804 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9805 } else {
9bb6558a 9806 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9807 }
9ee6e8bb
PB
9808 } else {
9809 /* store */
b0109805 9810 tmp = load_reg(s, rd);
5a839c0d 9811 if (insn & (1 << 22)) {
9bb6558a 9812 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9813 } else {
9bb6558a 9814 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9815 }
9816 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9817 }
9818 if (!(insn & (1 << 24))) {
b0109805
PB
9819 gen_add_data_offset(s, insn, tmp2);
9820 store_reg(s, rn, tmp2);
9821 } else if (insn & (1 << 21)) {
9822 store_reg(s, rn, tmp2);
9823 } else {
7d1b0095 9824 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9825 }
9826 if (insn & (1 << 20)) {
9827 /* Complete the load. */
7dcc1f89 9828 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9829 }
9830 break;
9831 case 0x08:
9832 case 0x09:
9833 {
da3e53dd
PM
9834 int j, n, loaded_base;
9835 bool exc_return = false;
9836 bool is_load = extract32(insn, 20, 1);
9837 bool user = false;
39d5492a 9838 TCGv_i32 loaded_var;
9ee6e8bb
PB
9839 /* load/store multiple words */
9840 /* XXX: store correct base if write back */
9ee6e8bb 9841 if (insn & (1 << 22)) {
da3e53dd 9842 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9843 if (IS_USER(s))
9844 goto illegal_op; /* only usable in supervisor mode */
9845
da3e53dd
PM
9846 if (is_load && extract32(insn, 15, 1)) {
9847 exc_return = true;
9848 } else {
9849 user = true;
9850 }
9ee6e8bb
PB
9851 }
9852 rn = (insn >> 16) & 0xf;
b0109805 9853 addr = load_reg(s, rn);
9ee6e8bb
PB
9854
9855 /* compute total size */
9856 loaded_base = 0;
f764718d 9857 loaded_var = NULL;
9ee6e8bb
PB
9858 n = 0;
9859 for(i=0;i<16;i++) {
9860 if (insn & (1 << i))
9861 n++;
9862 }
9863 /* XXX: test invalid n == 0 case ? */
9864 if (insn & (1 << 23)) {
9865 if (insn & (1 << 24)) {
9866 /* pre increment */
b0109805 9867 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9868 } else {
9869 /* post increment */
9870 }
9871 } else {
9872 if (insn & (1 << 24)) {
9873 /* pre decrement */
b0109805 9874 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9875 } else {
9876 /* post decrement */
9877 if (n != 1)
b0109805 9878 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9879 }
9880 }
9881 j = 0;
9882 for(i=0;i<16;i++) {
9883 if (insn & (1 << i)) {
da3e53dd 9884 if (is_load) {
9ee6e8bb 9885 /* load */
5a839c0d 9886 tmp = tcg_temp_new_i32();
12dcc321 9887 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9888 if (user) {
b75263d6 9889 tmp2 = tcg_const_i32(i);
1ce94f81 9890 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9891 tcg_temp_free_i32(tmp2);
7d1b0095 9892 tcg_temp_free_i32(tmp);
9ee6e8bb 9893 } else if (i == rn) {
b0109805 9894 loaded_var = tmp;
9ee6e8bb 9895 loaded_base = 1;
fb0e8e79
PM
9896 } else if (rn == 15 && exc_return) {
9897 store_pc_exc_ret(s, tmp);
9ee6e8bb 9898 } else {
7dcc1f89 9899 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9900 }
9901 } else {
9902 /* store */
9903 if (i == 15) {
9904 /* special case: r15 = PC + 8 */
9905 val = (long)s->pc + 4;
7d1b0095 9906 tmp = tcg_temp_new_i32();
b0109805 9907 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9908 } else if (user) {
7d1b0095 9909 tmp = tcg_temp_new_i32();
b75263d6 9910 tmp2 = tcg_const_i32(i);
9ef39277 9911 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9912 tcg_temp_free_i32(tmp2);
9ee6e8bb 9913 } else {
b0109805 9914 tmp = load_reg(s, i);
9ee6e8bb 9915 }
12dcc321 9916 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9917 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9918 }
9919 j++;
9920 /* no need to add after the last transfer */
9921 if (j != n)
b0109805 9922 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9923 }
9924 }
9925 if (insn & (1 << 21)) {
9926 /* write back */
9927 if (insn & (1 << 23)) {
9928 if (insn & (1 << 24)) {
9929 /* pre increment */
9930 } else {
9931 /* post increment */
b0109805 9932 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9933 }
9934 } else {
9935 if (insn & (1 << 24)) {
9936 /* pre decrement */
9937 if (n != 1)
b0109805 9938 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9939 } else {
9940 /* post decrement */
b0109805 9941 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9942 }
9943 }
b0109805
PB
9944 store_reg(s, rn, addr);
9945 } else {
7d1b0095 9946 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9947 }
9948 if (loaded_base) {
b0109805 9949 store_reg(s, rn, loaded_var);
9ee6e8bb 9950 }
da3e53dd 9951 if (exc_return) {
9ee6e8bb 9952 /* Restore CPSR from SPSR. */
d9ba4830 9953 tmp = load_cpu_field(spsr);
e69ad9df
AL
9954 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9955 gen_io_start();
9956 }
235ea1f5 9957 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
9958 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9959 gen_io_end();
9960 }
7d1b0095 9961 tcg_temp_free_i32(tmp);
b29fd33d 9962 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9963 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9964 }
9965 }
9966 break;
9967 case 0xa:
9968 case 0xb:
9969 {
9970 int32_t offset;
9971
9972 /* branch (and link) */
9973 val = (int32_t)s->pc;
9974 if (insn & (1 << 24)) {
7d1b0095 9975 tmp = tcg_temp_new_i32();
5e3f878a
PB
9976 tcg_gen_movi_i32(tmp, val);
9977 store_reg(s, 14, tmp);
9ee6e8bb 9978 }
534df156
PM
9979 offset = sextract32(insn << 2, 0, 26);
9980 val += offset + 4;
9ee6e8bb
PB
9981 gen_jmp(s, val);
9982 }
9983 break;
9984 case 0xc:
9985 case 0xd:
9986 case 0xe:
6a57f3eb
WN
9987 if (((insn >> 8) & 0xe) == 10) {
9988 /* VFP. */
7dcc1f89 9989 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9990 goto illegal_op;
9991 }
7dcc1f89 9992 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9993 /* Coprocessor. */
9ee6e8bb 9994 goto illegal_op;
6a57f3eb 9995 }
9ee6e8bb
PB
9996 break;
9997 case 0xf:
9998 /* swi */
eaed129d 9999 gen_set_pc_im(s, s->pc);
d4a2dc67 10000 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10001 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10002 break;
10003 default:
10004 illegal_op:
73710361
GB
10005 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10006 default_exception_el(s));
9ee6e8bb
PB
10007 break;
10008 }
10009 }
10010}
10011
296e5a0a
PM
10012static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10013{
10014 /* Return true if this is a 16 bit instruction. We must be precise
10015 * about this (matching the decode). We assume that s->pc still
10016 * points to the first 16 bits of the insn.
10017 */
10018 if ((insn >> 11) < 0x1d) {
10019 /* Definitely a 16-bit instruction */
10020 return true;
10021 }
10022
10023 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10024 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10025 * end up actually treating this as two 16-bit insns, though,
10026 * if it's half of a bl/blx pair that might span a page boundary.
10027 */
14120108
JS
10028 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10029 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10030 /* Thumb2 cores (including all M profile ones) always treat
10031 * 32-bit insns as 32-bit.
10032 */
10033 return false;
10034 }
10035
bfe7ad5b 10036 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10037 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10038 * is not on the next page; we merge this into a 32-bit
10039 * insn.
10040 */
10041 return false;
10042 }
10043 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10044 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10045 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10046 * -- handle as single 16 bit insn
10047 */
10048 return true;
10049}
10050
9ee6e8bb
PB
/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 0..7 form the logical group; everything above is arithmetic. */
    return op < 8;
}
10057
/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.

   The result is written back in place to T0; T1 is the second operand
   (already shifted by the caller, hence SHIFTER_OUT carrying the shifter
   carry-out in its high bit for the logical ops). */

static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    /* For the logical ops (0..4) flag generation is deferred to the common
     * gen_logic_CC() call after the switch; logic_cc records whether the
     * caller asked for flags at all.
     */
    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        /* BIC: t0 & ~t1, done in one TCG op via andc. */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        /* ORN: t0 | ~t1 (Thumb-2 only encoding), via orc. */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        /* Arithmetic ops set NZCV inline via the gen_*_CC helpers when
         * flags are requested; otherwise emit the plain TCG arithmetic.
         */
        if (conds)
            gen_add_CC(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_adc_CC(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds) {
            gen_sbc_CC(t0, t0, t1);
        } else {
            gen_sub_carry(t0, t0, t1);
        }
        break;
    case 13: /* sub */
        if (conds)
            gen_sub_CC(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        /* Reverse subtract: operands swapped relative to sub. */
        if (conds)
            gen_sub_CC(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        /* Not a data-processing opcode; caller treats nonzero as UNDEF. */
        return 1;
    }
    if (logic_cc) {
        /* Set NZ from the result; carry comes from the shifter output
         * (high bit of t1) only when the caller says one was produced.
         */
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}
10133
2eea841c
PM
10134/* Translate a 32-bit thumb instruction. */
10135static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10136{
296e5a0a 10137 uint32_t imm, shift, offset;
9ee6e8bb 10138 uint32_t rd, rn, rm, rs;
39d5492a
PM
10139 TCGv_i32 tmp;
10140 TCGv_i32 tmp2;
10141 TCGv_i32 tmp3;
10142 TCGv_i32 addr;
a7812ae4 10143 TCGv_i64 tmp64;
9ee6e8bb
PB
10144 int op;
10145 int shiftop;
10146 int conds;
10147 int logic_cc;
10148
14120108
JS
10149 /*
10150 * ARMv6-M supports a limited subset of Thumb2 instructions.
10151 * Other Thumb1 architectures allow only 32-bit
10152 * combined BL/BLX prefix and suffix.
296e5a0a 10153 */
14120108
JS
10154 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10155 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10156 int i;
10157 bool found = false;
8297cb13
JS
10158 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10159 0xf3b08040 /* dsb */,
10160 0xf3b08050 /* dmb */,
10161 0xf3b08060 /* isb */,
10162 0xf3e08000 /* mrs */,
10163 0xf000d000 /* bl */};
10164 static const uint32_t armv6m_mask[] = {0xffe0d000,
10165 0xfff0d0f0,
10166 0xfff0d0f0,
10167 0xfff0d0f0,
10168 0xffe0d000,
10169 0xf800d000};
14120108
JS
10170
10171 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10172 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10173 found = true;
10174 break;
10175 }
10176 }
10177 if (!found) {
10178 goto illegal_op;
10179 }
10180 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10181 ARCH(6T2);
10182 }
10183
10184 rn = (insn >> 16) & 0xf;
10185 rs = (insn >> 12) & 0xf;
10186 rd = (insn >> 8) & 0xf;
10187 rm = insn & 0xf;
10188 switch ((insn >> 25) & 0xf) {
10189 case 0: case 1: case 2: case 3:
10190 /* 16-bit instructions. Should never happen. */
10191 abort();
10192 case 4:
10193 if (insn & (1 << 22)) {
ebfe27c5
PM
10194 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10195 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10196 * table branch, TT.
ebfe27c5 10197 */
76eff04d
PM
10198 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10199 arm_dc_feature(s, ARM_FEATURE_V8)) {
10200 /* 0b1110_1001_0111_1111_1110_1001_0111_111
10201 * - SG (v8M only)
10202 * The bulk of the behaviour for this instruction is implemented
10203 * in v7m_handle_execute_nsc(), which deals with the insn when
10204 * it is executed by a CPU in non-secure state from memory
10205 * which is Secure & NonSecure-Callable.
10206 * Here we only need to handle the remaining cases:
10207 * * in NS memory (including the "security extension not
10208 * implemented" case) : NOP
10209 * * in S memory but CPU already secure (clear IT bits)
10210 * We know that the attribute for the memory this insn is
10211 * in must match the current CPU state, because otherwise
10212 * get_phys_addr_pmsav8 would have generated an exception.
10213 */
10214 if (s->v8m_secure) {
10215 /* Like the IT insn, we don't need to generate any code */
10216 s->condexec_cond = 0;
10217 s->condexec_mask = 0;
10218 }
10219 } else if (insn & 0x01200000) {
ebfe27c5
PM
10220 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10221 * - load/store dual (post-indexed)
10222 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10223 * - load/store dual (literal and immediate)
10224 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10225 * - load/store dual (pre-indexed)
10226 */
9ee6e8bb 10227 if (rn == 15) {
ebfe27c5
PM
10228 if (insn & (1 << 21)) {
10229 /* UNPREDICTABLE */
10230 goto illegal_op;
10231 }
7d1b0095 10232 addr = tcg_temp_new_i32();
b0109805 10233 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10234 } else {
b0109805 10235 addr = load_reg(s, rn);
9ee6e8bb
PB
10236 }
10237 offset = (insn & 0xff) * 4;
10238 if ((insn & (1 << 23)) == 0)
10239 offset = -offset;
10240 if (insn & (1 << 24)) {
b0109805 10241 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
10242 offset = 0;
10243 }
10244 if (insn & (1 << 20)) {
10245 /* ldrd */
e2592fad 10246 tmp = tcg_temp_new_i32();
12dcc321 10247 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10248 store_reg(s, rs, tmp);
10249 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10250 tmp = tcg_temp_new_i32();
12dcc321 10251 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10252 store_reg(s, rd, tmp);
9ee6e8bb
PB
10253 } else {
10254 /* strd */
b0109805 10255 tmp = load_reg(s, rs);
12dcc321 10256 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10257 tcg_temp_free_i32(tmp);
b0109805
PB
10258 tcg_gen_addi_i32(addr, addr, 4);
10259 tmp = load_reg(s, rd);
12dcc321 10260 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10261 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10262 }
10263 if (insn & (1 << 21)) {
10264 /* Base writeback. */
b0109805
PB
10265 tcg_gen_addi_i32(addr, addr, offset - 4);
10266 store_reg(s, rn, addr);
10267 } else {
7d1b0095 10268 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10269 }
10270 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
10271 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10272 * - load/store exclusive word
5158de24 10273 * - TT (v8M only)
ebfe27c5
PM
10274 */
10275 if (rs == 15) {
5158de24
PM
10276 if (!(insn & (1 << 20)) &&
10277 arm_dc_feature(s, ARM_FEATURE_M) &&
10278 arm_dc_feature(s, ARM_FEATURE_V8)) {
10279 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10280 * - TT (v8M only)
10281 */
10282 bool alt = insn & (1 << 7);
10283 TCGv_i32 addr, op, ttresp;
10284
10285 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10286 /* we UNDEF for these UNPREDICTABLE cases */
10287 goto illegal_op;
10288 }
10289
10290 if (alt && !s->v8m_secure) {
10291 goto illegal_op;
10292 }
10293
10294 addr = load_reg(s, rn);
10295 op = tcg_const_i32(extract32(insn, 6, 2));
10296 ttresp = tcg_temp_new_i32();
10297 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10298 tcg_temp_free_i32(addr);
10299 tcg_temp_free_i32(op);
10300 store_reg(s, rd, ttresp);
384c6c03 10301 break;
5158de24 10302 }
ebfe27c5
PM
10303 goto illegal_op;
10304 }
39d5492a 10305 addr = tcg_temp_local_new_i32();
98a46317 10306 load_reg_var(s, addr, rn);
426f5abc 10307 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 10308 if (insn & (1 << 20)) {
426f5abc 10309 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 10310 } else {
426f5abc 10311 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 10312 }
39d5492a 10313 tcg_temp_free_i32(addr);
2359bf80 10314 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
10315 /* Table Branch. */
10316 if (rn == 15) {
7d1b0095 10317 addr = tcg_temp_new_i32();
b0109805 10318 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 10319 } else {
b0109805 10320 addr = load_reg(s, rn);
9ee6e8bb 10321 }
b26eefb6 10322 tmp = load_reg(s, rm);
b0109805 10323 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
10324 if (insn & (1 << 4)) {
10325 /* tbh */
b0109805 10326 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10327 tcg_temp_free_i32(tmp);
e2592fad 10328 tmp = tcg_temp_new_i32();
12dcc321 10329 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10330 } else { /* tbb */
7d1b0095 10331 tcg_temp_free_i32(tmp);
e2592fad 10332 tmp = tcg_temp_new_i32();
12dcc321 10333 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10334 }
7d1b0095 10335 tcg_temp_free_i32(addr);
b0109805
PB
10336 tcg_gen_shli_i32(tmp, tmp, 1);
10337 tcg_gen_addi_i32(tmp, tmp, s->pc);
10338 store_reg(s, 15, tmp);
9ee6e8bb 10339 } else {
2359bf80 10340 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 10341 op = (insn >> 4) & 0x3;
2359bf80
MR
10342 switch (op2) {
10343 case 0:
426f5abc 10344 goto illegal_op;
2359bf80
MR
10345 case 1:
10346 /* Load/store exclusive byte/halfword/doubleword */
10347 if (op == 2) {
10348 goto illegal_op;
10349 }
10350 ARCH(7);
10351 break;
10352 case 2:
10353 /* Load-acquire/store-release */
10354 if (op == 3) {
10355 goto illegal_op;
10356 }
10357 /* Fall through */
10358 case 3:
10359 /* Load-acquire/store-release exclusive */
10360 ARCH(8);
10361 break;
426f5abc 10362 }
39d5492a 10363 addr = tcg_temp_local_new_i32();
98a46317 10364 load_reg_var(s, addr, rn);
2359bf80
MR
10365 if (!(op2 & 1)) {
10366 if (insn & (1 << 20)) {
10367 tmp = tcg_temp_new_i32();
10368 switch (op) {
10369 case 0: /* ldab */
9bb6558a
PM
10370 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10371 rs | ISSIsAcqRel);
2359bf80
MR
10372 break;
10373 case 1: /* ldah */
9bb6558a
PM
10374 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10375 rs | ISSIsAcqRel);
2359bf80
MR
10376 break;
10377 case 2: /* lda */
9bb6558a
PM
10378 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10379 rs | ISSIsAcqRel);
2359bf80
MR
10380 break;
10381 default:
10382 abort();
10383 }
10384 store_reg(s, rs, tmp);
10385 } else {
10386 tmp = load_reg(s, rs);
10387 switch (op) {
10388 case 0: /* stlb */
9bb6558a
PM
10389 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10390 rs | ISSIsAcqRel);
2359bf80
MR
10391 break;
10392 case 1: /* stlh */
9bb6558a
PM
10393 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10394 rs | ISSIsAcqRel);
2359bf80
MR
10395 break;
10396 case 2: /* stl */
9bb6558a
PM
10397 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10398 rs | ISSIsAcqRel);
2359bf80
MR
10399 break;
10400 default:
10401 abort();
10402 }
10403 tcg_temp_free_i32(tmp);
10404 }
10405 } else if (insn & (1 << 20)) {
426f5abc 10406 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 10407 } else {
426f5abc 10408 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 10409 }
39d5492a 10410 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10411 }
10412 } else {
10413 /* Load/store multiple, RFE, SRS. */
10414 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10415 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10416 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10417 goto illegal_op;
00115976 10418 }
9ee6e8bb
PB
10419 if (insn & (1 << 20)) {
10420 /* rfe */
b0109805
PB
10421 addr = load_reg(s, rn);
10422 if ((insn & (1 << 24)) == 0)
10423 tcg_gen_addi_i32(addr, addr, -8);
10424 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10425 tmp = tcg_temp_new_i32();
12dcc321 10426 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10427 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10428 tmp2 = tcg_temp_new_i32();
12dcc321 10429 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
10430 if (insn & (1 << 21)) {
10431 /* Base writeback. */
b0109805
PB
10432 if (insn & (1 << 24)) {
10433 tcg_gen_addi_i32(addr, addr, 4);
10434 } else {
10435 tcg_gen_addi_i32(addr, addr, -4);
10436 }
10437 store_reg(s, rn, addr);
10438 } else {
7d1b0095 10439 tcg_temp_free_i32(addr);
9ee6e8bb 10440 }
b0109805 10441 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10442 } else {
10443 /* srs */
81465888
PM
10444 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10445 insn & (1 << 21));
9ee6e8bb
PB
10446 }
10447 } else {
5856d44e 10448 int i, loaded_base = 0;
39d5492a 10449 TCGv_i32 loaded_var;
9ee6e8bb 10450 /* Load/store multiple. */
b0109805 10451 addr = load_reg(s, rn);
9ee6e8bb
PB
10452 offset = 0;
10453 for (i = 0; i < 16; i++) {
10454 if (insn & (1 << i))
10455 offset += 4;
10456 }
10457 if (insn & (1 << 24)) {
b0109805 10458 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10459 }
10460
f764718d 10461 loaded_var = NULL;
9ee6e8bb
PB
10462 for (i = 0; i < 16; i++) {
10463 if ((insn & (1 << i)) == 0)
10464 continue;
10465 if (insn & (1 << 20)) {
10466 /* Load. */
e2592fad 10467 tmp = tcg_temp_new_i32();
12dcc321 10468 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10469 if (i == 15) {
3bb8a96f 10470 gen_bx_excret(s, tmp);
5856d44e
YO
10471 } else if (i == rn) {
10472 loaded_var = tmp;
10473 loaded_base = 1;
9ee6e8bb 10474 } else {
b0109805 10475 store_reg(s, i, tmp);
9ee6e8bb
PB
10476 }
10477 } else {
10478 /* Store. */
b0109805 10479 tmp = load_reg(s, i);
12dcc321 10480 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10481 tcg_temp_free_i32(tmp);
9ee6e8bb 10482 }
b0109805 10483 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10484 }
5856d44e
YO
10485 if (loaded_base) {
10486 store_reg(s, rn, loaded_var);
10487 }
9ee6e8bb
PB
10488 if (insn & (1 << 21)) {
10489 /* Base register writeback. */
10490 if (insn & (1 << 24)) {
b0109805 10491 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10492 }
10493 /* Fault if writeback register is in register list. */
10494 if (insn & (1 << rn))
10495 goto illegal_op;
b0109805
PB
10496 store_reg(s, rn, addr);
10497 } else {
7d1b0095 10498 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10499 }
10500 }
10501 }
10502 break;
2af9ab77
JB
10503 case 5:
10504
9ee6e8bb 10505 op = (insn >> 21) & 0xf;
2af9ab77 10506 if (op == 6) {
62b44f05
AR
10507 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10508 goto illegal_op;
10509 }
2af9ab77
JB
10510 /* Halfword pack. */
10511 tmp = load_reg(s, rn);
10512 tmp2 = load_reg(s, rm);
10513 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10514 if (insn & (1 << 5)) {
10515 /* pkhtb */
10516 if (shift == 0)
10517 shift = 31;
10518 tcg_gen_sari_i32(tmp2, tmp2, shift);
10519 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10520 tcg_gen_ext16u_i32(tmp2, tmp2);
10521 } else {
10522 /* pkhbt */
10523 if (shift)
10524 tcg_gen_shli_i32(tmp2, tmp2, shift);
10525 tcg_gen_ext16u_i32(tmp, tmp);
10526 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10527 }
10528 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10529 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10530 store_reg(s, rd, tmp);
10531 } else {
2af9ab77
JB
10532 /* Data processing register constant shift. */
10533 if (rn == 15) {
7d1b0095 10534 tmp = tcg_temp_new_i32();
2af9ab77
JB
10535 tcg_gen_movi_i32(tmp, 0);
10536 } else {
10537 tmp = load_reg(s, rn);
10538 }
10539 tmp2 = load_reg(s, rm);
10540
10541 shiftop = (insn >> 4) & 3;
10542 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10543 conds = (insn & (1 << 20)) != 0;
10544 logic_cc = (conds && thumb2_logic_op(op));
10545 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10546 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10547 goto illegal_op;
7d1b0095 10548 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10549 if (rd != 15) {
10550 store_reg(s, rd, tmp);
10551 } else {
7d1b0095 10552 tcg_temp_free_i32(tmp);
2af9ab77 10553 }
3174f8e9 10554 }
9ee6e8bb
PB
10555 break;
10556 case 13: /* Misc data processing. */
10557 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10558 if (op < 4 && (insn & 0xf000) != 0xf000)
10559 goto illegal_op;
10560 switch (op) {
10561 case 0: /* Register controlled shift. */
8984bd2e
PB
10562 tmp = load_reg(s, rn);
10563 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10564 if ((insn & 0x70) != 0)
10565 goto illegal_op;
10566 op = (insn >> 21) & 3;
8984bd2e
PB
10567 logic_cc = (insn & (1 << 20)) != 0;
10568 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10569 if (logic_cc)
10570 gen_logic_CC(tmp);
bedb8a6b 10571 store_reg(s, rd, tmp);
9ee6e8bb
PB
10572 break;
10573 case 1: /* Sign/zero extend. */
62b44f05
AR
10574 op = (insn >> 20) & 7;
10575 switch (op) {
10576 case 0: /* SXTAH, SXTH */
10577 case 1: /* UXTAH, UXTH */
10578 case 4: /* SXTAB, SXTB */
10579 case 5: /* UXTAB, UXTB */
10580 break;
10581 case 2: /* SXTAB16, SXTB16 */
10582 case 3: /* UXTAB16, UXTB16 */
10583 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10584 goto illegal_op;
10585 }
10586 break;
10587 default:
10588 goto illegal_op;
10589 }
10590 if (rn != 15) {
10591 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10592 goto illegal_op;
10593 }
10594 }
5e3f878a 10595 tmp = load_reg(s, rm);
9ee6e8bb 10596 shift = (insn >> 4) & 3;
1301f322 10597 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10598 rotate, a shift is sufficient. */
10599 if (shift != 0)
f669df27 10600 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10601 op = (insn >> 20) & 7;
10602 switch (op) {
5e3f878a
PB
10603 case 0: gen_sxth(tmp); break;
10604 case 1: gen_uxth(tmp); break;
10605 case 2: gen_sxtb16(tmp); break;
10606 case 3: gen_uxtb16(tmp); break;
10607 case 4: gen_sxtb(tmp); break;
10608 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10609 default:
10610 g_assert_not_reached();
9ee6e8bb
PB
10611 }
10612 if (rn != 15) {
5e3f878a 10613 tmp2 = load_reg(s, rn);
9ee6e8bb 10614 if ((op >> 1) == 1) {
5e3f878a 10615 gen_add16(tmp, tmp2);
9ee6e8bb 10616 } else {
5e3f878a 10617 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10618 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10619 }
10620 }
5e3f878a 10621 store_reg(s, rd, tmp);
9ee6e8bb
PB
10622 break;
10623 case 2: /* SIMD add/subtract. */
62b44f05
AR
10624 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10625 goto illegal_op;
10626 }
9ee6e8bb
PB
10627 op = (insn >> 20) & 7;
10628 shift = (insn >> 4) & 7;
10629 if ((op & 3) == 3 || (shift & 3) == 3)
10630 goto illegal_op;
6ddbc6e4
PB
10631 tmp = load_reg(s, rn);
10632 tmp2 = load_reg(s, rm);
10633 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10634 tcg_temp_free_i32(tmp2);
6ddbc6e4 10635 store_reg(s, rd, tmp);
9ee6e8bb
PB
10636 break;
10637 case 3: /* Other data processing. */
10638 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10639 if (op < 4) {
10640 /* Saturating add/subtract. */
62b44f05
AR
10641 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10642 goto illegal_op;
10643 }
d9ba4830
PB
10644 tmp = load_reg(s, rn);
10645 tmp2 = load_reg(s, rm);
9ee6e8bb 10646 if (op & 1)
9ef39277 10647 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10648 if (op & 2)
9ef39277 10649 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10650 else
9ef39277 10651 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10652 tcg_temp_free_i32(tmp2);
9ee6e8bb 10653 } else {
62b44f05
AR
10654 switch (op) {
10655 case 0x0a: /* rbit */
10656 case 0x08: /* rev */
10657 case 0x09: /* rev16 */
10658 case 0x0b: /* revsh */
10659 case 0x18: /* clz */
10660 break;
10661 case 0x10: /* sel */
10662 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10663 goto illegal_op;
10664 }
10665 break;
10666 case 0x20: /* crc32/crc32c */
10667 case 0x21:
10668 case 0x22:
10669 case 0x28:
10670 case 0x29:
10671 case 0x2a:
10672 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10673 goto illegal_op;
10674 }
10675 break;
10676 default:
10677 goto illegal_op;
10678 }
d9ba4830 10679 tmp = load_reg(s, rn);
9ee6e8bb
PB
10680 switch (op) {
10681 case 0x0a: /* rbit */
d9ba4830 10682 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10683 break;
10684 case 0x08: /* rev */
66896cb8 10685 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10686 break;
10687 case 0x09: /* rev16 */
d9ba4830 10688 gen_rev16(tmp);
9ee6e8bb
PB
10689 break;
10690 case 0x0b: /* revsh */
d9ba4830 10691 gen_revsh(tmp);
9ee6e8bb
PB
10692 break;
10693 case 0x10: /* sel */
d9ba4830 10694 tmp2 = load_reg(s, rm);
7d1b0095 10695 tmp3 = tcg_temp_new_i32();
0ecb72a5 10696 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10697 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10698 tcg_temp_free_i32(tmp3);
10699 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10700 break;
10701 case 0x18: /* clz */
7539a012 10702 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10703 break;
eb0ecd5a
WN
10704 case 0x20:
10705 case 0x21:
10706 case 0x22:
10707 case 0x28:
10708 case 0x29:
10709 case 0x2a:
10710 {
10711 /* crc32/crc32c */
10712 uint32_t sz = op & 0x3;
10713 uint32_t c = op & 0x8;
10714
eb0ecd5a 10715 tmp2 = load_reg(s, rm);
aa633469
PM
10716 if (sz == 0) {
10717 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10718 } else if (sz == 1) {
10719 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10720 }
eb0ecd5a
WN
10721 tmp3 = tcg_const_i32(1 << sz);
10722 if (c) {
10723 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10724 } else {
10725 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10726 }
10727 tcg_temp_free_i32(tmp2);
10728 tcg_temp_free_i32(tmp3);
10729 break;
10730 }
9ee6e8bb 10731 default:
62b44f05 10732 g_assert_not_reached();
9ee6e8bb
PB
10733 }
10734 }
d9ba4830 10735 store_reg(s, rd, tmp);
9ee6e8bb
PB
10736 break;
10737 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10738 switch ((insn >> 20) & 7) {
10739 case 0: /* 32 x 32 -> 32 */
10740 case 7: /* Unsigned sum of absolute differences. */
10741 break;
10742 case 1: /* 16 x 16 -> 32 */
10743 case 2: /* Dual multiply add. */
10744 case 3: /* 32 * 16 -> 32msb */
10745 case 4: /* Dual multiply subtract. */
10746 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10747 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10748 goto illegal_op;
10749 }
10750 break;
10751 }
9ee6e8bb 10752 op = (insn >> 4) & 0xf;
d9ba4830
PB
10753 tmp = load_reg(s, rn);
10754 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10755 switch ((insn >> 20) & 7) {
10756 case 0: /* 32 x 32 -> 32 */
d9ba4830 10757 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10758 tcg_temp_free_i32(tmp2);
9ee6e8bb 10759 if (rs != 15) {
d9ba4830 10760 tmp2 = load_reg(s, rs);
9ee6e8bb 10761 if (op)
d9ba4830 10762 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10763 else
d9ba4830 10764 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10765 tcg_temp_free_i32(tmp2);
9ee6e8bb 10766 }
9ee6e8bb
PB
10767 break;
10768 case 1: /* 16 x 16 -> 32 */
d9ba4830 10769 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10770 tcg_temp_free_i32(tmp2);
9ee6e8bb 10771 if (rs != 15) {
d9ba4830 10772 tmp2 = load_reg(s, rs);
9ef39277 10773 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10774 tcg_temp_free_i32(tmp2);
9ee6e8bb 10775 }
9ee6e8bb
PB
10776 break;
10777 case 2: /* Dual multiply add. */
10778 case 4: /* Dual multiply subtract. */
10779 if (op)
d9ba4830
PB
10780 gen_swap_half(tmp2);
10781 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10782 if (insn & (1 << 22)) {
e1d177b9 10783 /* This subtraction cannot overflow. */
d9ba4830 10784 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10785 } else {
e1d177b9
PM
10786 /* This addition cannot overflow 32 bits;
10787 * however it may overflow considered as a signed
10788 * operation, in which case we must set the Q flag.
10789 */
9ef39277 10790 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10791 }
7d1b0095 10792 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10793 if (rs != 15)
10794 {
d9ba4830 10795 tmp2 = load_reg(s, rs);
9ef39277 10796 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10797 tcg_temp_free_i32(tmp2);
9ee6e8bb 10798 }
9ee6e8bb
PB
10799 break;
10800 case 3: /* 32 * 16 -> 32msb */
10801 if (op)
d9ba4830 10802 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10803 else
d9ba4830 10804 gen_sxth(tmp2);
a7812ae4
PB
10805 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10806 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10807 tmp = tcg_temp_new_i32();
ecc7b3aa 10808 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10809 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10810 if (rs != 15)
10811 {
d9ba4830 10812 tmp2 = load_reg(s, rs);
9ef39277 10813 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10814 tcg_temp_free_i32(tmp2);
9ee6e8bb 10815 }
9ee6e8bb 10816 break;
838fa72d
AJ
10817 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10818 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10819 if (rs != 15) {
838fa72d
AJ
10820 tmp = load_reg(s, rs);
10821 if (insn & (1 << 20)) {
10822 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10823 } else {
838fa72d 10824 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10825 }
2c0262af 10826 }
838fa72d
AJ
10827 if (insn & (1 << 4)) {
10828 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10829 }
10830 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10831 tmp = tcg_temp_new_i32();
ecc7b3aa 10832 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10833 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10834 break;
10835 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10836 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10837 tcg_temp_free_i32(tmp2);
9ee6e8bb 10838 if (rs != 15) {
d9ba4830
PB
10839 tmp2 = load_reg(s, rs);
10840 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10841 tcg_temp_free_i32(tmp2);
5fd46862 10842 }
9ee6e8bb 10843 break;
2c0262af 10844 }
d9ba4830 10845 store_reg(s, rd, tmp);
2c0262af 10846 break;
9ee6e8bb
PB
10847 case 6: case 7: /* 64-bit multiply, Divide. */
10848 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10849 tmp = load_reg(s, rn);
10850 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10851 if ((op & 0x50) == 0x10) {
10852 /* sdiv, udiv */
d614a513 10853 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10854 goto illegal_op;
47789990 10855 }
9ee6e8bb 10856 if (op & 0x20)
5e3f878a 10857 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10858 else
5e3f878a 10859 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10860 tcg_temp_free_i32(tmp2);
5e3f878a 10861 store_reg(s, rd, tmp);
9ee6e8bb
PB
10862 } else if ((op & 0xe) == 0xc) {
10863 /* Dual multiply accumulate long. */
62b44f05
AR
10864 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10865 tcg_temp_free_i32(tmp);
10866 tcg_temp_free_i32(tmp2);
10867 goto illegal_op;
10868 }
9ee6e8bb 10869 if (op & 1)
5e3f878a
PB
10870 gen_swap_half(tmp2);
10871 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10872 if (op & 0x10) {
5e3f878a 10873 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10874 } else {
5e3f878a 10875 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10876 }
7d1b0095 10877 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10878 /* BUGFIX */
10879 tmp64 = tcg_temp_new_i64();
10880 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10881 tcg_temp_free_i32(tmp);
a7812ae4
PB
10882 gen_addq(s, tmp64, rs, rd);
10883 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10884 tcg_temp_free_i64(tmp64);
2c0262af 10885 } else {
9ee6e8bb
PB
10886 if (op & 0x20) {
10887 /* Unsigned 64-bit multiply */
a7812ae4 10888 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10889 } else {
9ee6e8bb
PB
10890 if (op & 8) {
10891 /* smlalxy */
62b44f05
AR
10892 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10893 tcg_temp_free_i32(tmp2);
10894 tcg_temp_free_i32(tmp);
10895 goto illegal_op;
10896 }
5e3f878a 10897 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10898 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10899 tmp64 = tcg_temp_new_i64();
10900 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10901 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10902 } else {
10903 /* Signed 64-bit multiply */
a7812ae4 10904 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10905 }
b5ff1b31 10906 }
9ee6e8bb
PB
10907 if (op & 4) {
10908 /* umaal */
62b44f05
AR
10909 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10910 tcg_temp_free_i64(tmp64);
10911 goto illegal_op;
10912 }
a7812ae4
PB
10913 gen_addq_lo(s, tmp64, rs);
10914 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10915 } else if (op & 0x40) {
10916 /* 64-bit accumulate. */
a7812ae4 10917 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10918 }
a7812ae4 10919 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10920 tcg_temp_free_i64(tmp64);
5fd46862 10921 }
2c0262af 10922 break;
9ee6e8bb
PB
10923 }
10924 break;
10925 case 6: case 7: case 14: case 15:
10926 /* Coprocessor. */
7517748e
PM
10927 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10928 /* We don't currently implement M profile FP support,
b1e5336a
PM
10929 * so this entire space should give a NOCP fault, with
10930 * the exception of the v8M VLLDM and VLSTM insns, which
10931 * must be NOPs in Secure state and UNDEF in Nonsecure state.
7517748e 10932 */
b1e5336a
PM
10933 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10934 (insn & 0xffa00f00) == 0xec200a00) {
10935 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10936 * - VLLDM, VLSTM
10937 * We choose to UNDEF if the RAZ bits are non-zero.
10938 */
10939 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10940 goto illegal_op;
10941 }
10942 /* Just NOP since FP support is not implemented */
10943 break;
10944 }
10945 /* All other insns: NOCP */
7517748e
PM
10946 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10947 default_exception_el(s));
10948 break;
10949 }
0052087e
RH
10950 if ((insn & 0xfe000a00) == 0xfc000800
10951 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10952 /* The Thumb2 and ARM encodings are identical. */
10953 if (disas_neon_insn_3same_ext(s, insn)) {
10954 goto illegal_op;
10955 }
10956 } else if ((insn & 0xff000a00) == 0xfe000800
10957 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10958 /* The Thumb2 and ARM encodings are identical. */
10959 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10960 goto illegal_op;
10961 }
10962 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 10963 /* Translate into the equivalent ARM encoding. */
f06053e3 10964 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10965 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10966 goto illegal_op;
7dcc1f89 10967 }
6a57f3eb 10968 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10969 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10970 goto illegal_op;
10971 }
9ee6e8bb
PB
10972 } else {
10973 if (insn & (1 << 28))
10974 goto illegal_op;
7dcc1f89 10975 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10976 goto illegal_op;
7dcc1f89 10977 }
9ee6e8bb
PB
10978 }
10979 break;
10980 case 8: case 9: case 10: case 11:
10981 if (insn & (1 << 15)) {
10982 /* Branches, misc control. */
10983 if (insn & 0x5000) {
10984 /* Unconditional branch. */
10985 /* signextend(hw1[10:0]) -> offset[:12]. */
10986 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10987 /* hw1[10:0] -> offset[11:1]. */
10988 offset |= (insn & 0x7ff) << 1;
10989 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10990 offset[24:22] already have the same value because of the
10991 sign extension above. */
10992 offset ^= ((~insn) & (1 << 13)) << 10;
10993 offset ^= ((~insn) & (1 << 11)) << 11;
10994
9ee6e8bb
PB
10995 if (insn & (1 << 14)) {
10996 /* Branch and link. */
3174f8e9 10997 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10998 }
3b46e624 10999
b0109805 11000 offset += s->pc;
9ee6e8bb
PB
11001 if (insn & (1 << 12)) {
11002 /* b/bl */
b0109805 11003 gen_jmp(s, offset);
9ee6e8bb
PB
11004 } else {
11005 /* blx */
b0109805 11006 offset &= ~(uint32_t)2;
be5e7a76 11007 /* thumb2 bx, no need to check */
b0109805 11008 gen_bx_im(s, offset);
2c0262af 11009 }
9ee6e8bb
PB
11010 } else if (((insn >> 23) & 7) == 7) {
11011 /* Misc control */
11012 if (insn & (1 << 13))
11013 goto illegal_op;
11014
11015 if (insn & (1 << 26)) {
001b3cab
PM
11016 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11017 goto illegal_op;
11018 }
37e6456e
PM
11019 if (!(insn & (1 << 20))) {
11020 /* Hypervisor call (v7) */
11021 int imm16 = extract32(insn, 16, 4) << 12
11022 | extract32(insn, 0, 12);
11023 ARCH(7);
11024 if (IS_USER(s)) {
11025 goto illegal_op;
11026 }
11027 gen_hvc(s, imm16);
11028 } else {
11029 /* Secure monitor call (v6+) */
11030 ARCH(6K);
11031 if (IS_USER(s)) {
11032 goto illegal_op;
11033 }
11034 gen_smc(s);
11035 }
2c0262af 11036 } else {
9ee6e8bb
PB
11037 op = (insn >> 20) & 7;
11038 switch (op) {
11039 case 0: /* msr cpsr. */
b53d8923 11040 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 11041 tmp = load_reg(s, rn);
b28b3377
PM
11042 /* the constant is the mask and SYSm fields */
11043 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 11044 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 11045 tcg_temp_free_i32(addr);
7d1b0095 11046 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11047 gen_lookup_tb(s);
11048 break;
11049 }
11050 /* fall through */
11051 case 1: /* msr spsr. */
b53d8923 11052 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11053 goto illegal_op;
b53d8923 11054 }
8bfd0550
PM
11055
11056 if (extract32(insn, 5, 1)) {
11057 /* MSR (banked) */
11058 int sysm = extract32(insn, 8, 4) |
11059 (extract32(insn, 4, 1) << 4);
11060 int r = op & 1;
11061
11062 gen_msr_banked(s, r, sysm, rm);
11063 break;
11064 }
11065
11066 /* MSR (for PSRs) */
2fbac54b
FN
11067 tmp = load_reg(s, rn);
11068 if (gen_set_psr(s,
7dcc1f89 11069 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 11070 op == 1, tmp))
9ee6e8bb
PB
11071 goto illegal_op;
11072 break;
11073 case 2: /* cps, nop-hint. */
11074 if (((insn >> 8) & 7) == 0) {
11075 gen_nop_hint(s, insn & 0xff);
11076 }
11077 /* Implemented as NOP in user mode. */
11078 if (IS_USER(s))
11079 break;
11080 offset = 0;
11081 imm = 0;
11082 if (insn & (1 << 10)) {
11083 if (insn & (1 << 7))
11084 offset |= CPSR_A;
11085 if (insn & (1 << 6))
11086 offset |= CPSR_I;
11087 if (insn & (1 << 5))
11088 offset |= CPSR_F;
11089 if (insn & (1 << 9))
11090 imm = CPSR_A | CPSR_I | CPSR_F;
11091 }
11092 if (insn & (1 << 8)) {
11093 offset |= 0x1f;
11094 imm |= (insn & 0x1f);
11095 }
11096 if (offset) {
2fbac54b 11097 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
11098 }
11099 break;
11100 case 3: /* Special control operations. */
14120108 11101 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 11102 !arm_dc_feature(s, ARM_FEATURE_M)) {
14120108
JS
11103 goto illegal_op;
11104 }
9ee6e8bb
PB
11105 op = (insn >> 4) & 0xf;
11106 switch (op) {
11107 case 2: /* clrex */
426f5abc 11108 gen_clrex(s);
9ee6e8bb
PB
11109 break;
11110 case 4: /* dsb */
11111 case 5: /* dmb */
61e4c432 11112 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 11113 break;
6df99dec
SS
11114 case 6: /* isb */
11115 /* We need to break the TB after this insn
11116 * to execute self-modifying code correctly
11117 * and also to take any pending interrupts
11118 * immediately.
11119 */
0b609cc1 11120 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 11121 break;
9ee6e8bb
PB
11122 default:
11123 goto illegal_op;
11124 }
11125 break;
11126 case 4: /* bxj */
9d7c59c8
PM
11127 /* Trivial implementation equivalent to bx.
11128 * This instruction doesn't exist at all for M-profile.
11129 */
11130 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11131 goto illegal_op;
11132 }
d9ba4830
PB
11133 tmp = load_reg(s, rn);
11134 gen_bx(s, tmp);
9ee6e8bb
PB
11135 break;
11136 case 5: /* Exception return. */
b8b45b68
RV
11137 if (IS_USER(s)) {
11138 goto illegal_op;
11139 }
11140 if (rn != 14 || rd != 15) {
11141 goto illegal_op;
11142 }
11143 tmp = load_reg(s, rn);
11144 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11145 gen_exception_return(s, tmp);
11146 break;
8bfd0550 11147 case 6: /* MRS */
43ac6574
PM
11148 if (extract32(insn, 5, 1) &&
11149 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11150 /* MRS (banked) */
11151 int sysm = extract32(insn, 16, 4) |
11152 (extract32(insn, 4, 1) << 4);
11153
11154 gen_mrs_banked(s, 0, sysm, rd);
11155 break;
11156 }
11157
3d54026f
PM
11158 if (extract32(insn, 16, 4) != 0xf) {
11159 goto illegal_op;
11160 }
11161 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11162 extract32(insn, 0, 8) != 0) {
11163 goto illegal_op;
11164 }
11165
8bfd0550 11166 /* mrs cpsr */
7d1b0095 11167 tmp = tcg_temp_new_i32();
b53d8923 11168 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
11169 addr = tcg_const_i32(insn & 0xff);
11170 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 11171 tcg_temp_free_i32(addr);
9ee6e8bb 11172 } else {
9ef39277 11173 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 11174 }
8984bd2e 11175 store_reg(s, rd, tmp);
9ee6e8bb 11176 break;
8bfd0550 11177 case 7: /* MRS */
43ac6574
PM
11178 if (extract32(insn, 5, 1) &&
11179 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
11180 /* MRS (banked) */
11181 int sysm = extract32(insn, 16, 4) |
11182 (extract32(insn, 4, 1) << 4);
11183
11184 gen_mrs_banked(s, 1, sysm, rd);
11185 break;
11186 }
11187
11188 /* mrs spsr. */
9ee6e8bb 11189 /* Not accessible in user mode. */
b53d8923 11190 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11191 goto illegal_op;
b53d8923 11192 }
3d54026f
PM
11193
11194 if (extract32(insn, 16, 4) != 0xf ||
11195 extract32(insn, 0, 8) != 0) {
11196 goto illegal_op;
11197 }
11198
d9ba4830
PB
11199 tmp = load_cpu_field(spsr);
11200 store_reg(s, rd, tmp);
9ee6e8bb 11201 break;
2c0262af
FB
11202 }
11203 }
9ee6e8bb
PB
11204 } else {
11205 /* Conditional branch. */
11206 op = (insn >> 22) & 0xf;
11207 /* Generate a conditional jump to next instruction. */
11208 s->condlabel = gen_new_label();
39fb730a 11209 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
11210 s->condjmp = 1;
11211
11212 /* offset[11:1] = insn[10:0] */
11213 offset = (insn & 0x7ff) << 1;
11214 /* offset[17:12] = insn[21:16]. */
11215 offset |= (insn & 0x003f0000) >> 4;
11216 /* offset[31:20] = insn[26]. */
11217 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11218 /* offset[18] = insn[13]. */
11219 offset |= (insn & (1 << 13)) << 5;
11220 /* offset[19] = insn[11]. */
11221 offset |= (insn & (1 << 11)) << 8;
11222
11223 /* jump to the offset */
b0109805 11224 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
11225 }
11226 } else {
11227 /* Data processing immediate. */
11228 if (insn & (1 << 25)) {
11229 if (insn & (1 << 24)) {
11230 if (insn & (1 << 20))
11231 goto illegal_op;
11232 /* Bitfield/Saturate. */
11233 op = (insn >> 21) & 7;
11234 imm = insn & 0x1f;
11235 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 11236 if (rn == 15) {
7d1b0095 11237 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
11238 tcg_gen_movi_i32(tmp, 0);
11239 } else {
11240 tmp = load_reg(s, rn);
11241 }
9ee6e8bb
PB
11242 switch (op) {
11243 case 2: /* Signed bitfield extract. */
11244 imm++;
11245 if (shift + imm > 32)
11246 goto illegal_op;
59a71b4c
RH
11247 if (imm < 32) {
11248 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11249 }
9ee6e8bb
PB
11250 break;
11251 case 6: /* Unsigned bitfield extract. */
11252 imm++;
11253 if (shift + imm > 32)
11254 goto illegal_op;
59a71b4c
RH
11255 if (imm < 32) {
11256 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11257 }
9ee6e8bb
PB
11258 break;
11259 case 3: /* Bitfield insert/clear. */
11260 if (imm < shift)
11261 goto illegal_op;
11262 imm = imm + 1 - shift;
11263 if (imm != 32) {
6ddbc6e4 11264 tmp2 = load_reg(s, rd);
d593c48e 11265 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 11266 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11267 }
11268 break;
11269 case 7:
11270 goto illegal_op;
11271 default: /* Saturate. */
9ee6e8bb
PB
11272 if (shift) {
11273 if (op & 1)
6ddbc6e4 11274 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 11275 else
6ddbc6e4 11276 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 11277 }
6ddbc6e4 11278 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
11279 if (op & 4) {
11280 /* Unsigned. */
62b44f05
AR
11281 if ((op & 1) && shift == 0) {
11282 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11283 tcg_temp_free_i32(tmp);
11284 tcg_temp_free_i32(tmp2);
11285 goto illegal_op;
11286 }
9ef39277 11287 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11288 } else {
9ef39277 11289 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 11290 }
2c0262af 11291 } else {
9ee6e8bb 11292 /* Signed. */
62b44f05
AR
11293 if ((op & 1) && shift == 0) {
11294 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11295 tcg_temp_free_i32(tmp);
11296 tcg_temp_free_i32(tmp2);
11297 goto illegal_op;
11298 }
9ef39277 11299 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 11300 } else {
9ef39277 11301 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 11302 }
2c0262af 11303 }
b75263d6 11304 tcg_temp_free_i32(tmp2);
9ee6e8bb 11305 break;
2c0262af 11306 }
6ddbc6e4 11307 store_reg(s, rd, tmp);
9ee6e8bb
PB
11308 } else {
11309 imm = ((insn & 0x04000000) >> 15)
11310 | ((insn & 0x7000) >> 4) | (insn & 0xff);
11311 if (insn & (1 << 22)) {
11312 /* 16-bit immediate. */
11313 imm |= (insn >> 4) & 0xf000;
11314 if (insn & (1 << 23)) {
11315 /* movt */
5e3f878a 11316 tmp = load_reg(s, rd);
86831435 11317 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 11318 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 11319 } else {
9ee6e8bb 11320 /* movw */
7d1b0095 11321 tmp = tcg_temp_new_i32();
5e3f878a 11322 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
11323 }
11324 } else {
9ee6e8bb
PB
11325 /* Add/sub 12-bit immediate. */
11326 if (rn == 15) {
b0109805 11327 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 11328 if (insn & (1 << 23))
b0109805 11329 offset -= imm;
9ee6e8bb 11330 else
b0109805 11331 offset += imm;
7d1b0095 11332 tmp = tcg_temp_new_i32();
5e3f878a 11333 tcg_gen_movi_i32(tmp, offset);
2c0262af 11334 } else {
5e3f878a 11335 tmp = load_reg(s, rn);
9ee6e8bb 11336 if (insn & (1 << 23))
5e3f878a 11337 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 11338 else
5e3f878a 11339 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 11340 }
9ee6e8bb 11341 }
5e3f878a 11342 store_reg(s, rd, tmp);
191abaa2 11343 }
9ee6e8bb
PB
11344 } else {
11345 int shifter_out = 0;
11346 /* modified 12-bit immediate. */
11347 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11348 imm = (insn & 0xff);
11349 switch (shift) {
11350 case 0: /* XY */
11351 /* Nothing to do. */
11352 break;
11353 case 1: /* 00XY00XY */
11354 imm |= imm << 16;
11355 break;
11356 case 2: /* XY00XY00 */
11357 imm |= imm << 16;
11358 imm <<= 8;
11359 break;
11360 case 3: /* XYXYXYXY */
11361 imm |= imm << 16;
11362 imm |= imm << 8;
11363 break;
11364 default: /* Rotated constant. */
11365 shift = (shift << 1) | (imm >> 7);
11366 imm |= 0x80;
11367 imm = imm << (32 - shift);
11368 shifter_out = 1;
11369 break;
b5ff1b31 11370 }
7d1b0095 11371 tmp2 = tcg_temp_new_i32();
3174f8e9 11372 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 11373 rn = (insn >> 16) & 0xf;
3174f8e9 11374 if (rn == 15) {
7d1b0095 11375 tmp = tcg_temp_new_i32();
3174f8e9
FN
11376 tcg_gen_movi_i32(tmp, 0);
11377 } else {
11378 tmp = load_reg(s, rn);
11379 }
9ee6e8bb
PB
11380 op = (insn >> 21) & 0xf;
11381 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 11382 shifter_out, tmp, tmp2))
9ee6e8bb 11383 goto illegal_op;
7d1b0095 11384 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
11385 rd = (insn >> 8) & 0xf;
11386 if (rd != 15) {
3174f8e9
FN
11387 store_reg(s, rd, tmp);
11388 } else {
7d1b0095 11389 tcg_temp_free_i32(tmp);
2c0262af 11390 }
2c0262af 11391 }
9ee6e8bb
PB
11392 }
11393 break;
11394 case 12: /* Load/store single data item. */
11395 {
11396 int postinc = 0;
11397 int writeback = 0;
a99caa48 11398 int memidx;
9bb6558a
PM
11399 ISSInfo issinfo;
11400
9ee6e8bb 11401 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 11402 if (disas_neon_ls_insn(s, insn)) {
c1713132 11403 goto illegal_op;
7dcc1f89 11404 }
9ee6e8bb
PB
11405 break;
11406 }
a2fdc890
PM
11407 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11408 if (rs == 15) {
11409 if (!(insn & (1 << 20))) {
11410 goto illegal_op;
11411 }
11412 if (op != 2) {
11413 /* Byte or halfword load space with dest == r15 : memory hints.
11414 * Catch them early so we don't emit pointless addressing code.
11415 * This space is a mix of:
11416 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11417 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11418 * cores)
11419 * unallocated hints, which must be treated as NOPs
11420 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11421 * which is easiest for the decoding logic
11422 * Some space which must UNDEF
11423 */
11424 int op1 = (insn >> 23) & 3;
11425 int op2 = (insn >> 6) & 0x3f;
11426 if (op & 2) {
11427 goto illegal_op;
11428 }
11429 if (rn == 15) {
02afbf64
PM
11430 /* UNPREDICTABLE, unallocated hint or
11431 * PLD/PLDW/PLI (literal)
11432 */
2eea841c 11433 return;
a2fdc890
PM
11434 }
11435 if (op1 & 1) {
2eea841c 11436 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11437 }
11438 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 11439 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11440 }
11441 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 11442 goto illegal_op;
a2fdc890
PM
11443 }
11444 }
a99caa48 11445 memidx = get_mem_index(s);
9ee6e8bb 11446 if (rn == 15) {
7d1b0095 11447 addr = tcg_temp_new_i32();
9ee6e8bb
PB
11448 /* PC relative. */
11449 /* s->pc has already been incremented by 4. */
11450 imm = s->pc & 0xfffffffc;
11451 if (insn & (1 << 23))
11452 imm += insn & 0xfff;
11453 else
11454 imm -= insn & 0xfff;
b0109805 11455 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 11456 } else {
b0109805 11457 addr = load_reg(s, rn);
9ee6e8bb
PB
11458 if (insn & (1 << 23)) {
11459 /* Positive offset. */
11460 imm = insn & 0xfff;
b0109805 11461 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 11462 } else {
9ee6e8bb 11463 imm = insn & 0xff;
2a0308c5
PM
11464 switch ((insn >> 8) & 0xf) {
11465 case 0x0: /* Shifted Register. */
9ee6e8bb 11466 shift = (insn >> 4) & 0xf;
2a0308c5
PM
11467 if (shift > 3) {
11468 tcg_temp_free_i32(addr);
18c9b560 11469 goto illegal_op;
2a0308c5 11470 }
b26eefb6 11471 tmp = load_reg(s, rm);
9ee6e8bb 11472 if (shift)
b26eefb6 11473 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11474 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11475 tcg_temp_free_i32(tmp);
9ee6e8bb 11476 break;
2a0308c5 11477 case 0xc: /* Negative offset. */
b0109805 11478 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11479 break;
2a0308c5 11480 case 0xe: /* User privilege. */
b0109805 11481 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11482 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11483 break;
2a0308c5 11484 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11485 imm = -imm;
11486 /* Fall through. */
2a0308c5 11487 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11488 postinc = 1;
11489 writeback = 1;
11490 break;
2a0308c5 11491 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11492 imm = -imm;
11493 /* Fall through. */
2a0308c5 11494 case 0xf: /* Pre-increment. */
b0109805 11495 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
11496 writeback = 1;
11497 break;
11498 default:
2a0308c5 11499 tcg_temp_free_i32(addr);
b7bcbe95 11500 goto illegal_op;
9ee6e8bb
PB
11501 }
11502 }
11503 }
9bb6558a
PM
11504
11505 issinfo = writeback ? ISSInvalid : rs;
11506
9ee6e8bb
PB
11507 if (insn & (1 << 20)) {
11508 /* Load. */
5a839c0d 11509 tmp = tcg_temp_new_i32();
a2fdc890 11510 switch (op) {
5a839c0d 11511 case 0:
9bb6558a 11512 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11513 break;
11514 case 4:
9bb6558a 11515 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11516 break;
11517 case 1:
9bb6558a 11518 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11519 break;
11520 case 5:
9bb6558a 11521 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11522 break;
11523 case 2:
9bb6558a 11524 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11525 break;
2a0308c5 11526 default:
5a839c0d 11527 tcg_temp_free_i32(tmp);
2a0308c5
PM
11528 tcg_temp_free_i32(addr);
11529 goto illegal_op;
a2fdc890
PM
11530 }
11531 if (rs == 15) {
3bb8a96f 11532 gen_bx_excret(s, tmp);
9ee6e8bb 11533 } else {
a2fdc890 11534 store_reg(s, rs, tmp);
9ee6e8bb
PB
11535 }
11536 } else {
11537 /* Store. */
b0109805 11538 tmp = load_reg(s, rs);
9ee6e8bb 11539 switch (op) {
5a839c0d 11540 case 0:
9bb6558a 11541 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11542 break;
11543 case 1:
9bb6558a 11544 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11545 break;
11546 case 2:
9bb6558a 11547 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11548 break;
2a0308c5 11549 default:
5a839c0d 11550 tcg_temp_free_i32(tmp);
2a0308c5
PM
11551 tcg_temp_free_i32(addr);
11552 goto illegal_op;
b7bcbe95 11553 }
5a839c0d 11554 tcg_temp_free_i32(tmp);
2c0262af 11555 }
9ee6e8bb 11556 if (postinc)
b0109805
PB
11557 tcg_gen_addi_i32(addr, addr, imm);
11558 if (writeback) {
11559 store_reg(s, rn, addr);
11560 } else {
7d1b0095 11561 tcg_temp_free_i32(addr);
b0109805 11562 }
9ee6e8bb
PB
11563 }
11564 break;
11565 default:
11566 goto illegal_op;
2c0262af 11567 }
2eea841c 11568 return;
9ee6e8bb 11569illegal_op:
2eea841c
PM
11570 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11571 default_exception_el(s));
2c0262af
FB
11572}
11573
296e5a0a 11574static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11575{
296e5a0a 11576 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11577 int32_t offset;
11578 int i;
39d5492a
PM
11579 TCGv_i32 tmp;
11580 TCGv_i32 tmp2;
11581 TCGv_i32 addr;
99c475ab 11582
99c475ab
FB
11583 switch (insn >> 12) {
11584 case 0: case 1:
396e467c 11585
99c475ab
FB
11586 rd = insn & 7;
11587 op = (insn >> 11) & 3;
11588 if (op == 3) {
11589 /* add/subtract */
11590 rn = (insn >> 3) & 7;
396e467c 11591 tmp = load_reg(s, rn);
99c475ab
FB
11592 if (insn & (1 << 10)) {
11593 /* immediate */
7d1b0095 11594 tmp2 = tcg_temp_new_i32();
396e467c 11595 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11596 } else {
11597 /* reg */
11598 rm = (insn >> 6) & 7;
396e467c 11599 tmp2 = load_reg(s, rm);
99c475ab 11600 }
9ee6e8bb
PB
11601 if (insn & (1 << 9)) {
11602 if (s->condexec_mask)
396e467c 11603 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11604 else
72485ec4 11605 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11606 } else {
11607 if (s->condexec_mask)
396e467c 11608 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11609 else
72485ec4 11610 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11611 }
7d1b0095 11612 tcg_temp_free_i32(tmp2);
396e467c 11613 store_reg(s, rd, tmp);
99c475ab
FB
11614 } else {
11615 /* shift immediate */
11616 rm = (insn >> 3) & 7;
11617 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11618 tmp = load_reg(s, rm);
11619 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11620 if (!s->condexec_mask)
11621 gen_logic_CC(tmp);
11622 store_reg(s, rd, tmp);
99c475ab
FB
11623 }
11624 break;
11625 case 2: case 3:
11626 /* arithmetic large immediate */
11627 op = (insn >> 11) & 3;
11628 rd = (insn >> 8) & 0x7;
396e467c 11629 if (op == 0) { /* mov */
7d1b0095 11630 tmp = tcg_temp_new_i32();
396e467c 11631 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11632 if (!s->condexec_mask)
396e467c
FN
11633 gen_logic_CC(tmp);
11634 store_reg(s, rd, tmp);
11635 } else {
11636 tmp = load_reg(s, rd);
7d1b0095 11637 tmp2 = tcg_temp_new_i32();
396e467c
FN
11638 tcg_gen_movi_i32(tmp2, insn & 0xff);
11639 switch (op) {
11640 case 1: /* cmp */
72485ec4 11641 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11642 tcg_temp_free_i32(tmp);
11643 tcg_temp_free_i32(tmp2);
396e467c
FN
11644 break;
11645 case 2: /* add */
11646 if (s->condexec_mask)
11647 tcg_gen_add_i32(tmp, tmp, tmp2);
11648 else
72485ec4 11649 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11650 tcg_temp_free_i32(tmp2);
396e467c
FN
11651 store_reg(s, rd, tmp);
11652 break;
11653 case 3: /* sub */
11654 if (s->condexec_mask)
11655 tcg_gen_sub_i32(tmp, tmp, tmp2);
11656 else
72485ec4 11657 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11658 tcg_temp_free_i32(tmp2);
396e467c
FN
11659 store_reg(s, rd, tmp);
11660 break;
11661 }
99c475ab 11662 }
99c475ab
FB
11663 break;
11664 case 4:
11665 if (insn & (1 << 11)) {
11666 rd = (insn >> 8) & 7;
5899f386
FB
11667 /* load pc-relative. Bit 1 of PC is ignored. */
11668 val = s->pc + 2 + ((insn & 0xff) * 4);
11669 val &= ~(uint32_t)2;
7d1b0095 11670 addr = tcg_temp_new_i32();
b0109805 11671 tcg_gen_movi_i32(addr, val);
c40c8556 11672 tmp = tcg_temp_new_i32();
9bb6558a
PM
11673 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11674 rd | ISSIs16Bit);
7d1b0095 11675 tcg_temp_free_i32(addr);
b0109805 11676 store_reg(s, rd, tmp);
99c475ab
FB
11677 break;
11678 }
11679 if (insn & (1 << 10)) {
ebfe27c5
PM
11680 /* 0b0100_01xx_xxxx_xxxx
11681 * - data processing extended, branch and exchange
11682 */
99c475ab
FB
11683 rd = (insn & 7) | ((insn >> 4) & 8);
11684 rm = (insn >> 3) & 0xf;
11685 op = (insn >> 8) & 3;
11686 switch (op) {
11687 case 0: /* add */
396e467c
FN
11688 tmp = load_reg(s, rd);
11689 tmp2 = load_reg(s, rm);
11690 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11691 tcg_temp_free_i32(tmp2);
396e467c 11692 store_reg(s, rd, tmp);
99c475ab
FB
11693 break;
11694 case 1: /* cmp */
396e467c
FN
11695 tmp = load_reg(s, rd);
11696 tmp2 = load_reg(s, rm);
72485ec4 11697 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11698 tcg_temp_free_i32(tmp2);
11699 tcg_temp_free_i32(tmp);
99c475ab
FB
11700 break;
11701 case 2: /* mov/cpy */
396e467c
FN
11702 tmp = load_reg(s, rm);
11703 store_reg(s, rd, tmp);
99c475ab 11704 break;
ebfe27c5
PM
11705 case 3:
11706 {
11707 /* 0b0100_0111_xxxx_xxxx
11708 * - branch [and link] exchange thumb register
11709 */
11710 bool link = insn & (1 << 7);
11711
fb602cb7 11712 if (insn & 3) {
ebfe27c5
PM
11713 goto undef;
11714 }
11715 if (link) {
be5e7a76 11716 ARCH(5);
ebfe27c5 11717 }
fb602cb7
PM
11718 if ((insn & 4)) {
11719 /* BXNS/BLXNS: only exists for v8M with the
11720 * security extensions, and always UNDEF if NonSecure.
11721 * We don't implement these in the user-only mode
11722 * either (in theory you can use them from Secure User
11723 * mode but they are too tied in to system emulation.)
11724 */
11725 if (!s->v8m_secure || IS_USER_ONLY) {
11726 goto undef;
11727 }
11728 if (link) {
3e3fa230 11729 gen_blxns(s, rm);
fb602cb7
PM
11730 } else {
11731 gen_bxns(s, rm);
11732 }
11733 break;
11734 }
11735 /* BLX/BX */
ebfe27c5
PM
11736 tmp = load_reg(s, rm);
11737 if (link) {
99c475ab 11738 val = (uint32_t)s->pc | 1;
7d1b0095 11739 tmp2 = tcg_temp_new_i32();
b0109805
PB
11740 tcg_gen_movi_i32(tmp2, val);
11741 store_reg(s, 14, tmp2);
3bb8a96f
PM
11742 gen_bx(s, tmp);
11743 } else {
11744 /* Only BX works as exception-return, not BLX */
11745 gen_bx_excret(s, tmp);
99c475ab 11746 }
99c475ab
FB
11747 break;
11748 }
ebfe27c5 11749 }
99c475ab
FB
11750 break;
11751 }
11752
11753 /* data processing register */
11754 rd = insn & 7;
11755 rm = (insn >> 3) & 7;
11756 op = (insn >> 6) & 0xf;
11757 if (op == 2 || op == 3 || op == 4 || op == 7) {
11758 /* the shift/rotate ops want the operands backwards */
11759 val = rm;
11760 rm = rd;
11761 rd = val;
11762 val = 1;
11763 } else {
11764 val = 0;
11765 }
11766
396e467c 11767 if (op == 9) { /* neg */
7d1b0095 11768 tmp = tcg_temp_new_i32();
396e467c
FN
11769 tcg_gen_movi_i32(tmp, 0);
11770 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11771 tmp = load_reg(s, rd);
11772 } else {
f764718d 11773 tmp = NULL;
396e467c 11774 }
99c475ab 11775
396e467c 11776 tmp2 = load_reg(s, rm);
5899f386 11777 switch (op) {
99c475ab 11778 case 0x0: /* and */
396e467c 11779 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11780 if (!s->condexec_mask)
396e467c 11781 gen_logic_CC(tmp);
99c475ab
FB
11782 break;
11783 case 0x1: /* eor */
396e467c 11784 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11785 if (!s->condexec_mask)
396e467c 11786 gen_logic_CC(tmp);
99c475ab
FB
11787 break;
11788 case 0x2: /* lsl */
9ee6e8bb 11789 if (s->condexec_mask) {
365af80e 11790 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11791 } else {
9ef39277 11792 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11793 gen_logic_CC(tmp2);
9ee6e8bb 11794 }
99c475ab
FB
11795 break;
11796 case 0x3: /* lsr */
9ee6e8bb 11797 if (s->condexec_mask) {
365af80e 11798 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11799 } else {
9ef39277 11800 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11801 gen_logic_CC(tmp2);
9ee6e8bb 11802 }
99c475ab
FB
11803 break;
11804 case 0x4: /* asr */
9ee6e8bb 11805 if (s->condexec_mask) {
365af80e 11806 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11807 } else {
9ef39277 11808 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11809 gen_logic_CC(tmp2);
9ee6e8bb 11810 }
99c475ab
FB
11811 break;
11812 case 0x5: /* adc */
49b4c31e 11813 if (s->condexec_mask) {
396e467c 11814 gen_adc(tmp, tmp2);
49b4c31e
RH
11815 } else {
11816 gen_adc_CC(tmp, tmp, tmp2);
11817 }
99c475ab
FB
11818 break;
11819 case 0x6: /* sbc */
2de68a49 11820 if (s->condexec_mask) {
396e467c 11821 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11822 } else {
11823 gen_sbc_CC(tmp, tmp, tmp2);
11824 }
99c475ab
FB
11825 break;
11826 case 0x7: /* ror */
9ee6e8bb 11827 if (s->condexec_mask) {
f669df27
AJ
11828 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11829 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11830 } else {
9ef39277 11831 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11832 gen_logic_CC(tmp2);
9ee6e8bb 11833 }
99c475ab
FB
11834 break;
11835 case 0x8: /* tst */
396e467c
FN
11836 tcg_gen_and_i32(tmp, tmp, tmp2);
11837 gen_logic_CC(tmp);
99c475ab 11838 rd = 16;
5899f386 11839 break;
99c475ab 11840 case 0x9: /* neg */
9ee6e8bb 11841 if (s->condexec_mask)
396e467c 11842 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11843 else
72485ec4 11844 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11845 break;
11846 case 0xa: /* cmp */
72485ec4 11847 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11848 rd = 16;
11849 break;
11850 case 0xb: /* cmn */
72485ec4 11851 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11852 rd = 16;
11853 break;
11854 case 0xc: /* orr */
396e467c 11855 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11856 if (!s->condexec_mask)
396e467c 11857 gen_logic_CC(tmp);
99c475ab
FB
11858 break;
11859 case 0xd: /* mul */
7b2919a0 11860 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11861 if (!s->condexec_mask)
396e467c 11862 gen_logic_CC(tmp);
99c475ab
FB
11863 break;
11864 case 0xe: /* bic */
f669df27 11865 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11866 if (!s->condexec_mask)
396e467c 11867 gen_logic_CC(tmp);
99c475ab
FB
11868 break;
11869 case 0xf: /* mvn */
396e467c 11870 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11871 if (!s->condexec_mask)
396e467c 11872 gen_logic_CC(tmp2);
99c475ab 11873 val = 1;
5899f386 11874 rm = rd;
99c475ab
FB
11875 break;
11876 }
11877 if (rd != 16) {
396e467c
FN
11878 if (val) {
11879 store_reg(s, rm, tmp2);
11880 if (op != 0xf)
7d1b0095 11881 tcg_temp_free_i32(tmp);
396e467c
FN
11882 } else {
11883 store_reg(s, rd, tmp);
7d1b0095 11884 tcg_temp_free_i32(tmp2);
396e467c
FN
11885 }
11886 } else {
7d1b0095
PM
11887 tcg_temp_free_i32(tmp);
11888 tcg_temp_free_i32(tmp2);
99c475ab
FB
11889 }
11890 break;
11891
11892 case 5:
11893 /* load/store register offset. */
11894 rd = insn & 7;
11895 rn = (insn >> 3) & 7;
11896 rm = (insn >> 6) & 7;
11897 op = (insn >> 9) & 7;
b0109805 11898 addr = load_reg(s, rn);
b26eefb6 11899 tmp = load_reg(s, rm);
b0109805 11900 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11901 tcg_temp_free_i32(tmp);
99c475ab 11902
c40c8556 11903 if (op < 3) { /* store */
b0109805 11904 tmp = load_reg(s, rd);
c40c8556
PM
11905 } else {
11906 tmp = tcg_temp_new_i32();
11907 }
99c475ab
FB
11908
11909 switch (op) {
11910 case 0: /* str */
9bb6558a 11911 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11912 break;
11913 case 1: /* strh */
9bb6558a 11914 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11915 break;
11916 case 2: /* strb */
9bb6558a 11917 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11918 break;
11919 case 3: /* ldrsb */
9bb6558a 11920 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11921 break;
11922 case 4: /* ldr */
9bb6558a 11923 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11924 break;
11925 case 5: /* ldrh */
9bb6558a 11926 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11927 break;
11928 case 6: /* ldrb */
9bb6558a 11929 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11930 break;
11931 case 7: /* ldrsh */
9bb6558a 11932 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11933 break;
11934 }
c40c8556 11935 if (op >= 3) { /* load */
b0109805 11936 store_reg(s, rd, tmp);
c40c8556
PM
11937 } else {
11938 tcg_temp_free_i32(tmp);
11939 }
7d1b0095 11940 tcg_temp_free_i32(addr);
99c475ab
FB
11941 break;
11942
11943 case 6:
11944 /* load/store word immediate offset */
11945 rd = insn & 7;
11946 rn = (insn >> 3) & 7;
b0109805 11947 addr = load_reg(s, rn);
99c475ab 11948 val = (insn >> 4) & 0x7c;
b0109805 11949 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11950
11951 if (insn & (1 << 11)) {
11952 /* load */
c40c8556 11953 tmp = tcg_temp_new_i32();
12dcc321 11954 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11955 store_reg(s, rd, tmp);
99c475ab
FB
11956 } else {
11957 /* store */
b0109805 11958 tmp = load_reg(s, rd);
12dcc321 11959 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11960 tcg_temp_free_i32(tmp);
99c475ab 11961 }
7d1b0095 11962 tcg_temp_free_i32(addr);
99c475ab
FB
11963 break;
11964
11965 case 7:
11966 /* load/store byte immediate offset */
11967 rd = insn & 7;
11968 rn = (insn >> 3) & 7;
b0109805 11969 addr = load_reg(s, rn);
99c475ab 11970 val = (insn >> 6) & 0x1f;
b0109805 11971 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11972
11973 if (insn & (1 << 11)) {
11974 /* load */
c40c8556 11975 tmp = tcg_temp_new_i32();
9bb6558a 11976 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11977 store_reg(s, rd, tmp);
99c475ab
FB
11978 } else {
11979 /* store */
b0109805 11980 tmp = load_reg(s, rd);
9bb6558a 11981 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11982 tcg_temp_free_i32(tmp);
99c475ab 11983 }
7d1b0095 11984 tcg_temp_free_i32(addr);
99c475ab
FB
11985 break;
11986
11987 case 8:
11988 /* load/store halfword immediate offset */
11989 rd = insn & 7;
11990 rn = (insn >> 3) & 7;
b0109805 11991 addr = load_reg(s, rn);
99c475ab 11992 val = (insn >> 5) & 0x3e;
b0109805 11993 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11994
11995 if (insn & (1 << 11)) {
11996 /* load */
c40c8556 11997 tmp = tcg_temp_new_i32();
9bb6558a 11998 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11999 store_reg(s, rd, tmp);
99c475ab
FB
12000 } else {
12001 /* store */
b0109805 12002 tmp = load_reg(s, rd);
9bb6558a 12003 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12004 tcg_temp_free_i32(tmp);
99c475ab 12005 }
7d1b0095 12006 tcg_temp_free_i32(addr);
99c475ab
FB
12007 break;
12008
12009 case 9:
12010 /* load/store from stack */
12011 rd = (insn >> 8) & 7;
b0109805 12012 addr = load_reg(s, 13);
99c475ab 12013 val = (insn & 0xff) * 4;
b0109805 12014 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12015
12016 if (insn & (1 << 11)) {
12017 /* load */
c40c8556 12018 tmp = tcg_temp_new_i32();
9bb6558a 12019 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12020 store_reg(s, rd, tmp);
99c475ab
FB
12021 } else {
12022 /* store */
b0109805 12023 tmp = load_reg(s, rd);
9bb6558a 12024 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12025 tcg_temp_free_i32(tmp);
99c475ab 12026 }
7d1b0095 12027 tcg_temp_free_i32(addr);
99c475ab
FB
12028 break;
12029
12030 case 10:
12031 /* add to high reg */
12032 rd = (insn >> 8) & 7;
5899f386
FB
12033 if (insn & (1 << 11)) {
12034 /* SP */
5e3f878a 12035 tmp = load_reg(s, 13);
5899f386
FB
12036 } else {
12037 /* PC. bit 1 is ignored. */
7d1b0095 12038 tmp = tcg_temp_new_i32();
5e3f878a 12039 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 12040 }
99c475ab 12041 val = (insn & 0xff) * 4;
5e3f878a
PB
12042 tcg_gen_addi_i32(tmp, tmp, val);
12043 store_reg(s, rd, tmp);
99c475ab
FB
12044 break;
12045
12046 case 11:
12047 /* misc */
12048 op = (insn >> 8) & 0xf;
12049 switch (op) {
12050 case 0:
12051 /* adjust stack pointer */
b26eefb6 12052 tmp = load_reg(s, 13);
99c475ab
FB
12053 val = (insn & 0x7f) * 4;
12054 if (insn & (1 << 7))
6a0d8a1d 12055 val = -(int32_t)val;
b26eefb6
PB
12056 tcg_gen_addi_i32(tmp, tmp, val);
12057 store_reg(s, 13, tmp);
99c475ab
FB
12058 break;
12059
9ee6e8bb
PB
12060 case 2: /* sign/zero extend. */
12061 ARCH(6);
12062 rd = insn & 7;
12063 rm = (insn >> 3) & 7;
b0109805 12064 tmp = load_reg(s, rm);
9ee6e8bb 12065 switch ((insn >> 6) & 3) {
b0109805
PB
12066 case 0: gen_sxth(tmp); break;
12067 case 1: gen_sxtb(tmp); break;
12068 case 2: gen_uxth(tmp); break;
12069 case 3: gen_uxtb(tmp); break;
9ee6e8bb 12070 }
b0109805 12071 store_reg(s, rd, tmp);
9ee6e8bb 12072 break;
99c475ab
FB
12073 case 4: case 5: case 0xc: case 0xd:
12074 /* push/pop */
b0109805 12075 addr = load_reg(s, 13);
5899f386
FB
12076 if (insn & (1 << 8))
12077 offset = 4;
99c475ab 12078 else
5899f386
FB
12079 offset = 0;
12080 for (i = 0; i < 8; i++) {
12081 if (insn & (1 << i))
12082 offset += 4;
12083 }
12084 if ((insn & (1 << 11)) == 0) {
b0109805 12085 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12086 }
99c475ab
FB
12087 for (i = 0; i < 8; i++) {
12088 if (insn & (1 << i)) {
12089 if (insn & (1 << 11)) {
12090 /* pop */
c40c8556 12091 tmp = tcg_temp_new_i32();
12dcc321 12092 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12093 store_reg(s, i, tmp);
99c475ab
FB
12094 } else {
12095 /* push */
b0109805 12096 tmp = load_reg(s, i);
12dcc321 12097 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12098 tcg_temp_free_i32(tmp);
99c475ab 12099 }
5899f386 12100 /* advance to the next address. */
b0109805 12101 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12102 }
12103 }
f764718d 12104 tmp = NULL;
99c475ab
FB
12105 if (insn & (1 << 8)) {
12106 if (insn & (1 << 11)) {
12107 /* pop pc */
c40c8556 12108 tmp = tcg_temp_new_i32();
12dcc321 12109 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
12110 /* don't set the pc until the rest of the instruction
12111 has completed */
12112 } else {
12113 /* push lr */
b0109805 12114 tmp = load_reg(s, 14);
12dcc321 12115 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12116 tcg_temp_free_i32(tmp);
99c475ab 12117 }
b0109805 12118 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 12119 }
5899f386 12120 if ((insn & (1 << 11)) == 0) {
b0109805 12121 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12122 }
99c475ab 12123 /* write back the new stack pointer */
b0109805 12124 store_reg(s, 13, addr);
99c475ab 12125 /* set the new PC value */
be5e7a76 12126 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 12127 store_reg_from_load(s, 15, tmp);
be5e7a76 12128 }
99c475ab
FB
12129 break;
12130
9ee6e8bb
PB
12131 case 1: case 3: case 9: case 11: /* czb */
12132 rm = insn & 7;
d9ba4830 12133 tmp = load_reg(s, rm);
9ee6e8bb
PB
12134 s->condlabel = gen_new_label();
12135 s->condjmp = 1;
12136 if (insn & (1 << 11))
cb63669a 12137 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 12138 else
cb63669a 12139 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 12140 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
12141 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12142 val = (uint32_t)s->pc + 2;
12143 val += offset;
12144 gen_jmp(s, val);
12145 break;
12146
12147 case 15: /* IT, nop-hint. */
12148 if ((insn & 0xf) == 0) {
12149 gen_nop_hint(s, (insn >> 4) & 0xf);
12150 break;
12151 }
12152 /* If Then. */
12153 s->condexec_cond = (insn >> 4) & 0xe;
12154 s->condexec_mask = insn & 0x1f;
12155 /* No actual code generated for this insn, just setup state. */
12156 break;
12157
06c949e6 12158 case 0xe: /* bkpt */
d4a2dc67
PM
12159 {
12160 int imm8 = extract32(insn, 0, 8);
be5e7a76 12161 ARCH(5);
c900a2e6 12162 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
06c949e6 12163 break;
d4a2dc67 12164 }
06c949e6 12165
19a6e31c
PM
12166 case 0xa: /* rev, and hlt */
12167 {
12168 int op1 = extract32(insn, 6, 2);
12169
12170 if (op1 == 2) {
12171 /* HLT */
12172 int imm6 = extract32(insn, 0, 6);
12173
12174 gen_hlt(s, imm6);
12175 break;
12176 }
12177
12178 /* Otherwise this is rev */
9ee6e8bb
PB
12179 ARCH(6);
12180 rn = (insn >> 3) & 0x7;
12181 rd = insn & 0x7;
b0109805 12182 tmp = load_reg(s, rn);
19a6e31c 12183 switch (op1) {
66896cb8 12184 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
12185 case 1: gen_rev16(tmp); break;
12186 case 3: gen_revsh(tmp); break;
19a6e31c
PM
12187 default:
12188 g_assert_not_reached();
9ee6e8bb 12189 }
b0109805 12190 store_reg(s, rd, tmp);
9ee6e8bb 12191 break;
19a6e31c 12192 }
9ee6e8bb 12193
d9e028c1
PM
12194 case 6:
12195 switch ((insn >> 5) & 7) {
12196 case 2:
12197 /* setend */
12198 ARCH(6);
9886ecdf
PB
12199 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12200 gen_helper_setend(cpu_env);
dcba3a8d 12201 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 12202 }
9ee6e8bb 12203 break;
d9e028c1
PM
12204 case 3:
12205 /* cps */
12206 ARCH(6);
12207 if (IS_USER(s)) {
12208 break;
8984bd2e 12209 }
b53d8923 12210 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
12211 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
12212 /* FAULTMASK */
12213 if (insn & 1) {
12214 addr = tcg_const_i32(19);
12215 gen_helper_v7m_msr(cpu_env, addr, tmp);
12216 tcg_temp_free_i32(addr);
12217 }
12218 /* PRIMASK */
12219 if (insn & 2) {
12220 addr = tcg_const_i32(16);
12221 gen_helper_v7m_msr(cpu_env, addr, tmp);
12222 tcg_temp_free_i32(addr);
12223 }
12224 tcg_temp_free_i32(tmp);
12225 gen_lookup_tb(s);
12226 } else {
12227 if (insn & (1 << 4)) {
12228 shift = CPSR_A | CPSR_I | CPSR_F;
12229 } else {
12230 shift = 0;
12231 }
12232 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 12233 }
d9e028c1
PM
12234 break;
12235 default:
12236 goto undef;
9ee6e8bb
PB
12237 }
12238 break;
12239
99c475ab
FB
12240 default:
12241 goto undef;
12242 }
12243 break;
12244
12245 case 12:
a7d3970d 12246 {
99c475ab 12247 /* load/store multiple */
f764718d 12248 TCGv_i32 loaded_var = NULL;
99c475ab 12249 rn = (insn >> 8) & 0x7;
b0109805 12250 addr = load_reg(s, rn);
99c475ab
FB
12251 for (i = 0; i < 8; i++) {
12252 if (insn & (1 << i)) {
99c475ab
FB
12253 if (insn & (1 << 11)) {
12254 /* load */
c40c8556 12255 tmp = tcg_temp_new_i32();
12dcc321 12256 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
12257 if (i == rn) {
12258 loaded_var = tmp;
12259 } else {
12260 store_reg(s, i, tmp);
12261 }
99c475ab
FB
12262 } else {
12263 /* store */
b0109805 12264 tmp = load_reg(s, i);
12dcc321 12265 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12266 tcg_temp_free_i32(tmp);
99c475ab 12267 }
5899f386 12268 /* advance to the next address */
b0109805 12269 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12270 }
12271 }
b0109805 12272 if ((insn & (1 << rn)) == 0) {
a7d3970d 12273 /* base reg not in list: base register writeback */
b0109805
PB
12274 store_reg(s, rn, addr);
12275 } else {
a7d3970d
PM
12276 /* base reg in list: if load, complete it now */
12277 if (insn & (1 << 11)) {
12278 store_reg(s, rn, loaded_var);
12279 }
7d1b0095 12280 tcg_temp_free_i32(addr);
b0109805 12281 }
99c475ab 12282 break;
a7d3970d 12283 }
99c475ab
FB
12284 case 13:
12285 /* conditional branch or swi */
12286 cond = (insn >> 8) & 0xf;
12287 if (cond == 0xe)
12288 goto undef;
12289
12290 if (cond == 0xf) {
12291 /* swi */
eaed129d 12292 gen_set_pc_im(s, s->pc);
d4a2dc67 12293 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 12294 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
12295 break;
12296 }
12297 /* generate a conditional jump to next instruction */
e50e6a20 12298 s->condlabel = gen_new_label();
39fb730a 12299 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 12300 s->condjmp = 1;
99c475ab
FB
12301
12302 /* jump to the offset */
5899f386 12303 val = (uint32_t)s->pc + 2;
99c475ab 12304 offset = ((int32_t)insn << 24) >> 24;
5899f386 12305 val += offset << 1;
8aaca4c0 12306 gen_jmp(s, val);
99c475ab
FB
12307 break;
12308
12309 case 14:
358bf29e 12310 if (insn & (1 << 11)) {
296e5a0a
PM
12311 /* thumb_insn_is_16bit() ensures we can't get here for
12312 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12313 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12314 */
12315 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12316 ARCH(5);
12317 offset = ((insn & 0x7ff) << 1);
12318 tmp = load_reg(s, 14);
12319 tcg_gen_addi_i32(tmp, tmp, offset);
12320 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12321
12322 tmp2 = tcg_temp_new_i32();
12323 tcg_gen_movi_i32(tmp2, s->pc | 1);
12324 store_reg(s, 14, tmp2);
12325 gen_bx(s, tmp);
358bf29e
PB
12326 break;
12327 }
9ee6e8bb 12328 /* unconditional branch */
99c475ab
FB
12329 val = (uint32_t)s->pc;
12330 offset = ((int32_t)insn << 21) >> 21;
12331 val += (offset << 1) + 2;
8aaca4c0 12332 gen_jmp(s, val);
99c475ab
FB
12333 break;
12334
12335 case 15:
296e5a0a
PM
12336 /* thumb_insn_is_16bit() ensures we can't get here for
12337 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12338 */
12339 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12340
12341 if (insn & (1 << 11)) {
12342 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12343 offset = ((insn & 0x7ff) << 1) | 1;
12344 tmp = load_reg(s, 14);
12345 tcg_gen_addi_i32(tmp, tmp, offset);
12346
12347 tmp2 = tcg_temp_new_i32();
12348 tcg_gen_movi_i32(tmp2, s->pc | 1);
12349 store_reg(s, 14, tmp2);
12350 gen_bx(s, tmp);
12351 } else {
12352 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12353 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12354
12355 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
12356 }
9ee6e8bb 12357 break;
99c475ab
FB
12358 }
12359 return;
9ee6e8bb 12360illegal_op:
99c475ab 12361undef:
73710361
GB
12362 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12363 default_exception_el(s));
99c475ab
FB
12364}
12365
541ebcd4
PM
12366static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12367{
12368 /* Return true if the insn at dc->pc might cross a page boundary.
12369 * (False positives are OK, false negatives are not.)
5b8d7289
PM
12370 * We know this is a Thumb insn, and our caller ensures we are
12371 * only called if dc->pc is less than 4 bytes from the page
12372 * boundary, so we cross the page if the first 16 bits indicate
12373 * that this is a 32 bit insn.
541ebcd4 12374 */
5b8d7289 12375 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 12376
5b8d7289 12377 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
12378}
12379
/*
 * TranslatorOps init_disas_context hook: populate the per-TB DisasContext
 * from the CPU state and the TB flags before any insns are translated.
 * Also bounds base.max_insns and allocates the TCG temporaries used by
 * the VFP/Neon/iwMMXt decoders.
 */
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Unpack the remaining translation-relevant state from the TB flags. */
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    /* IT-block state: low 4 bits are the mask (pre-shifted), high bits
     * the condition; see the condexec handling in arm_tr_tb_start().
     */
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1. */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    /* Allocate the TCG temporaries shared by the FP/vector decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}
12460
b1476854
LV
12461static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
12462{
12463 DisasContext *dc = container_of(dcbase, DisasContext, base);
12464
12465 /* A note on handling of the condexec (IT) bits:
12466 *
12467 * We want to avoid the overhead of having to write the updated condexec
12468 * bits back to the CPUARMState for every instruction in an IT block. So:
12469 * (1) if the condexec bits are not already zero then we write
12470 * zero back into the CPUARMState now. This avoids complications trying
12471 * to do it at the end of the block. (For example if we don't do this
12472 * it's hard to identify whether we can safely skip writing condexec
12473 * at the end of the TB, which we definitely want to do for the case
12474 * where a TB doesn't do anything with the IT state at all.)
12475 * (2) if we are going to leave the TB then we call gen_set_condexec()
12476 * which will write the correct value into CPUARMState if zero is wrong.
12477 * This is done both for leaving the TB at the end, and for leaving
12478 * it because of an exception we know will happen, which is done in
12479 * gen_exception_insn(). The latter is necessary because we need to
12480 * leave the TB with the PC/IT state just prior to execution of the
12481 * instruction which caused the exception.
12482 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12483 * then the CPUARMState will be wrong and we need to reset it.
12484 * This is handled in the same way as restoration of the
12485 * PC in these situations; we save the value of the condexec bits
12486 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12487 * then uses this to restore them after an exception.
12488 *
12489 * Note that there are no instructions which can read the condexec
12490 * bits, and none which can write non-static values to them, so
12491 * we don't need to care about whether CPUARMState is correct in the
12492 * middle of a TB.
12493 */
12494
12495 /* Reset the conditional execution bits immediately. This avoids
12496 complications trying to do it at the end of the block. */
12497 if (dc->condexec_mask || dc->condexec_cond) {
12498 TCGv_i32 tmp = tcg_temp_new_i32();
12499 tcg_gen_movi_i32(tmp, 0);
12500 store_cpu_field(tmp, condexec_bits);
12501 }
23169224 12502 tcg_clear_temp_count();
b1476854
LV
12503}
12504
f62bd897
LV
12505static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12506{
12507 DisasContext *dc = container_of(dcbase, DisasContext, base);
12508
f62bd897
LV
12509 tcg_gen_insn_start(dc->pc,
12510 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12511 0);
15fa08f8 12512 dc->insn_start = tcg_last_op();
f62bd897
LV
12513}
12514
a68956ad
LV
12515static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12516 const CPUBreakpoint *bp)
12517{
12518 DisasContext *dc = container_of(dcbase, DisasContext, base);
12519
12520 if (bp->flags & BP_CPU) {
12521 gen_set_condexec(dc);
12522 gen_set_pc_im(dc, dc->pc);
12523 gen_helper_check_breakpoints(cpu_env);
12524 /* End the TB early; it's likely not going to be executed */
12525 dc->base.is_jmp = DISAS_TOO_MANY;
12526 } else {
12527 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12528 /* The address covered by the breakpoint must be
12529 included in [tb->pc, tb->pc + tb->size) in order
12530 to for it to be properly cleared -- thus we
12531 increment the PC here so that the logic setting
12532 tb->size below does the right thing. */
12533 /* TODO: Advance PC by correct instruction length to
12534 * avoid disassembler error messages */
12535 dc->pc += 2;
12536 dc->base.is_jmp = DISAS_NORETURN;
12537 }
12538
12539 return true;
12540}
12541
722ef0a5 12542static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 12543{
13189a90
LV
12544#ifdef CONFIG_USER_ONLY
12545 /* Intercept jump to the magic kernel page. */
12546 if (dc->pc >= 0xffff0000) {
12547 /* We always get here via a jump, so know we are not in a
12548 conditional execution block. */
12549 gen_exception_internal(EXCP_KERNEL_TRAP);
12550 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12551 return true;
13189a90
LV
12552 }
12553#endif
12554
12555 if (dc->ss_active && !dc->pstate_ss) {
12556 /* Singlestep state is Active-pending.
12557 * If we're in this state at the start of a TB then either
12558 * a) we just took an exception to an EL which is being debugged
12559 * and this is the first insn in the exception handler
12560 * b) debug exceptions were masked and we just unmasked them
12561 * without changing EL (eg by clearing PSTATE.D)
12562 * In either case we're going to take a swstep exception in the
12563 * "did not step an insn" case, and so the syndrome ISV and EX
12564 * bits should be zero.
12565 */
12566 assert(dc->base.num_insns == 1);
12567 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12568 default_exception_el(dc));
12569 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12570 return true;
13189a90
LV
12571 }
12572
722ef0a5
RH
12573 return false;
12574}
13189a90 12575
d0264d86 12576static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12577{
13189a90
LV
12578 if (dc->condjmp && !dc->base.is_jmp) {
12579 gen_set_label(dc->condlabel);
12580 dc->condjmp = 0;
12581 }
13189a90 12582 dc->base.pc_next = dc->pc;
23169224 12583 translator_loop_temp_check(&dc->base);
13189a90
LV
12584}
12585
722ef0a5
RH
12586static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12587{
12588 DisasContext *dc = container_of(dcbase, DisasContext, base);
12589 CPUARMState *env = cpu->env_ptr;
12590 unsigned int insn;
12591
12592 if (arm_pre_translate_insn(dc)) {
12593 return;
12594 }
12595
12596 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12597 dc->insn = insn;
722ef0a5
RH
12598 dc->pc += 4;
12599 disas_arm_insn(dc, insn);
12600
d0264d86
RH
12601 arm_post_translate_insn(dc);
12602
12603 /* ARM is a fixed-length ISA. We performed the cross-page check
12604 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12605}
12606
dcf14dfb
PM
12607static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12608{
12609 /* Return true if this Thumb insn is always unconditional,
12610 * even inside an IT block. This is true of only a very few
12611 * instructions: BKPT, HLT, and SG.
12612 *
12613 * A larger class of instructions are UNPREDICTABLE if used
12614 * inside an IT block; we do not need to detect those here, because
12615 * what we do by default (perform the cc check and update the IT
12616 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12617 * choice for those situations.
12618 *
12619 * insn is either a 16-bit or a 32-bit instruction; the two are
12620 * distinguishable because for the 16-bit case the top 16 bits
12621 * are zeroes, and that isn't a valid 32-bit encoding.
12622 */
12623 if ((insn & 0xffffff00) == 0xbe00) {
12624 /* BKPT */
12625 return true;
12626 }
12627
12628 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12629 !arm_dc_feature(s, ARM_FEATURE_M)) {
12630 /* HLT: v8A only. This is unconditional even when it is going to
12631 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12632 * For v7 cores this was a plain old undefined encoding and so
12633 * honours its cc check. (We might be using the encoding as
12634 * a semihosting trap, but we don't change the cc check behaviour
12635 * on that account, because a debugger connected to a real v7A
12636 * core and emulating semihosting traps by catching the UNDEF
12637 * exception would also only see cases where the cc check passed.
12638 * No guest code should be trying to do a HLT semihosting trap
12639 * in an IT block anyway.
12640 */
12641 return true;
12642 }
12643
12644 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12645 arm_dc_feature(s, ARM_FEATURE_M)) {
12646 /* SG: v8M only */
12647 return true;
12648 }
12649
12650 return false;
12651}
12652
/*
 * TranslatorOps translate_insn hook for Thumb mode: fetch one 16- or
 * 32-bit insn, emit the IT-block condition check if needed, decode it,
 * advance the IT state machine, and decide whether the TB must end at a
 * page boundary.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword; a second is appended for 32-bit insns. */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    /* Inside an IT block (and unless the insn is one of the few that are
     * always unconditional), emit a conditional skip over the insn.
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            dc->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, dc->condlabel);
            dc->condjmp = 1;
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next.  This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12723
/*
 * TranslatorOps tb_stop hook: emit the TB epilogue.  This writes back the
 * condexec state, then generates the "how we leave the TB" code for the
 * main (condition-passed/unconditional) path according to base.is_jmp,
 * and finally the "condition failed" path if a conditional branch/trap
 * insn left dc->condjmp set.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* Argument is the insn length, used to advance the PC:
             * 2 for a 16-bit Thumb WFI, else 4.
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
12846
4013f7fc
LV
12847static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12848{
12849 DisasContext *dc = container_of(dcbase, DisasContext, base);
12850
12851 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12852 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12853}
12854
/* Hook table driving translator_loop() for A32 (non-Thumb) guest code;
 * selected in gen_intermediate_code() below.
 */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12864
/* Hook table driving translator_loop() for Thumb guest code; identical to
 * arm_translator_ops except for the per-insn translate hook.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12874
70d3c035 12875/* generate intermediate code for basic block 'tb'. */
23169224 12876void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12877{
23169224
LV
12878 DisasContext dc;
12879 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12880
722ef0a5
RH
12881 if (ARM_TBFLAG_THUMB(tb->flags)) {
12882 ops = &thumb_translator_ops;
12883 }
23169224 12884#ifdef TARGET_AARCH64
70d3c035 12885 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 12886 ops = &aarch64_translator_ops;
2c0262af
FB
12887 }
12888#endif
23169224
LV
12889
12890 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
12891}
12892
/* Names for the CPSR mode field, indexed by (psr & 0xf) in
 * arm_cpu_dump_state() below; "???" marks encodings with no name here.
 */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 12897
/*
 * Dump CPU state for debugging via @cpu_fprintf: general-purpose
 * registers, then the status register (XPSR for M-profile, CPSR
 * otherwise), then optionally (CPU_DUMP_FPU in @flags) the VFP
 * registers.  AArch64 state is delegated to aarch64_cpu_dump_state().
 */
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    /* R0..R15, four per line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: show XPSR plus security state and privilege mode. */
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        /* A/R-profile: show CPSR flags, security state and mode name. */
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        /* 16 D registers for plain VFP, 32 with VFP3. */
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
a6b025d3 12984
bad729e2
RH
12985void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12986 target_ulong *data)
d2856f1a 12987{
3926cc84 12988 if (is_a64(env)) {
bad729e2 12989 env->pc = data[0];
40f860cd 12990 env->condexec_bits = 0;
aaa1f954 12991 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12992 } else {
bad729e2
RH
12993 env->regs[15] = data[0];
12994 env->condexec_bits = data[1];
aaa1f954 12995 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12996 }
d2856f1a 12997}