]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: [tcg,a64] Port to breakpoint_check
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
/* Feature-test convenience macros; all assume a DisasContext named 's'
 * is in scope.  Each expands to a runtime check of the CPU's feature bits.
 */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* Bail out to the decoder's illegal_op label if the required
 * architecture version is not present on this CPU.
 */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

/* In user-mode emulation everything runs unprivileged. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
60
TCGv_env cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* TCG globals mirroring the AArch32 general registers r0..r15. */
static TCGv_i32 cpu_R[16];
/* Condition flags, held one-per-variable for cheap flag updates. */
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
/* Exclusive-monitor state for LDREX/STREX emulation. */
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME: These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

/* Names used when registering cpu_R[] with TCG (r15 shown as "pc"). */
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
/* initialize TCG globals.  Called once at startup; registers the fixed
 * env pointer plus TCG global variables backed by CPUARMState fields
 * (general registers, NZCV flags, exclusive-monitor state), then lets
 * the AArch64 front end register its own globals.
 */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}
104
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,      /* mask extracting the Rt register number */
    ISSInvalid = (1 << 5),  /* no valid ISS info for this access */
    ISSIsAcqRel = (1 << 6), /* load-acquire / store-release access */
    ISSIsWrite = (1 << 7),  /* access is a store */
    ISSIs16Bit = (1 << 8),  /* insn is a 16-bit (Thumb) encoding */
} ISSInfo;
116
/* Save the syndrome information for a Data Abort.
 * memop supplies the access size (MO_SIZE) and sign extension (MO_SIGN);
 * issinfo packs the Rt register number plus the ISS* flags above.
 * Stores nothing when the caller flagged the info invalid or when
 * Rt is the PC (which never produces ISS information on AArch32).
 */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
147
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        /* Secure world: demote to the secure EL0 index. */
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MNegPri:
        /* M profile: unprivileged accesses use the user index. */
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_S2NS:
    default:
        /* Stage-2 indexes can never be current at translation time. */
        g_assert_not_reached();
    }
}
173
39d5492a 174static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 175{
39d5492a 176 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
177 tcg_gen_ld_i32(tmp, cpu_env, offset);
178 return tmp;
179}
180
0ecb72a5 181#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 182
39d5492a 183static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
184{
185 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 186 tcg_temp_free_i32(var);
d9ba4830
PB
187}
188
189#define store_cpu_field(var, name) \
0ecb72a5 190 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 191
b26eefb6 192/* Set a variable to the value of a CPU register. */
39d5492a 193static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
194{
195 if (reg == 15) {
196 uint32_t addr;
b90372ad 197 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
198 if (s->thumb)
199 addr = (long)s->pc + 2;
200 else
201 addr = (long)s->pc + 4;
202 tcg_gen_movi_i32(var, addr);
203 } else {
155c3eac 204 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
205 }
206}
207
208/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 209static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 210{
39d5492a 211 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
212 load_reg_var(s, tmp, reg);
213 return tmp;
214}
215
216/* Set a CPU register. The source must be a temporary and will be
217 marked as dead. */
39d5492a 218static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
219{
220 if (reg == 15) {
9b6a3ea7
PM
221 /* In Thumb mode, we must ignore bit 0.
222 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
223 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
224 * We choose to ignore [1:0] in ARM mode for all architecture versions.
225 */
226 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
dcba3a8d 227 s->base.is_jmp = DISAS_JUMP;
b26eefb6 228 }
155c3eac 229 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 230 tcg_temp_free_i32(var);
b26eefb6
PB
231}
232
/* Value extensions.  Each operates in place on a 32-bit temp. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* The dual-byte (xTB16) extensions are implemented in helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
b26eefb6 242
39d5492a 243static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 244{
39d5492a 245 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 246 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
247 tcg_temp_free_i32(tmp_mask);
248}
d9ba4830
PB
249/* Set NZCV flags from the high 4 bits of var. */
250#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
251
d4a2dc67 252static void gen_exception_internal(int excp)
d9ba4830 253{
d4a2dc67
PM
254 TCGv_i32 tcg_excp = tcg_const_i32(excp);
255
256 assert(excp_is_internal(excp));
257 gen_helper_exception_internal(cpu_env, tcg_excp);
258 tcg_temp_free_i32(tcg_excp);
259}
260
73710361 261static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
262{
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
264 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 265 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 266
73710361
GB
267 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
268 tcg_syn, tcg_el);
269
270 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
271 tcg_temp_free_i32(tcg_syn);
272 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
273}
274
50225ad0
PM
275static void gen_ss_advance(DisasContext *s)
276{
277 /* If the singlestep state is Active-not-pending, advance to
278 * Active-pending.
279 */
280 if (s->ss_active) {
281 s->pstate_ss = 0;
282 gen_helper_clear_pstate_ss(cpu_env);
283 }
284}
285
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    /* The exception ends the TB; nothing after it can be reached. */
    s->base.is_jmp = DISAS_NORETURN;
}
302
5425415e
PM
303static void gen_singlestep_exception(DisasContext *s)
304{
305 /* Generate the right kind of exception for singlestep, which is
306 * either the architectural singlestep or EXCP_DEBUG for QEMU's
307 * gdb singlestepping.
308 */
309 if (s->ss_active) {
310 gen_step_complete_exception(s);
311 } else {
312 gen_exception_internal(EXCP_DEBUG);
313 }
314}
315
b636649f
PM
316static inline bool is_singlestepping(DisasContext *s)
317{
318 /* Return true if we are singlestepping either because of
319 * architectural singlestep or QEMU gdbstub singlestep. This does
320 * not include the command line '-singlestep' mode which is rather
321 * misnamed as it only means "one instruction per TB" and doesn't
322 * affect the code we generate.
323 */
dcba3a8d 324 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
325}
326
/* Dual signed 16x16->32 multiply, in place: on return
 * a = sext16(a) * sext16(b) and b = (a >> 16) * (b >> 16).
 * The statement order matters because a and b are overwritten.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* Low-half product computed first into tmp1 ... */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* ... then the high-half product clobbers b, and tmp1 replaces a. */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
341
342/* Byteswap each halfword. */
39d5492a 343static void gen_rev16(TCGv_i32 var)
3670669c 344{
39d5492a 345 TCGv_i32 tmp = tcg_temp_new_i32();
68cedf73 346 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
3670669c 347 tcg_gen_shri_i32(tmp, var, 8);
68cedf73
AJ
348 tcg_gen_and_i32(tmp, tmp, mask);
349 tcg_gen_and_i32(var, var, mask);
3670669c 350 tcg_gen_shli_i32(var, var, 8);
3670669c 351 tcg_gen_or_i32(var, var, tmp);
68cedf73 352 tcg_temp_free_i32(mask);
7d1b0095 353 tcg_temp_free_i32(tmp);
3670669c
PB
354}
355
356/* Byteswap low halfword and sign extend. */
39d5492a 357static void gen_revsh(TCGv_i32 var)
3670669c 358{
1a855029
AJ
359 tcg_gen_ext16u_i32(var, var);
360 tcg_gen_bswap16_i32(var, var);
361 tcg_gen_ext16s_i32(var, var);
3670669c
PB
362}
363
838fa72d 364/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 365static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 366{
838fa72d
AJ
367 TCGv_i64 tmp64 = tcg_temp_new_i64();
368
369 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 370 tcg_temp_free_i32(b);
838fa72d
AJ
371 tcg_gen_shli_i64(tmp64, tmp64, 32);
372 tcg_gen_add_i64(a, tmp64, a);
373
374 tcg_temp_free_i64(tmp64);
375 return a;
376}
377
378/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 379static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
380{
381 TCGv_i64 tmp64 = tcg_temp_new_i64();
382
383 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 384 tcg_temp_free_i32(b);
838fa72d
AJ
385 tcg_gen_shli_i64(tmp64, tmp64, 32);
386 tcg_gen_sub_i64(a, tmp64, a);
387
388 tcg_temp_free_i64(tmp64);
389 return a;
3670669c
PB
390}
391
5e3f878a 392/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 393static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 394{
39d5492a
PM
395 TCGv_i32 lo = tcg_temp_new_i32();
396 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 397 TCGv_i64 ret;
5e3f878a 398
831d7fe8 399 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 400 tcg_temp_free_i32(a);
7d1b0095 401 tcg_temp_free_i32(b);
831d7fe8
RH
402
403 ret = tcg_temp_new_i64();
404 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
405 tcg_temp_free_i32(lo);
406 tcg_temp_free_i32(hi);
831d7fe8
RH
407
408 return ret;
5e3f878a
PB
409}
410
39d5492a 411static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 412{
39d5492a
PM
413 TCGv_i32 lo = tcg_temp_new_i32();
414 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 415 TCGv_i64 ret;
5e3f878a 416
831d7fe8 417 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 418 tcg_temp_free_i32(a);
7d1b0095 419 tcg_temp_free_i32(b);
831d7fe8
RH
420
421 ret = tcg_temp_new_i64();
422 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
423 tcg_temp_free_i32(lo);
424 tcg_temp_free_i32(hi);
831d7fe8
RH
425
426 return ret;
5e3f878a
PB
427}
428
8f01245e 429/* Swap low and high halfwords. */
39d5492a 430static void gen_swap_half(TCGv_i32 var)
8f01245e 431{
39d5492a 432 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
433 tcg_gen_shri_i32(tmp, var, 16);
434 tcg_gen_shli_i32(var, var, 16);
435 tcg_gen_or_i32(var, var, tmp);
7d1b0095 436 tcg_temp_free_i32(tmp);
8f01245e
PB
437}
438
b26eefb6
PB
439/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
440 tmp = (t0 ^ t1) & 0x8000;
441 t0 &= ~0x8000;
442 t1 &= ~0x8000;
443 t0 = (t0 + t1) ^ tmp;
444 */
445
39d5492a 446static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 447{
39d5492a 448 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
449 tcg_gen_xor_i32(tmp, t0, t1);
450 tcg_gen_andi_i32(tmp, tmp, 0x8000);
451 tcg_gen_andi_i32(t0, t0, ~0x8000);
452 tcg_gen_andi_i32(t1, t1, ~0x8000);
453 tcg_gen_add_i32(t0, t0, t1);
454 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
455 tcg_temp_free_i32(tmp);
456 tcg_temp_free_i32(t1);
b26eefb6
PB
457}
458
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    /* Shifting right by 31 leaves exactly bit 31 in CF (0 or 1). */
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
464
/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv_i32 var)
{
    /* NF holds the full result (its sign bit is N); ZF is nonzero
     * when Z is clear.  C and V are left unchanged.
     */
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
471
/* T0 += T1 + CF.  Flags are not updated. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
478
/* dest = T0 + T1 + CF.  Flags are not updated. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
485
/* dest = T0 - T1 + CF - 1 (ARM subtract-with-carry).  Flags unchanged. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
493
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* 33-bit add via add2: sum lands in NF, carry-out in CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff the operands share
     * a sign and the result's sign differs from it.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
507
/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: first fold in the incoming carry,
         * then add t1; CF accumulates the carry-out of the whole sum.
         */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the 33-bit sum in a 64-bit temp and split
         * result/carry with extr.
         */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1), as in gen_add_CC. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
535
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry is "no borrow": C = (t0 >= t1) unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V = (result ^ t0) & (t0 ^ t1): overflow iff the operands differ
     * in sign and the result's sign differs from t0's.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
550
e77f0832 551/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 552static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 553{
39d5492a 554 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
555 tcg_gen_not_i32(tmp, t1);
556 gen_adc_CC(dest, t0, tmp);
39d5492a 557 tcg_temp_free_i32(tmp);
2de68a49
RH
558}
559
/* Register-count LSL/LSR with ARM semantics: the count is taken from
 * the low byte of t1; counts >= 32 yield 0.  The movcond substitutes a
 * zero operand when the count exceeds 31, then shifts by (count & 0x1f).
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
578
/* Register-count ASR with ARM semantics: the count comes from the low
 * byte of t1 and is clamped to 31, so counts >= 32 fill with the sign bit.
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    /* tmp1 = min(count, 31) */
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
590
39d5492a 591static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 592{
39d5492a
PM
593 TCGv_i32 c0 = tcg_const_i32(0);
594 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
595 tcg_gen_neg_i32(tmp, src);
596 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
597 tcg_temp_free_i32(c0);
598 tcg_temp_free_i32(tmp);
599}
ad69471c 600
/* Set CF to bit 'shift' of var (bit 0 when shift == 0), i.e. the last
 * bit shifted out by an immediate shift.
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* For shift == 31 the shri already isolates a single bit. */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 612
/* Shift by immediate.  Includes special handling for shift == 0.
 * shiftop encodes 0=LSL, 1=LSR, 2=ASR, 3=ROR; an immediate of 0 means
 * LSR #32, ASR #32 and RRX respectively for ops 1-3.  When 'flags' is
 * set, CF receives the last bit shifted out (via shifter_out_im).
 */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* LSR #32: result is 0, CF gets the old bit 31. */
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        /* ASR #32 behaves as ASR #31 for the value (sign fill). */
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            /* RRX: rotate right one bit through the carry flag. */
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
662
/* Shift var by a register-supplied count.  With 'flags' set the
 * flag-updating helpers are used (they also write CF); otherwise the
 * plain gen_shl/gen_shr/gen_sar wrappers apply ARM >=32-count
 * semantics.  'shift' is a temporary and is freed here.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: /* rotate count is naturally modulo 32 */
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
690
/* Dispatch table for the ARM parallel add/subtract insns: op2 selects
 * the operation, the PAS_OP argument supplies the saturation/flavour
 * prefix (s/u need the GE-flags pointer; q/sh/uq/uh do not).
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
    /* s/u variants take a pointer to env->GE so the helper can set
     * the SIMD greater-or-equal flags.
     */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
    /* Saturating/halving variants do not touch GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
 * Here op1 selects the operation and op2 selects the flavour prefix --
 * the reverse of the ARM-encoding dispatcher above.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
    /* s/u variants take a pointer to env->GE for the SIMD GE flags. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
    /* Saturating/halving variants do not touch GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
783
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 * Fills in cmp with a TCG condition, the value to compare against zero,
 * and whether that value is a TCG global (flag variable) or a fresh
 * temporary that arm_free_cc() must release.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    /* Each pair of condition codes tests the same expression; the odd
     * member is handled by inverting the condition at the end.
     */
    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
873
874void arm_free_cc(DisasCompare *cmp)
875{
876 if (!cmp->value_global) {
877 tcg_temp_free_i32(cmp->value);
878 }
879}
880
/* Branch to label when the prepared comparison (value vs 0) holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}
885
/* Branch to label if condition code cc holds: build the comparison,
 * emit the branch, and free any temporary it created.
 */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 893
/* Indexed by data-processing opcode: 1 for the logical ops (whose
 * flags come from the result via gen_logic_CC), 0 for arithmetic ops.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
4d5e8c96
PM
/* Sync the decoder's conditional-execution state (s->condexec_cond /
 * s->condexec_mask) back into env->condexec_bits.  No-op when not
 * inside a conditional-execution block.
 */
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
922
/* Load an immediate into the PC (r15). */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
927
d9ba4830
PB
928/* Set PC and Thumb state from an immediate address. */
929static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 930{
39d5492a 931 TCGv_i32 tmp;
99c475ab 932
dcba3a8d 933 s->base.is_jmp = DISAS_JUMP;
d9ba4830 934 if (s->thumb != (addr & 1)) {
7d1b0095 935 tmp = tcg_temp_new_i32();
d9ba4830 936 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 937 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 938 tcg_temp_free_i32(tmp);
d9ba4830 939 }
155c3eac 940 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
941}
942
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);  /* PC ignores bit 0 */
    tcg_gen_andi_i32(var, var, 1);         /* bit 0 selects Thumb state */
    store_cpu_field(var, thumb);
}
951
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}
967
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();

    /* Is the new PC value in the magic range indicating exception return?
     * (i.e. unsigned PC >= 0xff000000)
     */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
996
21aeb343
JR
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
1008
be5e7a76
DES
/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        /* gen_bx_excret also handles M-profile exception-return magic. */
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
1021
e334bd31
PB
/* Constant-foldable flag: nonzero when building the user-mode emulator. */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
1027
08307563
PM
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        /* BE32 (SCTLR.B) system mode: XOR the low address bits so that
         * sub-word accesses hit the correct bytes within the word.
         */
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
1047
7f5616f5
RH
/* Emit a guest load (size per 'opc') from AArch32 address 'a32' into
 * 'val', using mmu index 'index'.  The address temp is freed here.
 */
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

/* Store counterpart of gen_aa32_ld_i32. */
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
08307563 1063
7f5616f5 1064#define DO_GEN_LD(SUFF, OPC) \
12dcc321 1065static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1066 TCGv_i32 a32, int index) \
08307563 1067{ \
7f5616f5 1068 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1069} \
1070static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1071 TCGv_i32 val, \
1072 TCGv_i32 a32, int index, \
1073 ISSInfo issinfo) \
1074{ \
1075 gen_aa32_ld##SUFF(s, val, a32, index); \
1076 disas_set_da_iss(s, OPC, issinfo); \
08307563
PM
1077}
1078
7f5616f5 1079#define DO_GEN_ST(SUFF, OPC) \
12dcc321 1080static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1081 TCGv_i32 a32, int index) \
08307563 1082{ \
7f5616f5 1083 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1084} \
1085static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1086 TCGv_i32 val, \
1087 TCGv_i32 a32, int index, \
1088 ISSInfo issinfo) \
1089{ \
1090 gen_aa32_st##SUFF(s, val, a32, index); \
1091 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
08307563
PM
1092}
1093
7f5616f5 1094static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1095{
e334bd31
PB
1096 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1097 if (!IS_USER_ONLY && s->sctlr_b) {
1098 tcg_gen_rotri_i64(val, val, 32);
1099 }
08307563
PM
1100}
1101
7f5616f5
RH
/* 64-bit guest load, with the BE32 word-swap fixup applied afterwards. */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

/* Convenience wrapper: 64-bit load using the translation's endianness. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}
1116
/* 64-bit guest store; in BE32 system mode the two words of the value
 * are swapped into a temp (leaving 'val' untouched) before storing.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

/* Convenience wrapper: 64-bit store using the translation's endianness. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1139
7f5616f5
RH
/* Instantiate the sized load/store helpers: sign/zero-extending byte
 * and halfword loads, 32-bit load, and byte/halfword/word stores.
 */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
08307563 1148
37e6456e
PM
/* Emit code for an HVC (hypervisor call) insn with immediate imm16. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}
1166
/* Emit code for an SMC (secure monitor call) insn. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}
1181
d4a2dc67
PM
/* Raise a QEMU-internal exception at the address of the current insn
 * (s->pc - offset), syncing the condexec state first.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

/* As above, but for architectural exceptions with syndrome 'syn' and
 * destination exception level 'target_el'.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}
1198
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}
1205
19a6e31c
PM
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1232
b0109805 1233static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1234 TCGv_i32 var)
2c0262af 1235{
1e8d4eec 1236 int val, rm, shift, shiftop;
39d5492a 1237 TCGv_i32 offset;
2c0262af
FB
1238
1239 if (!(insn & (1 << 25))) {
1240 /* immediate */
1241 val = insn & 0xfff;
1242 if (!(insn & (1 << 23)))
1243 val = -val;
537730b9 1244 if (val != 0)
b0109805 1245 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1246 } else {
1247 /* shift/register */
1248 rm = (insn) & 0xf;
1249 shift = (insn >> 7) & 0x1f;
1e8d4eec 1250 shiftop = (insn >> 5) & 3;
b26eefb6 1251 offset = load_reg(s, rm);
9a119ff6 1252 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1253 if (!(insn & (1 << 23)))
b0109805 1254 tcg_gen_sub_i32(var, var, offset);
2c0262af 1255 else
b0109805 1256 tcg_gen_add_i32(var, var, offset);
7d1b0095 1257 tcg_temp_free_i32(offset);
2c0262af
FB
1258 }
1259}
1260
191f9a93 1261static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1262 int extra, TCGv_i32 var)
2c0262af
FB
1263{
1264 int val, rm;
39d5492a 1265 TCGv_i32 offset;
3b46e624 1266
2c0262af
FB
1267 if (insn & (1 << 22)) {
1268 /* immediate */
1269 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1270 if (!(insn & (1 << 23)))
1271 val = -val;
18acad92 1272 val += extra;
537730b9 1273 if (val != 0)
b0109805 1274 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1275 } else {
1276 /* register */
191f9a93 1277 if (extra)
b0109805 1278 tcg_gen_addi_i32(var, var, extra);
2c0262af 1279 rm = (insn) & 0xf;
b26eefb6 1280 offset = load_reg(s, rm);
2c0262af 1281 if (!(insn & (1 << 23)))
b0109805 1282 tcg_gen_sub_i32(var, var, offset);
2c0262af 1283 else
b0109805 1284 tcg_gen_add_i32(var, var, offset);
7d1b0095 1285 tcg_temp_free_i32(offset);
2c0262af
FB
1286 }
1287}
1288
5aaebd13
PM
/* Return a pointer temp to the float_status to use: the Neon
 * "standard FP status" when 'neon' is nonzero, else the VFP fp_status.
 * The caller must free the returned TCGv_ptr.
 */
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
1301
4373f3ce
PB
/* Emit a two-operand VFP arithmetic op F0 = F0 <op> F1, in single or
 * double precision, using the VFP float_status.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1320
605a6aed
PM
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1342
4373f3ce
PB
/* Single-operand and compare VFP ops on the F0/F1 scratch registers;
 * 'dp' selects double precision.
 */
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

/* Compare raising exceptions on quiet NaNs (the "E" variant helpers). */
static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

/* Load the constant zero into F1 (used for compares against zero). */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
1390
5500b06c
PM
/* Integer-to-float conversions: F0 = convert(F0s), with 'neon'
 * selecting the Neon standard FP status.
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

/* Float-to-integer conversions; the result always lands in the 32-bit
 * F0s, even for double-precision sources.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

/* Fixed-point conversions taking a fractional-bits 'shift' operand;
 * 'round' selects the _round_to_zero helper variants (empty for none).
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1449
/* Load scratch F0 (double or single, per 'dp') from guest address 'addr'. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

/* Store scratch F0 (double or single, per 'dp') to guest address 'addr'. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1467
8e96005d
FB
1468static inline long
1469vfp_reg_offset (int dp, int reg)
1470{
1471 if (dp)
1472 return offsetof(CPUARMState, vfp.regs[reg]);
1473 else if (reg & 1) {
1474 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1475 + offsetof(CPU_DoubleU, l.upper);
1476 } else {
1477 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1478 + offsetof(CPU_DoubleU, l.lower);
1479 }
1480}
9ee6e8bb
PB
1481
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1491
/* Read one 32-bit pass of a NEON register into a fresh temp
 * (caller frees).
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Write one 32-bit pass of a NEON register; frees 'var'. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

/* Read/write a whole 64-bit NEON register. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1514
4373f3ce
PB
/* VFP values live in plain i32/i64 TCG temps; alias the f32/f64
 * load/store names to the corresponding integer ops.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

/* Copy VFP register 'reg' into scratch F0. */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy VFP register 'reg' into scratch F1. */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy scratch F0 back into VFP register 'reg'. */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1543
18c9b560
AZ
/* Bit 20 of a coprocessor insn encoding: set for the read/load
 * direction (see the TMRRC/WLDR handling below).
 */
#define ARM_CP_RW_BIT (1 << 20)
1545
/* Load/store a 64-bit iwMMXt data register to/from CPUARMState. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load an iwMMXt control register into a fresh 32-bit temp
 * (caller frees).
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store 'var' into an iwMMXt control register; frees 'var'. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1568
/* Move between the M0 scratch register and iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* 64-bit bitwise ops: M0 = M0 <op> wRn (via the V1 scratch). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1596
/* Define gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP but for helpers that also take cpu_env. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env op in one go. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1621
/* Multiply/accumulate and sum-of-absolute-differences ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Unpack ops. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Compare ops (per element size). */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Min/max ops. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Add/subtract ops. */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Average ops. */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

/* Pack ops. */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
PB
/* OR bit 1 into wCon (the MUP flag, per the function name). */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* OR bit 0 into wCon (the CUP flag, per the function name). */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Compute the N/Z condition bits for M0 into wCASF via the helper. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1706
39d5492a
PM
/* Compute the effective address of an iwMMXt load/store into 'dest',
 * handling immediate-offset pre-indexed (with optional writeback) and
 * post-indexed forms.  Returns 1 for an invalid encoding, else 0.
 *
 * NOTE(review): in the remaining form (bit 24 and bit 21 both clear,
 * bit 23 set) this returns 0 without writing 'dest' or freeing 'tmp';
 * this matches the code as-is — confirm that encoding is unreachable
 * from the callers.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when bit 8 is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1741
/* Extract an iwMMXt shift amount into 'dest'.  Bit 8 of 'insn' selects
 * a wCGR control register (only wCGR0..wCGR3 are legal); otherwise the
 * low 32 bits of data register wRd are used.  The value is ANDed with
 * 'mask'.  Returns 1 on an invalid encoding, else 0.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1763
a1c7273b 1764/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1765 (ie. an undefined instruction). */
7dcc1f89 1766static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1767{
1768 int rd, wrd;
1769 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1770 TCGv_i32 addr;
1771 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1772
1773 if ((insn & 0x0e000e00) == 0x0c000000) {
1774 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1775 wrd = insn & 0xf;
1776 rdlo = (insn >> 12) & 0xf;
1777 rdhi = (insn >> 16) & 0xf;
1778 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1779 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1780 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1781 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1782 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1783 } else { /* TMCRR */
da6b5335
FN
1784 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1785 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1786 gen_op_iwmmxt_set_mup();
1787 }
1788 return 0;
1789 }
1790
1791 wrd = (insn >> 12) & 0xf;
7d1b0095 1792 addr = tcg_temp_new_i32();
da6b5335 1793 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1794 tcg_temp_free_i32(addr);
18c9b560 1795 return 1;
da6b5335 1796 }
18c9b560
AZ
1797 if (insn & ARM_CP_RW_BIT) {
1798 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1799 tmp = tcg_temp_new_i32();
12dcc321 1800 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1801 iwmmxt_store_creg(wrd, tmp);
18c9b560 1802 } else {
e677137d
PB
1803 i = 1;
1804 if (insn & (1 << 8)) {
1805 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1806 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1807 i = 0;
1808 } else { /* WLDRW wRd */
29531141 1809 tmp = tcg_temp_new_i32();
12dcc321 1810 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1811 }
1812 } else {
29531141 1813 tmp = tcg_temp_new_i32();
e677137d 1814 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1815 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1816 } else { /* WLDRB */
12dcc321 1817 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1818 }
1819 }
1820 if (i) {
1821 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1822 tcg_temp_free_i32(tmp);
e677137d 1823 }
18c9b560
AZ
1824 gen_op_iwmmxt_movq_wRn_M0(wrd);
1825 }
1826 } else {
1827 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1828 tmp = iwmmxt_load_creg(wrd);
12dcc321 1829 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1830 } else {
1831 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1832 tmp = tcg_temp_new_i32();
e677137d
PB
1833 if (insn & (1 << 8)) {
1834 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1835 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1836 } else { /* WSTRW wRd */
ecc7b3aa 1837 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1838 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1839 }
1840 } else {
1841 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1842 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1843 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1844 } else { /* WSTRB */
ecc7b3aa 1845 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1846 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1847 }
1848 }
18c9b560 1849 }
29531141 1850 tcg_temp_free_i32(tmp);
18c9b560 1851 }
7d1b0095 1852 tcg_temp_free_i32(addr);
18c9b560
AZ
1853 return 0;
1854 }
1855
1856 if ((insn & 0x0f000000) != 0x0e000000)
1857 return 1;
1858
1859 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1860 case 0x000: /* WOR */
1861 wrd = (insn >> 12) & 0xf;
1862 rd0 = (insn >> 0) & 0xf;
1863 rd1 = (insn >> 16) & 0xf;
1864 gen_op_iwmmxt_movq_M0_wRn(rd0);
1865 gen_op_iwmmxt_orq_M0_wRn(rd1);
1866 gen_op_iwmmxt_setpsr_nz();
1867 gen_op_iwmmxt_movq_wRn_M0(wrd);
1868 gen_op_iwmmxt_set_mup();
1869 gen_op_iwmmxt_set_cup();
1870 break;
1871 case 0x011: /* TMCR */
1872 if (insn & 0xf)
1873 return 1;
1874 rd = (insn >> 12) & 0xf;
1875 wrd = (insn >> 16) & 0xf;
1876 switch (wrd) {
1877 case ARM_IWMMXT_wCID:
1878 case ARM_IWMMXT_wCASF:
1879 break;
1880 case ARM_IWMMXT_wCon:
1881 gen_op_iwmmxt_set_cup();
1882 /* Fall through. */
1883 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1884 tmp = iwmmxt_load_creg(wrd);
1885 tmp2 = load_reg(s, rd);
f669df27 1886 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1887 tcg_temp_free_i32(tmp2);
da6b5335 1888 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1889 break;
1890 case ARM_IWMMXT_wCGR0:
1891 case ARM_IWMMXT_wCGR1:
1892 case ARM_IWMMXT_wCGR2:
1893 case ARM_IWMMXT_wCGR3:
1894 gen_op_iwmmxt_set_cup();
da6b5335
FN
1895 tmp = load_reg(s, rd);
1896 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1897 break;
1898 default:
1899 return 1;
1900 }
1901 break;
1902 case 0x100: /* WXOR */
1903 wrd = (insn >> 12) & 0xf;
1904 rd0 = (insn >> 0) & 0xf;
1905 rd1 = (insn >> 16) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0);
1907 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1908 gen_op_iwmmxt_setpsr_nz();
1909 gen_op_iwmmxt_movq_wRn_M0(wrd);
1910 gen_op_iwmmxt_set_mup();
1911 gen_op_iwmmxt_set_cup();
1912 break;
1913 case 0x111: /* TMRC */
1914 if (insn & 0xf)
1915 return 1;
1916 rd = (insn >> 12) & 0xf;
1917 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1918 tmp = iwmmxt_load_creg(wrd);
1919 store_reg(s, rd, tmp);
18c9b560
AZ
1920 break;
1921 case 0x300: /* WANDN */
1922 wrd = (insn >> 12) & 0xf;
1923 rd0 = (insn >> 0) & 0xf;
1924 rd1 = (insn >> 16) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1926 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1927 gen_op_iwmmxt_andq_M0_wRn(rd1);
1928 gen_op_iwmmxt_setpsr_nz();
1929 gen_op_iwmmxt_movq_wRn_M0(wrd);
1930 gen_op_iwmmxt_set_mup();
1931 gen_op_iwmmxt_set_cup();
1932 break;
1933 case 0x200: /* WAND */
1934 wrd = (insn >> 12) & 0xf;
1935 rd0 = (insn >> 0) & 0xf;
1936 rd1 = (insn >> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0);
1938 gen_op_iwmmxt_andq_M0_wRn(rd1);
1939 gen_op_iwmmxt_setpsr_nz();
1940 gen_op_iwmmxt_movq_wRn_M0(wrd);
1941 gen_op_iwmmxt_set_mup();
1942 gen_op_iwmmxt_set_cup();
1943 break;
1944 case 0x810: case 0xa10: /* WMADD */
1945 wrd = (insn >> 12) & 0xf;
1946 rd0 = (insn >> 0) & 0xf;
1947 rd1 = (insn >> 16) & 0xf;
1948 gen_op_iwmmxt_movq_M0_wRn(rd0);
1949 if (insn & (1 << 21))
1950 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1951 else
1952 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 break;
1956 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1957 wrd = (insn >> 12) & 0xf;
1958 rd0 = (insn >> 16) & 0xf;
1959 rd1 = (insn >> 0) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0);
1961 switch ((insn >> 22) & 3) {
1962 case 0:
1963 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1964 break;
1965 case 1:
1966 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1967 break;
1968 case 2:
1969 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1970 break;
1971 case 3:
1972 return 1;
1973 }
1974 gen_op_iwmmxt_movq_wRn_M0(wrd);
1975 gen_op_iwmmxt_set_mup();
1976 gen_op_iwmmxt_set_cup();
1977 break;
1978 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1979 wrd = (insn >> 12) & 0xf;
1980 rd0 = (insn >> 16) & 0xf;
1981 rd1 = (insn >> 0) & 0xf;
1982 gen_op_iwmmxt_movq_M0_wRn(rd0);
1983 switch ((insn >> 22) & 3) {
1984 case 0:
1985 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1986 break;
1987 case 1:
1988 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1989 break;
1990 case 2:
1991 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1992 break;
1993 case 3:
1994 return 1;
1995 }
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2001 wrd = (insn >> 12) & 0xf;
2002 rd0 = (insn >> 16) & 0xf;
2003 rd1 = (insn >> 0) & 0xf;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0);
2005 if (insn & (1 << 22))
2006 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2007 else
2008 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2009 if (!(insn & (1 << 20)))
2010 gen_op_iwmmxt_addl_M0_wRn(wrd);
2011 gen_op_iwmmxt_movq_wRn_M0(wrd);
2012 gen_op_iwmmxt_set_mup();
2013 break;
2014 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 rd1 = (insn >> 0) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2019 if (insn & (1 << 21)) {
2020 if (insn & (1 << 20))
2021 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2022 else
2023 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2024 } else {
2025 if (insn & (1 << 20))
2026 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2027 else
2028 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2029 }
18c9b560
AZ
2030 gen_op_iwmmxt_movq_wRn_M0(wrd);
2031 gen_op_iwmmxt_set_mup();
2032 break;
2033 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 rd1 = (insn >> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 if (insn & (1 << 21))
2039 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2040 else
2041 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2042 if (!(insn & (1 << 20))) {
e677137d
PB
2043 iwmmxt_load_reg(cpu_V1, wrd);
2044 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2045 }
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 break;
2049 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2050 wrd = (insn >> 12) & 0xf;
2051 rd0 = (insn >> 16) & 0xf;
2052 rd1 = (insn >> 0) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0);
2054 switch ((insn >> 22) & 3) {
2055 case 0:
2056 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2057 break;
2058 case 1:
2059 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2060 break;
2061 case 2:
2062 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2063 break;
2064 case 3:
2065 return 1;
2066 }
2067 gen_op_iwmmxt_movq_wRn_M0(wrd);
2068 gen_op_iwmmxt_set_mup();
2069 gen_op_iwmmxt_set_cup();
2070 break;
2071 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2072 wrd = (insn >> 12) & 0xf;
2073 rd0 = (insn >> 16) & 0xf;
2074 rd1 = (insn >> 0) & 0xf;
2075 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2076 if (insn & (1 << 22)) {
2077 if (insn & (1 << 20))
2078 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2079 else
2080 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2081 } else {
2082 if (insn & (1 << 20))
2083 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2084 else
2085 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2086 }
18c9b560
AZ
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2090 break;
2091 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 rd1 = (insn >> 0) & 0xf;
2095 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2096 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2097 tcg_gen_andi_i32(tmp, tmp, 7);
2098 iwmmxt_load_reg(cpu_V1, rd1);
2099 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2100 tcg_temp_free_i32(tmp);
18c9b560
AZ
2101 gen_op_iwmmxt_movq_wRn_M0(wrd);
2102 gen_op_iwmmxt_set_mup();
2103 break;
2104 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2105 if (((insn >> 6) & 3) == 3)
2106 return 1;
18c9b560
AZ
2107 rd = (insn >> 12) & 0xf;
2108 wrd = (insn >> 16) & 0xf;
da6b5335 2109 tmp = load_reg(s, rd);
18c9b560
AZ
2110 gen_op_iwmmxt_movq_M0_wRn(wrd);
2111 switch ((insn >> 6) & 3) {
2112 case 0:
da6b5335
FN
2113 tmp2 = tcg_const_i32(0xff);
2114 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2115 break;
2116 case 1:
da6b5335
FN
2117 tmp2 = tcg_const_i32(0xffff);
2118 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2119 break;
2120 case 2:
da6b5335
FN
2121 tmp2 = tcg_const_i32(0xffffffff);
2122 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2123 break;
da6b5335 2124 default:
39d5492a
PM
2125 TCGV_UNUSED_I32(tmp2);
2126 TCGV_UNUSED_I32(tmp3);
18c9b560 2127 }
da6b5335 2128 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2129 tcg_temp_free_i32(tmp3);
2130 tcg_temp_free_i32(tmp2);
7d1b0095 2131 tcg_temp_free_i32(tmp);
18c9b560
AZ
2132 gen_op_iwmmxt_movq_wRn_M0(wrd);
2133 gen_op_iwmmxt_set_mup();
2134 break;
2135 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2136 rd = (insn >> 12) & 0xf;
2137 wrd = (insn >> 16) & 0xf;
da6b5335 2138 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2139 return 1;
2140 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2141 tmp = tcg_temp_new_i32();
18c9b560
AZ
2142 switch ((insn >> 22) & 3) {
2143 case 0:
da6b5335 2144 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2145 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2146 if (insn & 8) {
2147 tcg_gen_ext8s_i32(tmp, tmp);
2148 } else {
2149 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2150 }
2151 break;
2152 case 1:
da6b5335 2153 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2154 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2155 if (insn & 8) {
2156 tcg_gen_ext16s_i32(tmp, tmp);
2157 } else {
2158 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2159 }
2160 break;
2161 case 2:
da6b5335 2162 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2163 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2164 break;
18c9b560 2165 }
da6b5335 2166 store_reg(s, rd, tmp);
18c9b560
AZ
2167 break;
2168 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2169 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2170 return 1;
da6b5335 2171 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2172 switch ((insn >> 22) & 3) {
2173 case 0:
da6b5335 2174 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2175 break;
2176 case 1:
da6b5335 2177 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2178 break;
2179 case 2:
da6b5335 2180 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2181 break;
18c9b560 2182 }
da6b5335
FN
2183 tcg_gen_shli_i32(tmp, tmp, 28);
2184 gen_set_nzcv(tmp);
7d1b0095 2185 tcg_temp_free_i32(tmp);
18c9b560
AZ
2186 break;
2187 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2188 if (((insn >> 6) & 3) == 3)
2189 return 1;
18c9b560
AZ
2190 rd = (insn >> 12) & 0xf;
2191 wrd = (insn >> 16) & 0xf;
da6b5335 2192 tmp = load_reg(s, rd);
18c9b560
AZ
2193 switch ((insn >> 6) & 3) {
2194 case 0:
da6b5335 2195 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2196 break;
2197 case 1:
da6b5335 2198 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2199 break;
2200 case 2:
da6b5335 2201 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2202 break;
18c9b560 2203 }
7d1b0095 2204 tcg_temp_free_i32(tmp);
18c9b560
AZ
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 break;
2208 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2209 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2210 return 1;
da6b5335 2211 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2212 tmp2 = tcg_temp_new_i32();
da6b5335 2213 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2214 switch ((insn >> 22) & 3) {
2215 case 0:
2216 for (i = 0; i < 7; i ++) {
da6b5335
FN
2217 tcg_gen_shli_i32(tmp2, tmp2, 4);
2218 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2219 }
2220 break;
2221 case 1:
2222 for (i = 0; i < 3; i ++) {
da6b5335
FN
2223 tcg_gen_shli_i32(tmp2, tmp2, 8);
2224 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2225 }
2226 break;
2227 case 2:
da6b5335
FN
2228 tcg_gen_shli_i32(tmp2, tmp2, 16);
2229 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2230 break;
18c9b560 2231 }
da6b5335 2232 gen_set_nzcv(tmp);
7d1b0095
PM
2233 tcg_temp_free_i32(tmp2);
2234 tcg_temp_free_i32(tmp);
18c9b560
AZ
2235 break;
2236 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0);
2240 switch ((insn >> 22) & 3) {
2241 case 0:
e677137d 2242 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2243 break;
2244 case 1:
e677137d 2245 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2246 break;
2247 case 2:
e677137d 2248 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2249 break;
2250 case 3:
2251 return 1;
2252 }
2253 gen_op_iwmmxt_movq_wRn_M0(wrd);
2254 gen_op_iwmmxt_set_mup();
2255 break;
2256 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2257 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2258 return 1;
da6b5335 2259 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2260 tmp2 = tcg_temp_new_i32();
da6b5335 2261 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2262 switch ((insn >> 22) & 3) {
2263 case 0:
2264 for (i = 0; i < 7; i ++) {
da6b5335
FN
2265 tcg_gen_shli_i32(tmp2, tmp2, 4);
2266 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2267 }
2268 break;
2269 case 1:
2270 for (i = 0; i < 3; i ++) {
da6b5335
FN
2271 tcg_gen_shli_i32(tmp2, tmp2, 8);
2272 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2273 }
2274 break;
2275 case 2:
da6b5335
FN
2276 tcg_gen_shli_i32(tmp2, tmp2, 16);
2277 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2278 break;
18c9b560 2279 }
da6b5335 2280 gen_set_nzcv(tmp);
7d1b0095
PM
2281 tcg_temp_free_i32(tmp2);
2282 tcg_temp_free_i32(tmp);
18c9b560
AZ
2283 break;
2284 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2285 rd = (insn >> 12) & 0xf;
2286 rd0 = (insn >> 16) & 0xf;
da6b5335 2287 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2288 return 1;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2290 tmp = tcg_temp_new_i32();
18c9b560
AZ
2291 switch ((insn >> 22) & 3) {
2292 case 0:
da6b5335 2293 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2294 break;
2295 case 1:
da6b5335 2296 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2297 break;
2298 case 2:
da6b5335 2299 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2300 break;
18c9b560 2301 }
da6b5335 2302 store_reg(s, rd, tmp);
18c9b560
AZ
2303 break;
2304 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2305 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2306 wrd = (insn >> 12) & 0xf;
2307 rd0 = (insn >> 16) & 0xf;
2308 rd1 = (insn >> 0) & 0xf;
2309 gen_op_iwmmxt_movq_M0_wRn(rd0);
2310 switch ((insn >> 22) & 3) {
2311 case 0:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2316 break;
2317 case 1:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2322 break;
2323 case 2:
2324 if (insn & (1 << 21))
2325 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2326 else
2327 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2328 break;
2329 case 3:
2330 return 1;
2331 }
2332 gen_op_iwmmxt_movq_wRn_M0(wrd);
2333 gen_op_iwmmxt_set_mup();
2334 gen_op_iwmmxt_set_cup();
2335 break;
2336 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2337 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2338 wrd = (insn >> 12) & 0xf;
2339 rd0 = (insn >> 16) & 0xf;
2340 gen_op_iwmmxt_movq_M0_wRn(rd0);
2341 switch ((insn >> 22) & 3) {
2342 case 0:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_unpacklsb_M0();
2345 else
2346 gen_op_iwmmxt_unpacklub_M0();
2347 break;
2348 case 1:
2349 if (insn & (1 << 21))
2350 gen_op_iwmmxt_unpacklsw_M0();
2351 else
2352 gen_op_iwmmxt_unpackluw_M0();
2353 break;
2354 case 2:
2355 if (insn & (1 << 21))
2356 gen_op_iwmmxt_unpacklsl_M0();
2357 else
2358 gen_op_iwmmxt_unpacklul_M0();
2359 break;
2360 case 3:
2361 return 1;
2362 }
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2368 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2369 wrd = (insn >> 12) & 0xf;
2370 rd0 = (insn >> 16) & 0xf;
2371 gen_op_iwmmxt_movq_M0_wRn(rd0);
2372 switch ((insn >> 22) & 3) {
2373 case 0:
2374 if (insn & (1 << 21))
2375 gen_op_iwmmxt_unpackhsb_M0();
2376 else
2377 gen_op_iwmmxt_unpackhub_M0();
2378 break;
2379 case 1:
2380 if (insn & (1 << 21))
2381 gen_op_iwmmxt_unpackhsw_M0();
2382 else
2383 gen_op_iwmmxt_unpackhuw_M0();
2384 break;
2385 case 2:
2386 if (insn & (1 << 21))
2387 gen_op_iwmmxt_unpackhsl_M0();
2388 else
2389 gen_op_iwmmxt_unpackhul_M0();
2390 break;
2391 case 3:
2392 return 1;
2393 }
2394 gen_op_iwmmxt_movq_wRn_M0(wrd);
2395 gen_op_iwmmxt_set_mup();
2396 gen_op_iwmmxt_set_cup();
2397 break;
2398 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2399 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2400 if (((insn >> 22) & 3) == 0)
2401 return 1;
18c9b560
AZ
2402 wrd = (insn >> 12) & 0xf;
2403 rd0 = (insn >> 16) & 0xf;
2404 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2405 tmp = tcg_temp_new_i32();
da6b5335 2406 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2407 tcg_temp_free_i32(tmp);
18c9b560 2408 return 1;
da6b5335 2409 }
18c9b560 2410 switch ((insn >> 22) & 3) {
18c9b560 2411 case 1:
477955bd 2412 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2413 break;
2414 case 2:
477955bd 2415 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2416 break;
2417 case 3:
477955bd 2418 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2419 break;
2420 }
7d1b0095 2421 tcg_temp_free_i32(tmp);
18c9b560
AZ
2422 gen_op_iwmmxt_movq_wRn_M0(wrd);
2423 gen_op_iwmmxt_set_mup();
2424 gen_op_iwmmxt_set_cup();
2425 break;
2426 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2427 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2428 if (((insn >> 22) & 3) == 0)
2429 return 1;
18c9b560
AZ
2430 wrd = (insn >> 12) & 0xf;
2431 rd0 = (insn >> 16) & 0xf;
2432 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2433 tmp = tcg_temp_new_i32();
da6b5335 2434 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2435 tcg_temp_free_i32(tmp);
18c9b560 2436 return 1;
da6b5335 2437 }
18c9b560 2438 switch ((insn >> 22) & 3) {
18c9b560 2439 case 1:
477955bd 2440 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2441 break;
2442 case 2:
477955bd 2443 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2444 break;
2445 case 3:
477955bd 2446 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2447 break;
2448 }
7d1b0095 2449 tcg_temp_free_i32(tmp);
18c9b560
AZ
2450 gen_op_iwmmxt_movq_wRn_M0(wrd);
2451 gen_op_iwmmxt_set_mup();
2452 gen_op_iwmmxt_set_cup();
2453 break;
2454 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2455 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2456 if (((insn >> 22) & 3) == 0)
2457 return 1;
18c9b560
AZ
2458 wrd = (insn >> 12) & 0xf;
2459 rd0 = (insn >> 16) & 0xf;
2460 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2461 tmp = tcg_temp_new_i32();
da6b5335 2462 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2463 tcg_temp_free_i32(tmp);
18c9b560 2464 return 1;
da6b5335 2465 }
18c9b560 2466 switch ((insn >> 22) & 3) {
18c9b560 2467 case 1:
477955bd 2468 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2469 break;
2470 case 2:
477955bd 2471 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2472 break;
2473 case 3:
477955bd 2474 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2475 break;
2476 }
7d1b0095 2477 tcg_temp_free_i32(tmp);
18c9b560
AZ
2478 gen_op_iwmmxt_movq_wRn_M0(wrd);
2479 gen_op_iwmmxt_set_mup();
2480 gen_op_iwmmxt_set_cup();
2481 break;
2482 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2483 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2484 if (((insn >> 22) & 3) == 0)
2485 return 1;
18c9b560
AZ
2486 wrd = (insn >> 12) & 0xf;
2487 rd0 = (insn >> 16) & 0xf;
2488 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2489 tmp = tcg_temp_new_i32();
18c9b560 2490 switch ((insn >> 22) & 3) {
18c9b560 2491 case 1:
da6b5335 2492 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2493 tcg_temp_free_i32(tmp);
18c9b560 2494 return 1;
da6b5335 2495 }
477955bd 2496 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2497 break;
2498 case 2:
da6b5335 2499 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2500 tcg_temp_free_i32(tmp);
18c9b560 2501 return 1;
da6b5335 2502 }
477955bd 2503 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2504 break;
2505 case 3:
da6b5335 2506 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2507 tcg_temp_free_i32(tmp);
18c9b560 2508 return 1;
da6b5335 2509 }
477955bd 2510 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2511 break;
2512 }
7d1b0095 2513 tcg_temp_free_i32(tmp);
18c9b560
AZ
2514 gen_op_iwmmxt_movq_wRn_M0(wrd);
2515 gen_op_iwmmxt_set_mup();
2516 gen_op_iwmmxt_set_cup();
2517 break;
2518 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2519 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2520 wrd = (insn >> 12) & 0xf;
2521 rd0 = (insn >> 16) & 0xf;
2522 rd1 = (insn >> 0) & 0xf;
2523 gen_op_iwmmxt_movq_M0_wRn(rd0);
2524 switch ((insn >> 22) & 3) {
2525 case 0:
2526 if (insn & (1 << 21))
2527 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2528 else
2529 gen_op_iwmmxt_minub_M0_wRn(rd1);
2530 break;
2531 case 1:
2532 if (insn & (1 << 21))
2533 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2534 else
2535 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2536 break;
2537 case 2:
2538 if (insn & (1 << 21))
2539 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2540 else
2541 gen_op_iwmmxt_minul_M0_wRn(rd1);
2542 break;
2543 case 3:
2544 return 1;
2545 }
2546 gen_op_iwmmxt_movq_wRn_M0(wrd);
2547 gen_op_iwmmxt_set_mup();
2548 break;
2549 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2550 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2551 wrd = (insn >> 12) & 0xf;
2552 rd0 = (insn >> 16) & 0xf;
2553 rd1 = (insn >> 0) & 0xf;
2554 gen_op_iwmmxt_movq_M0_wRn(rd0);
2555 switch ((insn >> 22) & 3) {
2556 case 0:
2557 if (insn & (1 << 21))
2558 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2559 else
2560 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2561 break;
2562 case 1:
2563 if (insn & (1 << 21))
2564 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2565 else
2566 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2567 break;
2568 case 2:
2569 if (insn & (1 << 21))
2570 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2571 else
2572 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2573 break;
2574 case 3:
2575 return 1;
2576 }
2577 gen_op_iwmmxt_movq_wRn_M0(wrd);
2578 gen_op_iwmmxt_set_mup();
2579 break;
2580 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2581 case 0x402: case 0x502: case 0x602: case 0x702:
2582 wrd = (insn >> 12) & 0xf;
2583 rd0 = (insn >> 16) & 0xf;
2584 rd1 = (insn >> 0) & 0xf;
2585 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2586 tmp = tcg_const_i32((insn >> 20) & 3);
2587 iwmmxt_load_reg(cpu_V1, rd1);
2588 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2589 tcg_temp_free_i32(tmp);
18c9b560
AZ
2590 gen_op_iwmmxt_movq_wRn_M0(wrd);
2591 gen_op_iwmmxt_set_mup();
2592 break;
2593 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2594 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2595 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2596 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2597 wrd = (insn >> 12) & 0xf;
2598 rd0 = (insn >> 16) & 0xf;
2599 rd1 = (insn >> 0) & 0xf;
2600 gen_op_iwmmxt_movq_M0_wRn(rd0);
2601 switch ((insn >> 20) & 0xf) {
2602 case 0x0:
2603 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2604 break;
2605 case 0x1:
2606 gen_op_iwmmxt_subub_M0_wRn(rd1);
2607 break;
2608 case 0x3:
2609 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2610 break;
2611 case 0x4:
2612 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2613 break;
2614 case 0x5:
2615 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2616 break;
2617 case 0x7:
2618 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2619 break;
2620 case 0x8:
2621 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2622 break;
2623 case 0x9:
2624 gen_op_iwmmxt_subul_M0_wRn(rd1);
2625 break;
2626 case 0xb:
2627 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2628 break;
2629 default:
2630 return 1;
2631 }
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2634 gen_op_iwmmxt_set_cup();
2635 break;
2636 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2637 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2638 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2639 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2640 wrd = (insn >> 12) & 0xf;
2641 rd0 = (insn >> 16) & 0xf;
2642 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2643 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2644 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2645 tcg_temp_free_i32(tmp);
18c9b560
AZ
2646 gen_op_iwmmxt_movq_wRn_M0(wrd);
2647 gen_op_iwmmxt_set_mup();
2648 gen_op_iwmmxt_set_cup();
2649 break;
2650 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2651 case 0x418: case 0x518: case 0x618: case 0x718:
2652 case 0x818: case 0x918: case 0xa18: case 0xb18:
2653 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2654 wrd = (insn >> 12) & 0xf;
2655 rd0 = (insn >> 16) & 0xf;
2656 rd1 = (insn >> 0) & 0xf;
2657 gen_op_iwmmxt_movq_M0_wRn(rd0);
2658 switch ((insn >> 20) & 0xf) {
2659 case 0x0:
2660 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2661 break;
2662 case 0x1:
2663 gen_op_iwmmxt_addub_M0_wRn(rd1);
2664 break;
2665 case 0x3:
2666 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2667 break;
2668 case 0x4:
2669 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2670 break;
2671 case 0x5:
2672 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2673 break;
2674 case 0x7:
2675 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2676 break;
2677 case 0x8:
2678 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2679 break;
2680 case 0x9:
2681 gen_op_iwmmxt_addul_M0_wRn(rd1);
2682 break;
2683 case 0xb:
2684 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2685 break;
2686 default:
2687 return 1;
2688 }
2689 gen_op_iwmmxt_movq_wRn_M0(wrd);
2690 gen_op_iwmmxt_set_mup();
2691 gen_op_iwmmxt_set_cup();
2692 break;
2693 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2694 case 0x408: case 0x508: case 0x608: case 0x708:
2695 case 0x808: case 0x908: case 0xa08: case 0xb08:
2696 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2697 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2698 return 1;
18c9b560
AZ
2699 wrd = (insn >> 12) & 0xf;
2700 rd0 = (insn >> 16) & 0xf;
2701 rd1 = (insn >> 0) & 0xf;
2702 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2703 switch ((insn >> 22) & 3) {
18c9b560
AZ
2704 case 1:
2705 if (insn & (1 << 21))
2706 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2707 else
2708 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2709 break;
2710 case 2:
2711 if (insn & (1 << 21))
2712 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2713 else
2714 gen_op_iwmmxt_packul_M0_wRn(rd1);
2715 break;
2716 case 3:
2717 if (insn & (1 << 21))
2718 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2719 else
2720 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2721 break;
2722 }
2723 gen_op_iwmmxt_movq_wRn_M0(wrd);
2724 gen_op_iwmmxt_set_mup();
2725 gen_op_iwmmxt_set_cup();
2726 break;
2727 case 0x201: case 0x203: case 0x205: case 0x207:
2728 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2729 case 0x211: case 0x213: case 0x215: case 0x217:
2730 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2731 wrd = (insn >> 5) & 0xf;
2732 rd0 = (insn >> 12) & 0xf;
2733 rd1 = (insn >> 0) & 0xf;
2734 if (rd0 == 0xf || rd1 == 0xf)
2735 return 1;
2736 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2737 tmp = load_reg(s, rd0);
2738 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2739 switch ((insn >> 16) & 0xf) {
2740 case 0x0: /* TMIA */
da6b5335 2741 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2742 break;
2743 case 0x8: /* TMIAPH */
da6b5335 2744 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2745 break;
2746 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2747 if (insn & (1 << 16))
da6b5335 2748 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2749 if (insn & (1 << 17))
da6b5335
FN
2750 tcg_gen_shri_i32(tmp2, tmp2, 16);
2751 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2752 break;
2753 default:
7d1b0095
PM
2754 tcg_temp_free_i32(tmp2);
2755 tcg_temp_free_i32(tmp);
18c9b560
AZ
2756 return 1;
2757 }
7d1b0095
PM
2758 tcg_temp_free_i32(tmp2);
2759 tcg_temp_free_i32(tmp);
18c9b560
AZ
2760 gen_op_iwmmxt_movq_wRn_M0(wrd);
2761 gen_op_iwmmxt_set_mup();
2762 break;
2763 default:
2764 return 1;
2765 }
2766
2767 return 0;
2768}
2769
a1c7273b 2770/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2771 (ie. an undefined instruction). */
7dcc1f89 2772static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2773{
2774 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2775 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2776
2777 if ((insn & 0x0ff00f10) == 0x0e200010) {
2778 /* Multiply with Internal Accumulate Format */
2779 rd0 = (insn >> 12) & 0xf;
2780 rd1 = insn & 0xf;
2781 acc = (insn >> 5) & 7;
2782
2783 if (acc != 0)
2784 return 1;
2785
3a554c0f
FN
2786 tmp = load_reg(s, rd0);
2787 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2788 switch ((insn >> 16) & 0xf) {
2789 case 0x0: /* MIA */
3a554c0f 2790 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2791 break;
2792 case 0x8: /* MIAPH */
3a554c0f 2793 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2794 break;
2795 case 0xc: /* MIABB */
2796 case 0xd: /* MIABT */
2797 case 0xe: /* MIATB */
2798 case 0xf: /* MIATT */
18c9b560 2799 if (insn & (1 << 16))
3a554c0f 2800 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2801 if (insn & (1 << 17))
3a554c0f
FN
2802 tcg_gen_shri_i32(tmp2, tmp2, 16);
2803 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2804 break;
2805 default:
2806 return 1;
2807 }
7d1b0095
PM
2808 tcg_temp_free_i32(tmp2);
2809 tcg_temp_free_i32(tmp);
18c9b560
AZ
2810
2811 gen_op_iwmmxt_movq_wRn_M0(acc);
2812 return 0;
2813 }
2814
2815 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2816 /* Internal Accumulator Access Format */
2817 rdhi = (insn >> 16) & 0xf;
2818 rdlo = (insn >> 12) & 0xf;
2819 acc = insn & 7;
2820
2821 if (acc != 0)
2822 return 1;
2823
2824 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2825 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2826 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2827 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2828 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2829 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2830 } else { /* MAR */
3a554c0f
FN
2831 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2832 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2833 }
2834 return 0;
2835 }
2836
2837 return 1;
2838}
2839
/* Helpers for extracting VFP register numbers from an instruction word.
   Single-precision register numbers are 5 bits: a 4-bit field plus one
   extra low bit held in a separate position ("smallbit").  For
   double-precision registers the extra bit is the HIGH bit of the
   register number, and is only valid when VFP3 (32 double registers)
   is present; on pre-VFP3 cores a set smallbit makes the insn UNDEF,
   hence the embedded "return 1" in VFP_DREG (it must be used inside a
   decoder function returning nonzero for undefined encodings). */

/* Shift x right by n bits when n is positive, left by -n otherwise;
   lets VFP_SREG use one expression for any field position. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Instantiations for the D (destination), N and M operand fields. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2859
4373f3ce 2860/* Move between integer and VFP cores. */
39d5492a 2861static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2862{
39d5492a 2863 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2864 tcg_gen_mov_i32(tmp, cpu_F0s);
2865 return tmp;
2866}
2867
39d5492a 2868static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2869{
2870 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2871 tcg_temp_free_i32(tmp);
4373f3ce
PB
2872}
2873
39d5492a 2874static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2875{
39d5492a 2876 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2877 if (shift)
2878 tcg_gen_shri_i32(var, var, shift);
86831435 2879 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2880 tcg_gen_shli_i32(tmp, var, 8);
2881 tcg_gen_or_i32(var, var, tmp);
2882 tcg_gen_shli_i32(tmp, var, 16);
2883 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2884 tcg_temp_free_i32(tmp);
ad69471c
PB
2885}
2886
39d5492a 2887static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2888{
39d5492a 2889 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2890 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2891 tcg_gen_shli_i32(tmp, var, 16);
2892 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2893 tcg_temp_free_i32(tmp);
ad69471c
PB
2894}
2895
39d5492a 2896static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2897{
39d5492a 2898 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2899 tcg_gen_andi_i32(var, var, 0xffff0000);
2900 tcg_gen_shri_i32(tmp, var, 16);
2901 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2902 tcg_temp_free_i32(tmp);
ad69471c
PB
2903}
2904
39d5492a 2905static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2906{
2907 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2908 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2909 switch (size) {
2910 case 0:
12dcc321 2911 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2912 gen_neon_dup_u8(tmp, 0);
2913 break;
2914 case 1:
12dcc321 2915 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2916 gen_neon_dup_low16(tmp);
2917 break;
2918 case 2:
12dcc321 2919 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2920 break;
2921 default: /* Avoid compiler warnings. */
2922 abort();
2923 }
2924 return tmp;
2925}
2926
/* Handle the VSEL insn: dest = cond ? rn : rm, branch-free, using
 * movcond on the cached flag values.  cc (insn[21:20]) encodes one of
 * four conditions: 0=eq, 1=vs, 2=ge, 3=gt.  dp selects double
 * precision.  Always returns 0 (decode has already validated the insn).
 *
 * Flag representation (see the cpu_ZF/NF/VF globals): ZF is zero iff
 * the Z flag is set; NF and VF hold the N and V flags in their sign
 * bits, which is why they are sign-extended for the 64-bit compares.
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag values so they can be used with 64-bit
           movcond; NF/VF sign-extended to preserve their sign bits. */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* First select on Z, then override with frm if N != V. */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: same logic on the 32-bit flags directly. */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* First select on Z, then override with frm if N != V. */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
3035
40cfacdd
WN
3036static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3037 uint32_t rm, uint32_t dp)
3038{
3039 uint32_t vmin = extract32(insn, 6, 1);
3040 TCGv_ptr fpst = get_fpstatus_ptr(0);
3041
3042 if (dp) {
3043 TCGv_i64 frn, frm, dest;
3044
3045 frn = tcg_temp_new_i64();
3046 frm = tcg_temp_new_i64();
3047 dest = tcg_temp_new_i64();
3048
3049 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3050 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3051 if (vmin) {
f71a2ae5 3052 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3053 } else {
f71a2ae5 3054 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3055 }
3056 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3057 tcg_temp_free_i64(frn);
3058 tcg_temp_free_i64(frm);
3059 tcg_temp_free_i64(dest);
3060 } else {
3061 TCGv_i32 frn, frm, dest;
3062
3063 frn = tcg_temp_new_i32();
3064 frm = tcg_temp_new_i32();
3065 dest = tcg_temp_new_i32();
3066
3067 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3068 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3069 if (vmin) {
f71a2ae5 3070 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3071 } else {
f71a2ae5 3072 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3073 }
3074 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3075 tcg_temp_free_i32(frn);
3076 tcg_temp_free_i32(frm);
3077 tcg_temp_free_i32(dest);
3078 }
3079
3080 tcg_temp_free_ptr(fpst);
3081 return 0;
3082}
3083
7655f39b
WN
3084static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3085 int rounding)
3086{
3087 TCGv_ptr fpst = get_fpstatus_ptr(0);
3088 TCGv_i32 tcg_rmode;
3089
3090 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3091 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3092
3093 if (dp) {
3094 TCGv_i64 tcg_op;
3095 TCGv_i64 tcg_res;
3096 tcg_op = tcg_temp_new_i64();
3097 tcg_res = tcg_temp_new_i64();
3098 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3099 gen_helper_rintd(tcg_res, tcg_op, fpst);
3100 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3101 tcg_temp_free_i64(tcg_op);
3102 tcg_temp_free_i64(tcg_res);
3103 } else {
3104 TCGv_i32 tcg_op;
3105 TCGv_i32 tcg_res;
3106 tcg_op = tcg_temp_new_i32();
3107 tcg_res = tcg_temp_new_i32();
3108 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3109 gen_helper_rints(tcg_res, tcg_op, fpst);
3110 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3111 tcg_temp_free_i32(tcg_op);
3112 tcg_temp_free_i32(tcg_res);
3113 }
3114
3115 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3116 tcg_temp_free_i32(tcg_rmode);
3117
3118 tcg_temp_free_ptr(fpst);
3119 return 0;
3120}
3121
c9975a83
WN
3122static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3123 int rounding)
3124{
3125 bool is_signed = extract32(insn, 7, 1);
3126 TCGv_ptr fpst = get_fpstatus_ptr(0);
3127 TCGv_i32 tcg_rmode, tcg_shift;
3128
3129 tcg_shift = tcg_const_i32(0);
3130
3131 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3132 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3133
3134 if (dp) {
3135 TCGv_i64 tcg_double, tcg_res;
3136 TCGv_i32 tcg_tmp;
3137 /* Rd is encoded as a single precision register even when the source
3138 * is double precision.
3139 */
3140 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3141 tcg_double = tcg_temp_new_i64();
3142 tcg_res = tcg_temp_new_i64();
3143 tcg_tmp = tcg_temp_new_i32();
3144 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3145 if (is_signed) {
3146 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3147 } else {
3148 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3149 }
ecc7b3aa 3150 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3151 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3152 tcg_temp_free_i32(tcg_tmp);
3153 tcg_temp_free_i64(tcg_res);
3154 tcg_temp_free_i64(tcg_double);
3155 } else {
3156 TCGv_i32 tcg_single, tcg_res;
3157 tcg_single = tcg_temp_new_i32();
3158 tcg_res = tcg_temp_new_i32();
3159 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3160 if (is_signed) {
3161 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3162 } else {
3163 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3164 }
3165 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3166 tcg_temp_free_i32(tcg_res);
3167 tcg_temp_free_i32(tcg_single);
3168 }
3169
3170 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3171 tcg_temp_free_i32(tcg_rmode);
3172
3173 tcg_temp_free_i32(tcg_shift);
3174
3175 tcg_temp_free_ptr(fpst);
3176
3177 return 0;
3178}
7655f39b
WN
3179
3180/* Table for converting the most common AArch32 encoding of
3181 * rounding mode to arm_fprounding order (which matches the
3182 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3183 */
3184static const uint8_t fp_decode_rm[] = {
3185 FPROUNDING_TIEAWAY,
3186 FPROUNDING_TIEEVEN,
3187 FPROUNDING_POSINF,
3188 FPROUNDING_NEGINF,
3189};
3190
7dcc1f89 3191static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3192{
3193 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3194
d614a513 3195 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3196 return 1;
3197 }
3198
3199 if (dp) {
3200 VFP_DREG_D(rd, insn);
3201 VFP_DREG_N(rn, insn);
3202 VFP_DREG_M(rm, insn);
3203 } else {
3204 rd = VFP_SREG_D(insn);
3205 rn = VFP_SREG_N(insn);
3206 rm = VFP_SREG_M(insn);
3207 }
3208
3209 if ((insn & 0x0f800e50) == 0x0e000a00) {
3210 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3211 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3212 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3213 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3214 /* VRINTA, VRINTN, VRINTP, VRINTM */
3215 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3216 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3217 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3218 /* VCVTA, VCVTN, VCVTP, VCVTM */
3219 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3220 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3221 }
3222 return 1;
3223}
3224
a1c7273b 3225/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3226 (ie. an undefined instruction). */
7dcc1f89 3227static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3228{
3229 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3230 int dp, veclen;
39d5492a
PM
3231 TCGv_i32 addr;
3232 TCGv_i32 tmp;
3233 TCGv_i32 tmp2;
b7bcbe95 3234
d614a513 3235 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3236 return 1;
d614a513 3237 }
40f137e1 3238
2c7ffc41
PM
3239 /* FIXME: this access check should not take precedence over UNDEF
3240 * for invalid encodings; we will generate incorrect syndrome information
3241 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3242 */
9dbbc748 3243 if (s->fp_excp_el) {
2c7ffc41 3244 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3245 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3246 return 0;
3247 }
3248
5df8bac1 3249 if (!s->vfp_enabled) {
9ee6e8bb 3250 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3251 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3252 return 1;
3253 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3254 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3255 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3256 return 1;
a50c0f51 3257 }
40f137e1 3258 }
6a57f3eb
WN
3259
3260 if (extract32(insn, 28, 4) == 0xf) {
3261 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3262 * only used in v8 and above.
3263 */
7dcc1f89 3264 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3265 }
3266
b7bcbe95
FB
3267 dp = ((insn & 0xf00) == 0xb00);
3268 switch ((insn >> 24) & 0xf) {
3269 case 0xe:
3270 if (insn & (1 << 4)) {
3271 /* single register transfer */
b7bcbe95
FB
3272 rd = (insn >> 12) & 0xf;
3273 if (dp) {
9ee6e8bb
PB
3274 int size;
3275 int pass;
3276
3277 VFP_DREG_N(rn, insn);
3278 if (insn & 0xf)
b7bcbe95 3279 return 1;
9ee6e8bb 3280 if (insn & 0x00c00060
d614a513 3281 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3282 return 1;
d614a513 3283 }
9ee6e8bb
PB
3284
3285 pass = (insn >> 21) & 1;
3286 if (insn & (1 << 22)) {
3287 size = 0;
3288 offset = ((insn >> 5) & 3) * 8;
3289 } else if (insn & (1 << 5)) {
3290 size = 1;
3291 offset = (insn & (1 << 6)) ? 16 : 0;
3292 } else {
3293 size = 2;
3294 offset = 0;
3295 }
18c9b560 3296 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3297 /* vfp->arm */
ad69471c 3298 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3299 switch (size) {
3300 case 0:
9ee6e8bb 3301 if (offset)
ad69471c 3302 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3303 if (insn & (1 << 23))
ad69471c 3304 gen_uxtb(tmp);
9ee6e8bb 3305 else
ad69471c 3306 gen_sxtb(tmp);
9ee6e8bb
PB
3307 break;
3308 case 1:
9ee6e8bb
PB
3309 if (insn & (1 << 23)) {
3310 if (offset) {
ad69471c 3311 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3312 } else {
ad69471c 3313 gen_uxth(tmp);
9ee6e8bb
PB
3314 }
3315 } else {
3316 if (offset) {
ad69471c 3317 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3318 } else {
ad69471c 3319 gen_sxth(tmp);
9ee6e8bb
PB
3320 }
3321 }
3322 break;
3323 case 2:
9ee6e8bb
PB
3324 break;
3325 }
ad69471c 3326 store_reg(s, rd, tmp);
b7bcbe95
FB
3327 } else {
3328 /* arm->vfp */
ad69471c 3329 tmp = load_reg(s, rd);
9ee6e8bb
PB
3330 if (insn & (1 << 23)) {
3331 /* VDUP */
3332 if (size == 0) {
ad69471c 3333 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3334 } else if (size == 1) {
ad69471c 3335 gen_neon_dup_low16(tmp);
9ee6e8bb 3336 }
cbbccffc 3337 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3338 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3339 tcg_gen_mov_i32(tmp2, tmp);
3340 neon_store_reg(rn, n, tmp2);
3341 }
3342 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3343 } else {
3344 /* VMOV */
3345 switch (size) {
3346 case 0:
ad69471c 3347 tmp2 = neon_load_reg(rn, pass);
d593c48e 3348 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3349 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3350 break;
3351 case 1:
ad69471c 3352 tmp2 = neon_load_reg(rn, pass);
d593c48e 3353 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3354 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3355 break;
3356 case 2:
9ee6e8bb
PB
3357 break;
3358 }
ad69471c 3359 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3360 }
b7bcbe95 3361 }
9ee6e8bb
PB
3362 } else { /* !dp */
3363 if ((insn & 0x6f) != 0x00)
3364 return 1;
3365 rn = VFP_SREG_N(insn);
18c9b560 3366 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3367 /* vfp->arm */
3368 if (insn & (1 << 21)) {
3369 /* system register */
40f137e1 3370 rn >>= 1;
9ee6e8bb 3371
b7bcbe95 3372 switch (rn) {
40f137e1 3373 case ARM_VFP_FPSID:
4373f3ce 3374 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3375 VFP3 restricts all id registers to privileged
3376 accesses. */
3377 if (IS_USER(s)
d614a513 3378 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3379 return 1;
d614a513 3380 }
4373f3ce 3381 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3382 break;
40f137e1 3383 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3384 if (IS_USER(s))
3385 return 1;
4373f3ce 3386 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3387 break;
40f137e1
PB
3388 case ARM_VFP_FPINST:
3389 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3390 /* Not present in VFP3. */
3391 if (IS_USER(s)
d614a513 3392 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3393 return 1;
d614a513 3394 }
4373f3ce 3395 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3396 break;
40f137e1 3397 case ARM_VFP_FPSCR:
601d70b9 3398 if (rd == 15) {
4373f3ce
PB
3399 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3400 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3401 } else {
7d1b0095 3402 tmp = tcg_temp_new_i32();
4373f3ce
PB
3403 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3404 }
b7bcbe95 3405 break;
a50c0f51 3406 case ARM_VFP_MVFR2:
d614a513 3407 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3408 return 1;
3409 }
3410 /* fall through */
9ee6e8bb
PB
3411 case ARM_VFP_MVFR0:
3412 case ARM_VFP_MVFR1:
3413 if (IS_USER(s)
d614a513 3414 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3415 return 1;
d614a513 3416 }
4373f3ce 3417 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3418 break;
b7bcbe95
FB
3419 default:
3420 return 1;
3421 }
3422 } else {
3423 gen_mov_F0_vreg(0, rn);
4373f3ce 3424 tmp = gen_vfp_mrs();
b7bcbe95
FB
3425 }
3426 if (rd == 15) {
b5ff1b31 3427 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3428 gen_set_nzcv(tmp);
7d1b0095 3429 tcg_temp_free_i32(tmp);
4373f3ce
PB
3430 } else {
3431 store_reg(s, rd, tmp);
3432 }
b7bcbe95
FB
3433 } else {
3434 /* arm->vfp */
b7bcbe95 3435 if (insn & (1 << 21)) {
40f137e1 3436 rn >>= 1;
b7bcbe95
FB
3437 /* system register */
3438 switch (rn) {
40f137e1 3439 case ARM_VFP_FPSID:
9ee6e8bb
PB
3440 case ARM_VFP_MVFR0:
3441 case ARM_VFP_MVFR1:
b7bcbe95
FB
3442 /* Writes are ignored. */
3443 break;
40f137e1 3444 case ARM_VFP_FPSCR:
e4c1cfa5 3445 tmp = load_reg(s, rd);
4373f3ce 3446 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3447 tcg_temp_free_i32(tmp);
b5ff1b31 3448 gen_lookup_tb(s);
b7bcbe95 3449 break;
40f137e1 3450 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3451 if (IS_USER(s))
3452 return 1;
71b3c3de
JR
3453 /* TODO: VFP subarchitecture support.
3454 * For now, keep the EN bit only */
e4c1cfa5 3455 tmp = load_reg(s, rd);
71b3c3de 3456 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3457 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3458 gen_lookup_tb(s);
3459 break;
3460 case ARM_VFP_FPINST:
3461 case ARM_VFP_FPINST2:
23adb861
PM
3462 if (IS_USER(s)) {
3463 return 1;
3464 }
e4c1cfa5 3465 tmp = load_reg(s, rd);
4373f3ce 3466 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3467 break;
b7bcbe95
FB
3468 default:
3469 return 1;
3470 }
3471 } else {
e4c1cfa5 3472 tmp = load_reg(s, rd);
4373f3ce 3473 gen_vfp_msr(tmp);
b7bcbe95
FB
3474 gen_mov_vreg_F0(0, rn);
3475 }
3476 }
3477 }
3478 } else {
3479 /* data processing */
3480 /* The opcode is in bits 23, 21, 20 and 6. */
3481 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3482 if (dp) {
3483 if (op == 15) {
3484 /* rn is opcode */
3485 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3486 } else {
3487 /* rn is register number */
9ee6e8bb 3488 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3489 }
3490
239c20c7
WN
3491 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3492 ((rn & 0x1e) == 0x6))) {
3493 /* Integer or single/half precision destination. */
9ee6e8bb 3494 rd = VFP_SREG_D(insn);
b7bcbe95 3495 } else {
9ee6e8bb 3496 VFP_DREG_D(rd, insn);
b7bcbe95 3497 }
04595bf6 3498 if (op == 15 &&
239c20c7
WN
3499 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3500 ((rn & 0x1e) == 0x4))) {
3501 /* VCVT from int or half precision is always from S reg
3502 * regardless of dp bit. VCVT with immediate frac_bits
3503 * has same format as SREG_M.
04595bf6
PM
3504 */
3505 rm = VFP_SREG_M(insn);
b7bcbe95 3506 } else {
9ee6e8bb 3507 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3508 }
3509 } else {
9ee6e8bb 3510 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3511 if (op == 15 && rn == 15) {
3512 /* Double precision destination. */
9ee6e8bb
PB
3513 VFP_DREG_D(rd, insn);
3514 } else {
3515 rd = VFP_SREG_D(insn);
3516 }
04595bf6
PM
3517 /* NB that we implicitly rely on the encoding for the frac_bits
3518 * in VCVT of fixed to float being the same as that of an SREG_M
3519 */
9ee6e8bb 3520 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3521 }
3522
69d1fc22 3523 veclen = s->vec_len;
b7bcbe95
FB
3524 if (op == 15 && rn > 3)
3525 veclen = 0;
3526
3527 /* Shut up compiler warnings. */
3528 delta_m = 0;
3529 delta_d = 0;
3530 bank_mask = 0;
3b46e624 3531
b7bcbe95
FB
3532 if (veclen > 0) {
3533 if (dp)
3534 bank_mask = 0xc;
3535 else
3536 bank_mask = 0x18;
3537
3538 /* Figure out what type of vector operation this is. */
3539 if ((rd & bank_mask) == 0) {
3540 /* scalar */
3541 veclen = 0;
3542 } else {
3543 if (dp)
69d1fc22 3544 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3545 else
69d1fc22 3546 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3547
3548 if ((rm & bank_mask) == 0) {
3549 /* mixed scalar/vector */
3550 delta_m = 0;
3551 } else {
3552 /* vector */
3553 delta_m = delta_d;
3554 }
3555 }
3556 }
3557
3558 /* Load the initial operands. */
3559 if (op == 15) {
3560 switch (rn) {
3561 case 16:
3562 case 17:
3563 /* Integer source */
3564 gen_mov_F0_vreg(0, rm);
3565 break;
3566 case 8:
3567 case 9:
3568 /* Compare */
3569 gen_mov_F0_vreg(dp, rd);
3570 gen_mov_F1_vreg(dp, rm);
3571 break;
3572 case 10:
3573 case 11:
3574 /* Compare with zero */
3575 gen_mov_F0_vreg(dp, rd);
3576 gen_vfp_F1_ld0(dp);
3577 break;
9ee6e8bb
PB
3578 case 20:
3579 case 21:
3580 case 22:
3581 case 23:
644ad806
PB
3582 case 28:
3583 case 29:
3584 case 30:
3585 case 31:
9ee6e8bb
PB
3586 /* Source and destination the same. */
3587 gen_mov_F0_vreg(dp, rd);
3588 break;
6e0c0ed1
PM
3589 case 4:
3590 case 5:
3591 case 6:
3592 case 7:
239c20c7
WN
3593 /* VCVTB, VCVTT: only present with the halfprec extension
3594 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3595 * (we choose to UNDEF)
6e0c0ed1 3596 */
d614a513
PM
3597 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3598 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3599 return 1;
3600 }
239c20c7
WN
3601 if (!extract32(rn, 1, 1)) {
3602 /* Half precision source. */
3603 gen_mov_F0_vreg(0, rm);
3604 break;
3605 }
6e0c0ed1 3606 /* Otherwise fall through */
b7bcbe95
FB
3607 default:
3608 /* One source operand. */
3609 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3610 break;
b7bcbe95
FB
3611 }
3612 } else {
3613 /* Two source operands. */
3614 gen_mov_F0_vreg(dp, rn);
3615 gen_mov_F1_vreg(dp, rm);
3616 }
3617
3618 for (;;) {
3619 /* Perform the calculation. */
3620 switch (op) {
605a6aed
PM
3621 case 0: /* VMLA: fd + (fn * fm) */
3622 /* Note that order of inputs to the add matters for NaNs */
3623 gen_vfp_F1_mul(dp);
3624 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3625 gen_vfp_add(dp);
3626 break;
605a6aed 3627 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3628 gen_vfp_mul(dp);
605a6aed
PM
3629 gen_vfp_F1_neg(dp);
3630 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3631 gen_vfp_add(dp);
3632 break;
605a6aed
PM
3633 case 2: /* VNMLS: -fd + (fn * fm) */
3634 /* Note that it isn't valid to replace (-A + B) with (B - A)
3635 * or similar plausible looking simplifications
3636 * because this will give wrong results for NaNs.
3637 */
3638 gen_vfp_F1_mul(dp);
3639 gen_mov_F0_vreg(dp, rd);
3640 gen_vfp_neg(dp);
3641 gen_vfp_add(dp);
b7bcbe95 3642 break;
605a6aed 3643 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3644 gen_vfp_mul(dp);
605a6aed
PM
3645 gen_vfp_F1_neg(dp);
3646 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3647 gen_vfp_neg(dp);
605a6aed 3648 gen_vfp_add(dp);
b7bcbe95
FB
3649 break;
3650 case 4: /* mul: fn * fm */
3651 gen_vfp_mul(dp);
3652 break;
3653 case 5: /* nmul: -(fn * fm) */
3654 gen_vfp_mul(dp);
3655 gen_vfp_neg(dp);
3656 break;
3657 case 6: /* add: fn + fm */
3658 gen_vfp_add(dp);
3659 break;
3660 case 7: /* sub: fn - fm */
3661 gen_vfp_sub(dp);
3662 break;
3663 case 8: /* div: fn / fm */
3664 gen_vfp_div(dp);
3665 break;
da97f52c
PM
3666 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3667 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3668 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3669 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3670 /* These are fused multiply-add, and must be done as one
3671 * floating point operation with no rounding between the
3672 * multiplication and addition steps.
3673 * NB that doing the negations here as separate steps is
3674 * correct : an input NaN should come out with its sign bit
3675 * flipped if it is a negated-input.
3676 */
d614a513 3677 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3678 return 1;
3679 }
3680 if (dp) {
3681 TCGv_ptr fpst;
3682 TCGv_i64 frd;
3683 if (op & 1) {
3684 /* VFNMS, VFMS */
3685 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3686 }
3687 frd = tcg_temp_new_i64();
3688 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3689 if (op & 2) {
3690 /* VFNMA, VFNMS */
3691 gen_helper_vfp_negd(frd, frd);
3692 }
3693 fpst = get_fpstatus_ptr(0);
3694 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3695 cpu_F1d, frd, fpst);
3696 tcg_temp_free_ptr(fpst);
3697 tcg_temp_free_i64(frd);
3698 } else {
3699 TCGv_ptr fpst;
3700 TCGv_i32 frd;
3701 if (op & 1) {
3702 /* VFNMS, VFMS */
3703 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3704 }
3705 frd = tcg_temp_new_i32();
3706 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3707 if (op & 2) {
3708 gen_helper_vfp_negs(frd, frd);
3709 }
3710 fpst = get_fpstatus_ptr(0);
3711 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3712 cpu_F1s, frd, fpst);
3713 tcg_temp_free_ptr(fpst);
3714 tcg_temp_free_i32(frd);
3715 }
3716 break;
9ee6e8bb 3717 case 14: /* fconst */
d614a513
PM
3718 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3719 return 1;
3720 }
9ee6e8bb
PB
3721
3722 n = (insn << 12) & 0x80000000;
3723 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3724 if (dp) {
3725 if (i & 0x40)
3726 i |= 0x3f80;
3727 else
3728 i |= 0x4000;
3729 n |= i << 16;
4373f3ce 3730 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3731 } else {
3732 if (i & 0x40)
3733 i |= 0x780;
3734 else
3735 i |= 0x800;
3736 n |= i << 19;
5b340b51 3737 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3738 }
9ee6e8bb 3739 break;
b7bcbe95
FB
3740 case 15: /* extension space */
3741 switch (rn) {
3742 case 0: /* cpy */
3743 /* no-op */
3744 break;
3745 case 1: /* abs */
3746 gen_vfp_abs(dp);
3747 break;
3748 case 2: /* neg */
3749 gen_vfp_neg(dp);
3750 break;
3751 case 3: /* sqrt */
3752 gen_vfp_sqrt(dp);
3753 break;
239c20c7 3754 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3755 tmp = gen_vfp_mrs();
3756 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3757 if (dp) {
3758 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3759 cpu_env);
3760 } else {
3761 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3762 cpu_env);
3763 }
7d1b0095 3764 tcg_temp_free_i32(tmp);
60011498 3765 break;
239c20c7 3766 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3767 tmp = gen_vfp_mrs();
3768 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3769 if (dp) {
3770 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3771 cpu_env);
3772 } else {
3773 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3774 cpu_env);
3775 }
7d1b0095 3776 tcg_temp_free_i32(tmp);
60011498 3777 break;
239c20c7 3778 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3779 tmp = tcg_temp_new_i32();
239c20c7
WN
3780 if (dp) {
3781 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3782 cpu_env);
3783 } else {
3784 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3785 cpu_env);
3786 }
60011498
PB
3787 gen_mov_F0_vreg(0, rd);
3788 tmp2 = gen_vfp_mrs();
3789 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3790 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3791 tcg_temp_free_i32(tmp2);
60011498
PB
3792 gen_vfp_msr(tmp);
3793 break;
239c20c7 3794 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3795 tmp = tcg_temp_new_i32();
239c20c7
WN
3796 if (dp) {
3797 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3798 cpu_env);
3799 } else {
3800 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3801 cpu_env);
3802 }
60011498
PB
3803 tcg_gen_shli_i32(tmp, tmp, 16);
3804 gen_mov_F0_vreg(0, rd);
3805 tmp2 = gen_vfp_mrs();
3806 tcg_gen_ext16u_i32(tmp2, tmp2);
3807 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3808 tcg_temp_free_i32(tmp2);
60011498
PB
3809 gen_vfp_msr(tmp);
3810 break;
b7bcbe95
FB
3811 case 8: /* cmp */
3812 gen_vfp_cmp(dp);
3813 break;
3814 case 9: /* cmpe */
3815 gen_vfp_cmpe(dp);
3816 break;
3817 case 10: /* cmpz */
3818 gen_vfp_cmp(dp);
3819 break;
3820 case 11: /* cmpez */
3821 gen_vfp_F1_ld0(dp);
3822 gen_vfp_cmpe(dp);
3823 break;
664c6733
WN
3824 case 12: /* vrintr */
3825 {
3826 TCGv_ptr fpst = get_fpstatus_ptr(0);
3827 if (dp) {
3828 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3829 } else {
3830 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3831 }
3832 tcg_temp_free_ptr(fpst);
3833 break;
3834 }
a290c62a
WN
3835 case 13: /* vrintz */
3836 {
3837 TCGv_ptr fpst = get_fpstatus_ptr(0);
3838 TCGv_i32 tcg_rmode;
3839 tcg_rmode = tcg_const_i32(float_round_to_zero);
3840 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3841 if (dp) {
3842 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3843 } else {
3844 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3845 }
3846 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3847 tcg_temp_free_i32(tcg_rmode);
3848 tcg_temp_free_ptr(fpst);
3849 break;
3850 }
4e82bc01
WN
3851 case 14: /* vrintx */
3852 {
3853 TCGv_ptr fpst = get_fpstatus_ptr(0);
3854 if (dp) {
3855 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3856 } else {
3857 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3858 }
3859 tcg_temp_free_ptr(fpst);
3860 break;
3861 }
b7bcbe95
FB
3862 case 15: /* single<->double conversion */
3863 if (dp)
4373f3ce 3864 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3865 else
4373f3ce 3866 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3867 break;
3868 case 16: /* fuito */
5500b06c 3869 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3870 break;
3871 case 17: /* fsito */
5500b06c 3872 gen_vfp_sito(dp, 0);
b7bcbe95 3873 break;
9ee6e8bb 3874 case 20: /* fshto */
d614a513
PM
3875 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3876 return 1;
3877 }
5500b06c 3878 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3879 break;
3880 case 21: /* fslto */
d614a513
PM
3881 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3882 return 1;
3883 }
5500b06c 3884 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3885 break;
3886 case 22: /* fuhto */
d614a513
PM
3887 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3888 return 1;
3889 }
5500b06c 3890 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3891 break;
3892 case 23: /* fulto */
d614a513
PM
3893 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3894 return 1;
3895 }
5500b06c 3896 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3897 break;
b7bcbe95 3898 case 24: /* ftoui */
5500b06c 3899 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3900 break;
3901 case 25: /* ftouiz */
5500b06c 3902 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3903 break;
3904 case 26: /* ftosi */
5500b06c 3905 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3906 break;
3907 case 27: /* ftosiz */
5500b06c 3908 gen_vfp_tosiz(dp, 0);
b7bcbe95 3909 break;
9ee6e8bb 3910 case 28: /* ftosh */
d614a513
PM
3911 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3912 return 1;
3913 }
5500b06c 3914 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3915 break;
3916 case 29: /* ftosl */
d614a513
PM
3917 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3918 return 1;
3919 }
5500b06c 3920 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3921 break;
3922 case 30: /* ftouh */
d614a513
PM
3923 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3924 return 1;
3925 }
5500b06c 3926 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3927 break;
3928 case 31: /* ftoul */
d614a513
PM
3929 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3930 return 1;
3931 }
5500b06c 3932 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3933 break;
b7bcbe95 3934 default: /* undefined */
b7bcbe95
FB
3935 return 1;
3936 }
3937 break;
3938 default: /* undefined */
b7bcbe95
FB
3939 return 1;
3940 }
3941
3942 /* Write back the result. */
239c20c7
WN
3943 if (op == 15 && (rn >= 8 && rn <= 11)) {
3944 /* Comparison, do nothing. */
3945 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3946 (rn & 0x1e) == 0x6)) {
3947 /* VCVT double to int: always integer result.
3948 * VCVT double to half precision is always a single
3949 * precision result.
3950 */
b7bcbe95 3951 gen_mov_vreg_F0(0, rd);
239c20c7 3952 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3953 /* conversion */
3954 gen_mov_vreg_F0(!dp, rd);
239c20c7 3955 } else {
b7bcbe95 3956 gen_mov_vreg_F0(dp, rd);
239c20c7 3957 }
b7bcbe95
FB
3958
3959 /* break out of the loop if we have finished */
3960 if (veclen == 0)
3961 break;
3962
3963 if (op == 15 && delta_m == 0) {
3964 /* single source one-many */
3965 while (veclen--) {
3966 rd = ((rd + delta_d) & (bank_mask - 1))
3967 | (rd & bank_mask);
3968 gen_mov_vreg_F0(dp, rd);
3969 }
3970 break;
3971 }
3972 /* Setup the next operands. */
3973 veclen--;
3974 rd = ((rd + delta_d) & (bank_mask - 1))
3975 | (rd & bank_mask);
3976
3977 if (op == 15) {
3978 /* One source operand. */
3979 rm = ((rm + delta_m) & (bank_mask - 1))
3980 | (rm & bank_mask);
3981 gen_mov_F0_vreg(dp, rm);
3982 } else {
3983 /* Two source operands. */
3984 rn = ((rn + delta_d) & (bank_mask - 1))
3985 | (rn & bank_mask);
3986 gen_mov_F0_vreg(dp, rn);
3987 if (delta_m) {
3988 rm = ((rm + delta_m) & (bank_mask - 1))
3989 | (rm & bank_mask);
3990 gen_mov_F1_vreg(dp, rm);
3991 }
3992 }
3993 }
3994 }
3995 break;
3996 case 0xc:
3997 case 0xd:
8387da81 3998 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3999 /* two-register transfer */
4000 rn = (insn >> 16) & 0xf;
4001 rd = (insn >> 12) & 0xf;
4002 if (dp) {
9ee6e8bb
PB
4003 VFP_DREG_M(rm, insn);
4004 } else {
4005 rm = VFP_SREG_M(insn);
4006 }
b7bcbe95 4007
18c9b560 4008 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4009 /* vfp->arm */
4010 if (dp) {
4373f3ce
PB
4011 gen_mov_F0_vreg(0, rm * 2);
4012 tmp = gen_vfp_mrs();
4013 store_reg(s, rd, tmp);
4014 gen_mov_F0_vreg(0, rm * 2 + 1);
4015 tmp = gen_vfp_mrs();
4016 store_reg(s, rn, tmp);
b7bcbe95
FB
4017 } else {
4018 gen_mov_F0_vreg(0, rm);
4373f3ce 4019 tmp = gen_vfp_mrs();
8387da81 4020 store_reg(s, rd, tmp);
b7bcbe95 4021 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4022 tmp = gen_vfp_mrs();
8387da81 4023 store_reg(s, rn, tmp);
b7bcbe95
FB
4024 }
4025 } else {
4026 /* arm->vfp */
4027 if (dp) {
4373f3ce
PB
4028 tmp = load_reg(s, rd);
4029 gen_vfp_msr(tmp);
4030 gen_mov_vreg_F0(0, rm * 2);
4031 tmp = load_reg(s, rn);
4032 gen_vfp_msr(tmp);
4033 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4034 } else {
8387da81 4035 tmp = load_reg(s, rd);
4373f3ce 4036 gen_vfp_msr(tmp);
b7bcbe95 4037 gen_mov_vreg_F0(0, rm);
8387da81 4038 tmp = load_reg(s, rn);
4373f3ce 4039 gen_vfp_msr(tmp);
b7bcbe95
FB
4040 gen_mov_vreg_F0(0, rm + 1);
4041 }
4042 }
4043 } else {
4044 /* Load/store */
4045 rn = (insn >> 16) & 0xf;
4046 if (dp)
9ee6e8bb 4047 VFP_DREG_D(rd, insn);
b7bcbe95 4048 else
9ee6e8bb 4049 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4050 if ((insn & 0x01200000) == 0x01000000) {
4051 /* Single load/store */
4052 offset = (insn & 0xff) << 2;
4053 if ((insn & (1 << 23)) == 0)
4054 offset = -offset;
934814f1
PM
4055 if (s->thumb && rn == 15) {
4056 /* This is actually UNPREDICTABLE */
4057 addr = tcg_temp_new_i32();
4058 tcg_gen_movi_i32(addr, s->pc & ~2);
4059 } else {
4060 addr = load_reg(s, rn);
4061 }
312eea9f 4062 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4063 if (insn & (1 << 20)) {
312eea9f 4064 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4065 gen_mov_vreg_F0(dp, rd);
4066 } else {
4067 gen_mov_F0_vreg(dp, rd);
312eea9f 4068 gen_vfp_st(s, dp, addr);
b7bcbe95 4069 }
7d1b0095 4070 tcg_temp_free_i32(addr);
b7bcbe95
FB
4071 } else {
4072 /* load/store multiple */
934814f1 4073 int w = insn & (1 << 21);
b7bcbe95
FB
4074 if (dp)
4075 n = (insn >> 1) & 0x7f;
4076 else
4077 n = insn & 0xff;
4078
934814f1
PM
4079 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4080 /* P == U , W == 1 => UNDEF */
4081 return 1;
4082 }
4083 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4084 /* UNPREDICTABLE cases for bad immediates: we choose to
4085 * UNDEF to avoid generating huge numbers of TCG ops
4086 */
4087 return 1;
4088 }
4089 if (rn == 15 && w) {
4090 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4091 return 1;
4092 }
4093
4094 if (s->thumb && rn == 15) {
4095 /* This is actually UNPREDICTABLE */
4096 addr = tcg_temp_new_i32();
4097 tcg_gen_movi_i32(addr, s->pc & ~2);
4098 } else {
4099 addr = load_reg(s, rn);
4100 }
b7bcbe95 4101 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4102 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4103
4104 if (dp)
4105 offset = 8;
4106 else
4107 offset = 4;
4108 for (i = 0; i < n; i++) {
18c9b560 4109 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4110 /* load */
312eea9f 4111 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4112 gen_mov_vreg_F0(dp, rd + i);
4113 } else {
4114 /* store */
4115 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4116 gen_vfp_st(s, dp, addr);
b7bcbe95 4117 }
312eea9f 4118 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4119 }
934814f1 4120 if (w) {
b7bcbe95
FB
4121 /* writeback */
4122 if (insn & (1 << 24))
4123 offset = -offset * n;
4124 else if (dp && (insn & 1))
4125 offset = 4;
4126 else
4127 offset = 0;
4128
4129 if (offset != 0)
312eea9f
FN
4130 tcg_gen_addi_i32(addr, addr, offset);
4131 store_reg(s, rn, addr);
4132 } else {
7d1b0095 4133 tcg_temp_free_i32(addr);
b7bcbe95
FB
4134 }
4135 }
4136 }
4137 break;
4138 default:
4139 /* Should never happen. */
4140 return 1;
4141 }
4142 return 0;
4143}
4144
90aa39a1 4145static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4146{
90aa39a1 4147#ifndef CONFIG_USER_ONLY
dcba3a8d 4148 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4149 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4150#else
4151 return true;
4152#endif
4153}
/* Emit an indirect jump via the TB lookup-and-goto-ptr mechanism,
 * using the current value of PC (cpu_R[15]), which the caller must
 * already have updated.
 */
static void gen_goto_ptr(void)
{
    TCGv addr = tcg_temp_new();
    /* Widen the 32-bit PC to target_ulong for the lookup helper. */
    tcg_gen_extu_i32_tl(addr, cpu_R[15]);
    tcg_gen_lookup_and_goto_ptr(addr);
    tcg_temp_free(addr);
}
/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Chain directly to the destination TB (slot n). */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
    } else {
        /* Cross-page: fall back to a hash-table lookup jump. */
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
4179
8aaca4c0
FB
4180static inline void gen_jmp (DisasContext *s, uint32_t dest)
4181{
b636649f 4182 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4183 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4184 if (s->thumb)
d9ba4830
PB
4185 dest |= 1;
4186 gen_bx_im(s, dest);
8aaca4c0 4187 } else {
6e256c93 4188 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4189 }
4190}
4191
39d5492a 4192static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4193{
ee097184 4194 if (x)
d9ba4830 4195 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4196 else
d9ba4830 4197 gen_sxth(t0);
ee097184 4198 if (y)
d9ba4830 4199 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4200 else
d9ba4830
PB
4201 gen_sxth(t1);
4202 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4203}
4204
4205/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4206static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4207{
b5ff1b31
FB
4208 uint32_t mask;
4209
4210 mask = 0;
4211 if (flags & (1 << 0))
4212 mask |= 0xff;
4213 if (flags & (1 << 1))
4214 mask |= 0xff00;
4215 if (flags & (1 << 2))
4216 mask |= 0xff0000;
4217 if (flags & (1 << 3))
4218 mask |= 0xff000000;
9ee6e8bb 4219
2ae23e75 4220 /* Mask out undefined bits. */
9ee6e8bb 4221 mask &= ~CPSR_RESERVED;
d614a513 4222 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4223 mask &= ~CPSR_T;
d614a513
PM
4224 }
4225 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4226 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4227 }
4228 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4229 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4230 }
4231 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4232 mask &= ~CPSR_IT;
d614a513 4233 }
4051e12c
PM
4234 /* Mask out execution state and reserved bits. */
4235 if (!spsr) {
4236 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4237 }
b5ff1b31
FB
4238 /* Mask out privileged bits. */
4239 if (IS_USER(s))
9ee6e8bb 4240 mask &= CPSR_USER;
b5ff1b31
FB
4241 return mask;
4242}
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: only the bits selected by mask change. */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* A PSR write may change mode/flags, so force a new TB lookup. */
    gen_lookup_tb(s);
    return 0;
}
4265
2fbac54b
FN
4266/* Returns nonzero if access to the PSR is not permitted. */
4267static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4268{
39d5492a 4269 TCGv_i32 tmp;
7d1b0095 4270 tmp = tcg_temp_new_i32();
2fbac54b
FN
4271 tcg_gen_movi_i32(tmp, val);
4272 return gen_set_psr(s, mask, spsr, tmp);
4273}
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
/* Emit code for an MSR (banked) instruction: write general register rn
 * to the banked register identified by (r, sysm).  UNDEFs (via the
 * decode helper) on all the unpredictable encodings.
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    /* The helper may have changed mode state; end the TB. */
    s->base.is_jmp = DISAS_UPDATE;
}
/* Emit code for an MRS (banked) instruction: read the banked register
 * identified by (r, sysm) into general register rn.  Mirrors
 * gen_msr_banked() above.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    /* Marks pc as dead for the caller. */
    tcg_temp_free_i32(pc);
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* Old-style return restores CPSR from the current mode's SPSR. */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        if (!parallel_cpus) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!parallel_cpus) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
/* Operand triple for ops on the 64-bit V0/V1 scratch values:
 * dest = cpu_V0, src1 = cpu_V0, src2 = cpu_V1.
 */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4530
39d5492a 4531static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4532{
4533 switch (size) {
dd8fbd78
FN
4534 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4535 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4536 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4537 default: abort();
9ee6e8bb 4538 }
9ee6e8bb
PB
4539}
4540
39d5492a 4541static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4542{
4543 switch (size) {
dd8fbd78
FN
4544 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4545 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4546 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4547 default: return;
4548 }
4549}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Dispatch to the 8/16/32-bit signed/unsigned helper selected by the
 * in-scope 'size' and 'u' variables, for helpers that take cpu_env.
 * Expands in a context where 'tmp' and 'tmp2' hold the operands;
 * causes the enclosing function to return 1 on a bad size/u combination.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not need cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4602
39d5492a 4603static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4604{
39d5492a 4605 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4606 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4607 return tmp;
9ee6e8bb
PB
4608}
/* Store var to the given vfp scratch slot and free it (marks var dead). */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4615
39d5492a 4616static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4617{
39d5492a 4618 TCGv_i32 tmp;
9ee6e8bb 4619 if (size == 1) {
0fad6efc
PM
4620 tmp = neon_load_reg(reg & 7, reg >> 4);
4621 if (reg & 8) {
dd8fbd78 4622 gen_neon_dup_high16(tmp);
0fad6efc
PM
4623 } else {
4624 gen_neon_dup_low16(tmp);
dd8fbd78 4625 }
0fad6efc
PM
4626 } else {
4627 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4628 }
dd8fbd78 4629 return tmp;
9ee6e8bb
PB
4630}
/* Emit a VUZP (unzip) of registers rd/rm via the helper functions.
 * Returns 1 (UNDEF) for the invalid D-register 32-bit case, else 0.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* Helpers take the register numbers as i32 constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
/* Emit a VZIP (zip/interleave) of registers rd/rm via the helper
 * functions.  Returns 1 (UNDEF) for the invalid D-register 32-bit
 * case, else 0.  Mirrors gen_neon_unzip() above.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
/* VTRN for 8-bit elements: exchange the odd bytes of t0 with the
 * even bytes of t1, in place.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 even bytes shifted up) | (t1 even bytes). */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 odd bytes shifted down) | (t0 odd bytes). */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
/* VTRN for 16-bit elements: exchange the high halfword of t0 with the
 * low halfword of t1, in place.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = t0.low : t1.low */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = t0.high : t1.high */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4750
4751
/* Layout table for the NEON "load/store multiple structures" forms,
 * indexed by the 'op' field (insn bits [11:8]): number of registers
 * transferred, element interleave factor, and D-register spacing.
 * The table is read-only, so declare it const (it was previously a
 * mutable file-scope object for no reason).
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Re-derive the element address for interleaved layouts. */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: whole D register at a time. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Two 32-bit passes cover one D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Assemble/split a word from two halfword accesses. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Assemble/split a word from four byte accesses. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the existing lane. */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Base register writeback: rm == 13 means post-increment by the
         * transfer size, otherwise add register rm.
         */
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 5098
8f8e3aa4 5099/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5100static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5101{
5102 tcg_gen_and_i32(t, t, c);
f669df27 5103 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5104 tcg_gen_or_i32(dest, t, f);
5105}
5106
39d5492a 5107static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5108{
5109 switch (size) {
5110 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5111 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5112 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5113 default: abort();
5114 }
5115}
5116
39d5492a 5117static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5118{
5119 switch (size) {
02da0b2d
PM
5120 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5121 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5122 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5123 default: abort();
5124 }
5125}
5126
39d5492a 5127static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5128{
5129 switch (size) {
02da0b2d
PM
5130 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5131 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5132 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5133 default: abort();
5134 }
5135}
5136
39d5492a 5137static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5138{
5139 switch (size) {
02da0b2d
PM
5140 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5141 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5142 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5143 default: abort();
5144 }
5145}
5146
39d5492a 5147static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5148 int q, int u)
5149{
5150 if (q) {
5151 if (u) {
5152 switch (size) {
5153 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5154 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5155 default: abort();
5156 }
5157 } else {
5158 switch (size) {
5159 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5160 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5161 default: abort();
5162 }
5163 }
5164 } else {
5165 if (u) {
5166 switch (size) {
b408a9b0
CL
5167 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5168 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5169 default: abort();
5170 }
5171 } else {
5172 switch (size) {
5173 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5174 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5175 default: abort();
5176 }
5177 }
5178 }
5179}
5180
39d5492a 5181static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5182{
5183 if (u) {
5184 switch (size) {
5185 case 0: gen_helper_neon_widen_u8(dest, src); break;
5186 case 1: gen_helper_neon_widen_u16(dest, src); break;
5187 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5188 default: abort();
5189 }
5190 } else {
5191 switch (size) {
5192 case 0: gen_helper_neon_widen_s8(dest, src); break;
5193 case 1: gen_helper_neon_widen_s16(dest, src); break;
5194 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5195 default: abort();
5196 }
5197 }
7d1b0095 5198 tcg_temp_free_i32(src);
ad69471c
PB
5199}
5200
5201static inline void gen_neon_addl(int size)
5202{
5203 switch (size) {
5204 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5205 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5206 case 2: tcg_gen_add_i64(CPU_V001); break;
5207 default: abort();
5208 }
5209}
5210
5211static inline void gen_neon_subl(int size)
5212{
5213 switch (size) {
5214 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5215 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5216 case 2: tcg_gen_sub_i64(CPU_V001); break;
5217 default: abort();
5218 }
5219}
5220
a7812ae4 5221static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5222{
5223 switch (size) {
5224 case 0: gen_helper_neon_negl_u16(var, var); break;
5225 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5226 case 2:
5227 tcg_gen_neg_i64(var, var);
5228 break;
ad69471c
PB
5229 default: abort();
5230 }
5231}
5232
a7812ae4 5233static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5234{
5235 switch (size) {
02da0b2d
PM
5236 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5237 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5238 default: abort();
5239 }
5240}
5241
39d5492a
PM
5242static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5243 int size, int u)
ad69471c 5244{
a7812ae4 5245 TCGv_i64 tmp;
ad69471c
PB
5246
5247 switch ((size << 1) | u) {
5248 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5249 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5250 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5251 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5252 case 4:
5253 tmp = gen_muls_i64_i32(a, b);
5254 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5255 tcg_temp_free_i64(tmp);
ad69471c
PB
5256 break;
5257 case 5:
5258 tmp = gen_mulu_i64_i32(a, b);
5259 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5260 tcg_temp_free_i64(tmp);
ad69471c
PB
5261 break;
5262 default: abort();
5263 }
c6067f04
CL
5264
5265 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5266 Don't forget to clean them now. */
5267 if (size < 2) {
7d1b0095
PM
5268 tcg_temp_free_i32(a);
5269 tcg_temp_free_i32(b);
c6067f04 5270 }
ad69471c
PB
5271}
5272
39d5492a
PM
5273static void gen_neon_narrow_op(int op, int u, int size,
5274 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5275{
5276 if (op) {
5277 if (u) {
5278 gen_neon_unarrow_sats(size, dest, src);
5279 } else {
5280 gen_neon_narrow(size, dest, src);
5281 }
5282 } else {
5283 if (u) {
5284 gen_neon_narrow_satu(size, dest, src);
5285 } else {
5286 gen_neon_narrow_sats(size, dest, src);
5287 }
5288 }
5289}
5290
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* For each 3-reg-same op, a bitmask of the size field values the insn
 * accepts: bit n set means size value n is allowed.  The decoder UNDEFs
 * any op/size combination whose bit is clear.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5362
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering (e.g. 3, 29) are unallocated
 * encodings; they get no bits in neon_2rm_sizes[] and so always UNDEF.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5430static int neon_2rm_is_float_op(int op)
5431{
5432 /* Return true if this neon 2reg-misc op is float-to-float */
5433 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5434 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5435 op == NEON_2RM_VRINTM ||
5436 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5437 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5438}
5439
fe8fcf3d
PM
5440static bool neon_2rm_is_v8_op(int op)
5441{
5442 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5443 switch (op) {
5444 case NEON_2RM_VRINTN:
5445 case NEON_2RM_VRINTA:
5446 case NEON_2RM_VRINTM:
5447 case NEON_2RM_VRINTP:
5448 case NEON_2RM_VRINTZ:
5449 case NEON_2RM_VRINTX:
5450 case NEON_2RM_VCVTAU:
5451 case NEON_2RM_VCVTAS:
5452 case NEON_2RM_VCVTNU:
5453 case NEON_2RM_VCVTNS:
5454 case NEON_2RM_VCVTPU:
5455 case NEON_2RM_VCVTPS:
5456 case NEON_2RM_VCVTMU:
5457 case NEON_2RM_VCVTMS:
5458 return true;
5459 default:
5460 return false;
5461 }
5462}
5463
600b828c
PM
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x7 = 8/16/32-bit sizes; 0x4 = 32-bit only; 0x1 = 8-bit only;
 * 0x2/0x3 as noted per-op.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5532
9ee6e8bb
PB
5533/* Translate a NEON data processing instruction. Return nonzero if the
5534 instruction is invalid.
ad69471c
PB
5535 We process data in a mixture of 32-bit and 64-bit chunks.
5536 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5537
7dcc1f89 5538static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5539{
5540 int op;
5541 int q;
5542 int rd, rn, rm;
5543 int size;
5544 int shift;
5545 int pass;
5546 int count;
5547 int pairwise;
5548 int u;
ca9a32e4 5549 uint32_t imm, mask;
39d5492a 5550 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5551 TCGv_i64 tmp64;
9ee6e8bb 5552
2c7ffc41
PM
5553 /* FIXME: this access check should not take precedence over UNDEF
5554 * for invalid encodings; we will generate incorrect syndrome information
5555 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5556 */
9dbbc748 5557 if (s->fp_excp_el) {
2c7ffc41 5558 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5559 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5560 return 0;
5561 }
5562
5df8bac1 5563 if (!s->vfp_enabled)
9ee6e8bb
PB
5564 return 1;
5565 q = (insn & (1 << 6)) != 0;
5566 u = (insn >> 24) & 1;
5567 VFP_DREG_D(rd, insn);
5568 VFP_DREG_N(rn, insn);
5569 VFP_DREG_M(rm, insn);
5570 size = (insn >> 20) & 3;
5571 if ((insn & (1 << 23)) == 0) {
5572 /* Three register same length. */
5573 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5574 /* Catch invalid op and bad size combinations: UNDEF */
5575 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5576 return 1;
5577 }
25f84f79
PM
5578 /* All insns of this form UNDEF for either this condition or the
5579 * superset of cases "Q==1"; we catch the latter later.
5580 */
5581 if (q && ((rd | rn | rm) & 1)) {
5582 return 1;
5583 }
f1ecb913
AB
5584 /*
5585 * The SHA-1/SHA-256 3-register instructions require special treatment
5586 * here, as their size field is overloaded as an op type selector, and
5587 * they all consume their input in a single pass.
5588 */
5589 if (op == NEON_3R_SHA) {
5590 if (!q) {
5591 return 1;
5592 }
5593 if (!u) { /* SHA-1 */
d614a513 5594 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5595 return 1;
5596 }
5597 tmp = tcg_const_i32(rd);
5598 tmp2 = tcg_const_i32(rn);
5599 tmp3 = tcg_const_i32(rm);
5600 tmp4 = tcg_const_i32(size);
5601 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5602 tcg_temp_free_i32(tmp4);
5603 } else { /* SHA-256 */
d614a513 5604 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5605 return 1;
5606 }
5607 tmp = tcg_const_i32(rd);
5608 tmp2 = tcg_const_i32(rn);
5609 tmp3 = tcg_const_i32(rm);
5610 switch (size) {
5611 case 0:
5612 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5613 break;
5614 case 1:
5615 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5616 break;
5617 case 2:
5618 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5619 break;
5620 }
5621 }
5622 tcg_temp_free_i32(tmp);
5623 tcg_temp_free_i32(tmp2);
5624 tcg_temp_free_i32(tmp3);
5625 return 0;
5626 }
62698be3
PM
5627 if (size == 3 && op != NEON_3R_LOGIC) {
5628 /* 64-bit element instructions. */
9ee6e8bb 5629 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5630 neon_load_reg64(cpu_V0, rn + pass);
5631 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5632 switch (op) {
62698be3 5633 case NEON_3R_VQADD:
9ee6e8bb 5634 if (u) {
02da0b2d
PM
5635 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5636 cpu_V0, cpu_V1);
2c0262af 5637 } else {
02da0b2d
PM
5638 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5639 cpu_V0, cpu_V1);
2c0262af 5640 }
9ee6e8bb 5641 break;
62698be3 5642 case NEON_3R_VQSUB:
9ee6e8bb 5643 if (u) {
02da0b2d
PM
5644 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5645 cpu_V0, cpu_V1);
ad69471c 5646 } else {
02da0b2d
PM
5647 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5648 cpu_V0, cpu_V1);
ad69471c
PB
5649 }
5650 break;
62698be3 5651 case NEON_3R_VSHL:
ad69471c
PB
5652 if (u) {
5653 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5654 } else {
5655 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5656 }
5657 break;
62698be3 5658 case NEON_3R_VQSHL:
ad69471c 5659 if (u) {
02da0b2d
PM
5660 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5661 cpu_V1, cpu_V0);
ad69471c 5662 } else {
02da0b2d
PM
5663 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5664 cpu_V1, cpu_V0);
ad69471c
PB
5665 }
5666 break;
62698be3 5667 case NEON_3R_VRSHL:
ad69471c
PB
5668 if (u) {
5669 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5670 } else {
ad69471c
PB
5671 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5672 }
5673 break;
62698be3 5674 case NEON_3R_VQRSHL:
ad69471c 5675 if (u) {
02da0b2d
PM
5676 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5677 cpu_V1, cpu_V0);
ad69471c 5678 } else {
02da0b2d
PM
5679 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5680 cpu_V1, cpu_V0);
1e8d4eec 5681 }
9ee6e8bb 5682 break;
62698be3 5683 case NEON_3R_VADD_VSUB:
9ee6e8bb 5684 if (u) {
ad69471c 5685 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5686 } else {
ad69471c 5687 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5688 }
5689 break;
5690 default:
5691 abort();
2c0262af 5692 }
ad69471c 5693 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5694 }
9ee6e8bb 5695 return 0;
2c0262af 5696 }
25f84f79 5697 pairwise = 0;
9ee6e8bb 5698 switch (op) {
62698be3
PM
5699 case NEON_3R_VSHL:
5700 case NEON_3R_VQSHL:
5701 case NEON_3R_VRSHL:
5702 case NEON_3R_VQRSHL:
9ee6e8bb 5703 {
ad69471c
PB
5704 int rtmp;
5705 /* Shift instruction operands are reversed. */
5706 rtmp = rn;
9ee6e8bb 5707 rn = rm;
ad69471c 5708 rm = rtmp;
9ee6e8bb 5709 }
2c0262af 5710 break;
25f84f79
PM
5711 case NEON_3R_VPADD:
5712 if (u) {
5713 return 1;
5714 }
5715 /* Fall through */
62698be3
PM
5716 case NEON_3R_VPMAX:
5717 case NEON_3R_VPMIN:
9ee6e8bb 5718 pairwise = 1;
2c0262af 5719 break;
25f84f79
PM
5720 case NEON_3R_FLOAT_ARITH:
5721 pairwise = (u && size < 2); /* if VPADD (float) */
5722 break;
5723 case NEON_3R_FLOAT_MINMAX:
5724 pairwise = u; /* if VPMIN/VPMAX (float) */
5725 break;
5726 case NEON_3R_FLOAT_CMP:
5727 if (!u && size) {
5728 /* no encoding for U=0 C=1x */
5729 return 1;
5730 }
5731 break;
5732 case NEON_3R_FLOAT_ACMP:
5733 if (!u) {
5734 return 1;
5735 }
5736 break;
505935fc
WN
5737 case NEON_3R_FLOAT_MISC:
5738 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5739 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5740 return 1;
5741 }
2c0262af 5742 break;
25f84f79
PM
5743 case NEON_3R_VMUL:
5744 if (u && (size != 0)) {
5745 /* UNDEF on invalid size for polynomial subcase */
5746 return 1;
5747 }
2c0262af 5748 break;
da97f52c 5749 case NEON_3R_VFM:
d614a513 5750 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5751 return 1;
5752 }
5753 break;
9ee6e8bb 5754 default:
2c0262af 5755 break;
9ee6e8bb 5756 }
dd8fbd78 5757
25f84f79
PM
5758 if (pairwise && q) {
5759 /* All the pairwise insns UNDEF if Q is set */
5760 return 1;
5761 }
5762
9ee6e8bb
PB
5763 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5764
5765 if (pairwise) {
5766 /* Pairwise. */
a5a14945
JR
5767 if (pass < 1) {
5768 tmp = neon_load_reg(rn, 0);
5769 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5770 } else {
a5a14945
JR
5771 tmp = neon_load_reg(rm, 0);
5772 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5773 }
5774 } else {
5775 /* Elementwise. */
dd8fbd78
FN
5776 tmp = neon_load_reg(rn, pass);
5777 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5778 }
5779 switch (op) {
62698be3 5780 case NEON_3R_VHADD:
9ee6e8bb
PB
5781 GEN_NEON_INTEGER_OP(hadd);
5782 break;
62698be3 5783 case NEON_3R_VQADD:
02da0b2d 5784 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5785 break;
62698be3 5786 case NEON_3R_VRHADD:
9ee6e8bb 5787 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5788 break;
62698be3 5789 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5790 switch ((u << 2) | size) {
5791 case 0: /* VAND */
dd8fbd78 5792 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5793 break;
5794 case 1: /* BIC */
f669df27 5795 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5796 break;
5797 case 2: /* VORR */
dd8fbd78 5798 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5799 break;
5800 case 3: /* VORN */
f669df27 5801 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5802 break;
5803 case 4: /* VEOR */
dd8fbd78 5804 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5805 break;
5806 case 5: /* VBSL */
dd8fbd78
FN
5807 tmp3 = neon_load_reg(rd, pass);
5808 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5809 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5810 break;
5811 case 6: /* VBIT */
dd8fbd78
FN
5812 tmp3 = neon_load_reg(rd, pass);
5813 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5814 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5815 break;
5816 case 7: /* VBIF */
dd8fbd78
FN
5817 tmp3 = neon_load_reg(rd, pass);
5818 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5819 tcg_temp_free_i32(tmp3);
9ee6e8bb 5820 break;
2c0262af
FB
5821 }
5822 break;
62698be3 5823 case NEON_3R_VHSUB:
9ee6e8bb
PB
5824 GEN_NEON_INTEGER_OP(hsub);
5825 break;
62698be3 5826 case NEON_3R_VQSUB:
02da0b2d 5827 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5828 break;
62698be3 5829 case NEON_3R_VCGT:
9ee6e8bb
PB
5830 GEN_NEON_INTEGER_OP(cgt);
5831 break;
62698be3 5832 case NEON_3R_VCGE:
9ee6e8bb
PB
5833 GEN_NEON_INTEGER_OP(cge);
5834 break;
62698be3 5835 case NEON_3R_VSHL:
ad69471c 5836 GEN_NEON_INTEGER_OP(shl);
2c0262af 5837 break;
62698be3 5838 case NEON_3R_VQSHL:
02da0b2d 5839 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5840 break;
62698be3 5841 case NEON_3R_VRSHL:
ad69471c 5842 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5843 break;
62698be3 5844 case NEON_3R_VQRSHL:
02da0b2d 5845 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5846 break;
62698be3 5847 case NEON_3R_VMAX:
9ee6e8bb
PB
5848 GEN_NEON_INTEGER_OP(max);
5849 break;
62698be3 5850 case NEON_3R_VMIN:
9ee6e8bb
PB
5851 GEN_NEON_INTEGER_OP(min);
5852 break;
62698be3 5853 case NEON_3R_VABD:
9ee6e8bb
PB
5854 GEN_NEON_INTEGER_OP(abd);
5855 break;
62698be3 5856 case NEON_3R_VABA:
9ee6e8bb 5857 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5858 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5859 tmp2 = neon_load_reg(rd, pass);
5860 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5861 break;
62698be3 5862 case NEON_3R_VADD_VSUB:
9ee6e8bb 5863 if (!u) { /* VADD */
62698be3 5864 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5865 } else { /* VSUB */
5866 switch (size) {
dd8fbd78
FN
5867 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5868 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5869 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5870 default: abort();
9ee6e8bb
PB
5871 }
5872 }
5873 break;
62698be3 5874 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5875 if (!u) { /* VTST */
5876 switch (size) {
dd8fbd78
FN
5877 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5878 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5879 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5880 default: abort();
9ee6e8bb
PB
5881 }
5882 } else { /* VCEQ */
5883 switch (size) {
dd8fbd78
FN
5884 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5885 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5886 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5887 default: abort();
9ee6e8bb
PB
5888 }
5889 }
5890 break;
62698be3 5891 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5892 switch (size) {
dd8fbd78
FN
5893 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5894 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5895 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5896 default: abort();
9ee6e8bb 5897 }
7d1b0095 5898 tcg_temp_free_i32(tmp2);
dd8fbd78 5899 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5900 if (u) { /* VMLS */
dd8fbd78 5901 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5902 } else { /* VMLA */
dd8fbd78 5903 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5904 }
5905 break;
62698be3 5906 case NEON_3R_VMUL:
9ee6e8bb 5907 if (u) { /* polynomial */
dd8fbd78 5908 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5909 } else { /* Integer */
5910 switch (size) {
dd8fbd78
FN
5911 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5912 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5913 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5914 default: abort();
9ee6e8bb
PB
5915 }
5916 }
5917 break;
62698be3 5918 case NEON_3R_VPMAX:
9ee6e8bb
PB
5919 GEN_NEON_INTEGER_OP(pmax);
5920 break;
62698be3 5921 case NEON_3R_VPMIN:
9ee6e8bb
PB
5922 GEN_NEON_INTEGER_OP(pmin);
5923 break;
62698be3 5924 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5925 if (!u) { /* VQDMULH */
5926 switch (size) {
02da0b2d
PM
5927 case 1:
5928 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5929 break;
5930 case 2:
5931 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5932 break;
62698be3 5933 default: abort();
9ee6e8bb 5934 }
62698be3 5935 } else { /* VQRDMULH */
9ee6e8bb 5936 switch (size) {
02da0b2d
PM
5937 case 1:
5938 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5939 break;
5940 case 2:
5941 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5942 break;
62698be3 5943 default: abort();
9ee6e8bb
PB
5944 }
5945 }
5946 break;
62698be3 5947 case NEON_3R_VPADD:
9ee6e8bb 5948 switch (size) {
dd8fbd78
FN
5949 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5950 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5951 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5952 default: abort();
9ee6e8bb
PB
5953 }
5954 break;
62698be3 5955 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5956 {
5957 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5958 switch ((u << 2) | size) {
5959 case 0: /* VADD */
aa47cfdd
PM
5960 case 4: /* VPADD */
5961 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5962 break;
5963 case 2: /* VSUB */
aa47cfdd 5964 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5965 break;
5966 case 6: /* VABD */
aa47cfdd 5967 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5968 break;
5969 default:
62698be3 5970 abort();
9ee6e8bb 5971 }
aa47cfdd 5972 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5973 break;
aa47cfdd 5974 }
62698be3 5975 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5976 {
5977 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5978 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5979 if (!u) {
7d1b0095 5980 tcg_temp_free_i32(tmp2);
dd8fbd78 5981 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5982 if (size == 0) {
aa47cfdd 5983 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5984 } else {
aa47cfdd 5985 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5986 }
5987 }
aa47cfdd 5988 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5989 break;
aa47cfdd 5990 }
62698be3 5991 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5992 {
5993 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5994 if (!u) {
aa47cfdd 5995 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5996 } else {
aa47cfdd
PM
5997 if (size == 0) {
5998 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5999 } else {
6000 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6001 }
b5ff1b31 6002 }
aa47cfdd 6003 tcg_temp_free_ptr(fpstatus);
2c0262af 6004 break;
aa47cfdd 6005 }
62698be3 6006 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6007 {
6008 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6009 if (size == 0) {
6010 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6011 } else {
6012 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6013 }
6014 tcg_temp_free_ptr(fpstatus);
2c0262af 6015 break;
aa47cfdd 6016 }
62698be3 6017 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6018 {
6019 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6020 if (size == 0) {
f71a2ae5 6021 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6022 } else {
f71a2ae5 6023 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6024 }
6025 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6026 break;
aa47cfdd 6027 }
505935fc
WN
6028 case NEON_3R_FLOAT_MISC:
6029 if (u) {
6030 /* VMAXNM/VMINNM */
6031 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6032 if (size == 0) {
f71a2ae5 6033 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6034 } else {
f71a2ae5 6035 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6036 }
6037 tcg_temp_free_ptr(fpstatus);
6038 } else {
6039 if (size == 0) {
6040 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6041 } else {
6042 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6043 }
6044 }
2c0262af 6045 break;
da97f52c
PM
6046 case NEON_3R_VFM:
6047 {
6048 /* VFMA, VFMS: fused multiply-add */
6049 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6050 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6051 if (size) {
6052 /* VFMS */
6053 gen_helper_vfp_negs(tmp, tmp);
6054 }
6055 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6056 tcg_temp_free_i32(tmp3);
6057 tcg_temp_free_ptr(fpstatus);
6058 break;
6059 }
9ee6e8bb
PB
6060 default:
6061 abort();
2c0262af 6062 }
7d1b0095 6063 tcg_temp_free_i32(tmp2);
dd8fbd78 6064
9ee6e8bb
PB
6065 /* Save the result. For elementwise operations we can put it
6066 straight into the destination register. For pairwise operations
6067 we have to be careful to avoid clobbering the source operands. */
6068 if (pairwise && rd == rm) {
dd8fbd78 6069 neon_store_scratch(pass, tmp);
9ee6e8bb 6070 } else {
dd8fbd78 6071 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6072 }
6073
6074 } /* for pass */
6075 if (pairwise && rd == rm) {
6076 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6077 tmp = neon_load_scratch(pass);
6078 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6079 }
6080 }
ad69471c 6081 /* End of 3 register same size operations. */
9ee6e8bb
PB
6082 } else if (insn & (1 << 4)) {
6083 if ((insn & 0x00380080) != 0) {
6084 /* Two registers and shift. */
6085 op = (insn >> 8) & 0xf;
6086 if (insn & (1 << 7)) {
cc13115b
PM
6087 /* 64-bit shift. */
6088 if (op > 7) {
6089 return 1;
6090 }
9ee6e8bb
PB
6091 size = 3;
6092 } else {
6093 size = 2;
6094 while ((insn & (1 << (size + 19))) == 0)
6095 size--;
6096 }
6097 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6098 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6099 by immediate using the variable shift operations. */
6100 if (op < 8) {
6101 /* Shift by immediate:
6102 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6103 if (q && ((rd | rm) & 1)) {
6104 return 1;
6105 }
6106 if (!u && (op == 4 || op == 6)) {
6107 return 1;
6108 }
9ee6e8bb
PB
6109 /* Right shifts are encoded as N - shift, where N is the
6110 element size in bits. */
6111 if (op <= 4)
6112 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6113 if (size == 3) {
6114 count = q + 1;
6115 } else {
6116 count = q ? 4: 2;
6117 }
6118 switch (size) {
6119 case 0:
6120 imm = (uint8_t) shift;
6121 imm |= imm << 8;
6122 imm |= imm << 16;
6123 break;
6124 case 1:
6125 imm = (uint16_t) shift;
6126 imm |= imm << 16;
6127 break;
6128 case 2:
6129 case 3:
6130 imm = shift;
6131 break;
6132 default:
6133 abort();
6134 }
6135
6136 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6137 if (size == 3) {
6138 neon_load_reg64(cpu_V0, rm + pass);
6139 tcg_gen_movi_i64(cpu_V1, imm);
6140 switch (op) {
6141 case 0: /* VSHR */
6142 case 1: /* VSRA */
6143 if (u)
6144 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6145 else
ad69471c 6146 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6147 break;
ad69471c
PB
6148 case 2: /* VRSHR */
6149 case 3: /* VRSRA */
6150 if (u)
6151 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6152 else
ad69471c 6153 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6154 break;
ad69471c 6155 case 4: /* VSRI */
ad69471c
PB
6156 case 5: /* VSHL, VSLI */
6157 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6158 break;
0322b26e 6159 case 6: /* VQSHLU */
02da0b2d
PM
6160 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6161 cpu_V0, cpu_V1);
ad69471c 6162 break;
0322b26e
PM
6163 case 7: /* VQSHL */
6164 if (u) {
02da0b2d 6165 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6166 cpu_V0, cpu_V1);
6167 } else {
02da0b2d 6168 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6169 cpu_V0, cpu_V1);
6170 }
9ee6e8bb 6171 break;
9ee6e8bb 6172 }
ad69471c
PB
6173 if (op == 1 || op == 3) {
6174 /* Accumulate. */
5371cb81 6175 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6176 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6177 } else if (op == 4 || (op == 5 && u)) {
6178 /* Insert */
923e6509
CL
6179 neon_load_reg64(cpu_V1, rd + pass);
6180 uint64_t mask;
6181 if (shift < -63 || shift > 63) {
6182 mask = 0;
6183 } else {
6184 if (op == 4) {
6185 mask = 0xffffffffffffffffull >> -shift;
6186 } else {
6187 mask = 0xffffffffffffffffull << shift;
6188 }
6189 }
6190 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6191 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6192 }
6193 neon_store_reg64(cpu_V0, rd + pass);
6194 } else { /* size < 3 */
6195 /* Operands in T0 and T1. */
dd8fbd78 6196 tmp = neon_load_reg(rm, pass);
7d1b0095 6197 tmp2 = tcg_temp_new_i32();
dd8fbd78 6198 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6199 switch (op) {
6200 case 0: /* VSHR */
6201 case 1: /* VSRA */
6202 GEN_NEON_INTEGER_OP(shl);
6203 break;
6204 case 2: /* VRSHR */
6205 case 3: /* VRSRA */
6206 GEN_NEON_INTEGER_OP(rshl);
6207 break;
6208 case 4: /* VSRI */
ad69471c
PB
6209 case 5: /* VSHL, VSLI */
6210 switch (size) {
dd8fbd78
FN
6211 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6212 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6213 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6214 default: abort();
ad69471c
PB
6215 }
6216 break;
0322b26e 6217 case 6: /* VQSHLU */
ad69471c 6218 switch (size) {
0322b26e 6219 case 0:
02da0b2d
PM
6220 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6221 tmp, tmp2);
0322b26e
PM
6222 break;
6223 case 1:
02da0b2d
PM
6224 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6225 tmp, tmp2);
0322b26e
PM
6226 break;
6227 case 2:
02da0b2d
PM
6228 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6229 tmp, tmp2);
0322b26e
PM
6230 break;
6231 default:
cc13115b 6232 abort();
ad69471c
PB
6233 }
6234 break;
0322b26e 6235 case 7: /* VQSHL */
02da0b2d 6236 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6237 break;
ad69471c 6238 }
7d1b0095 6239 tcg_temp_free_i32(tmp2);
ad69471c
PB
6240
6241 if (op == 1 || op == 3) {
6242 /* Accumulate. */
dd8fbd78 6243 tmp2 = neon_load_reg(rd, pass);
5371cb81 6244 gen_neon_add(size, tmp, tmp2);
7d1b0095 6245 tcg_temp_free_i32(tmp2);
ad69471c
PB
6246 } else if (op == 4 || (op == 5 && u)) {
6247 /* Insert */
6248 switch (size) {
6249 case 0:
6250 if (op == 4)
ca9a32e4 6251 mask = 0xff >> -shift;
ad69471c 6252 else
ca9a32e4
JR
6253 mask = (uint8_t)(0xff << shift);
6254 mask |= mask << 8;
6255 mask |= mask << 16;
ad69471c
PB
6256 break;
6257 case 1:
6258 if (op == 4)
ca9a32e4 6259 mask = 0xffff >> -shift;
ad69471c 6260 else
ca9a32e4
JR
6261 mask = (uint16_t)(0xffff << shift);
6262 mask |= mask << 16;
ad69471c
PB
6263 break;
6264 case 2:
ca9a32e4
JR
6265 if (shift < -31 || shift > 31) {
6266 mask = 0;
6267 } else {
6268 if (op == 4)
6269 mask = 0xffffffffu >> -shift;
6270 else
6271 mask = 0xffffffffu << shift;
6272 }
ad69471c
PB
6273 break;
6274 default:
6275 abort();
6276 }
dd8fbd78 6277 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6278 tcg_gen_andi_i32(tmp, tmp, mask);
6279 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6280 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6281 tcg_temp_free_i32(tmp2);
ad69471c 6282 }
dd8fbd78 6283 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6284 }
6285 } /* for pass */
6286 } else if (op < 10) {
ad69471c 6287 /* Shift by immediate and narrow:
9ee6e8bb 6288 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6289 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6290 if (rm & 1) {
6291 return 1;
6292 }
9ee6e8bb
PB
6293 shift = shift - (1 << (size + 3));
6294 size++;
92cdfaeb 6295 if (size == 3) {
a7812ae4 6296 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6297 neon_load_reg64(cpu_V0, rm);
6298 neon_load_reg64(cpu_V1, rm + 1);
6299 for (pass = 0; pass < 2; pass++) {
6300 TCGv_i64 in;
6301 if (pass == 0) {
6302 in = cpu_V0;
6303 } else {
6304 in = cpu_V1;
6305 }
ad69471c 6306 if (q) {
0b36f4cd 6307 if (input_unsigned) {
92cdfaeb 6308 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6309 } else {
92cdfaeb 6310 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6311 }
ad69471c 6312 } else {
0b36f4cd 6313 if (input_unsigned) {
92cdfaeb 6314 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6315 } else {
92cdfaeb 6316 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6317 }
ad69471c 6318 }
7d1b0095 6319 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6320 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6321 neon_store_reg(rd, pass, tmp);
6322 } /* for pass */
6323 tcg_temp_free_i64(tmp64);
6324 } else {
6325 if (size == 1) {
6326 imm = (uint16_t)shift;
6327 imm |= imm << 16;
2c0262af 6328 } else {
92cdfaeb
PM
6329 /* size == 2 */
6330 imm = (uint32_t)shift;
6331 }
6332 tmp2 = tcg_const_i32(imm);
6333 tmp4 = neon_load_reg(rm + 1, 0);
6334 tmp5 = neon_load_reg(rm + 1, 1);
6335 for (pass = 0; pass < 2; pass++) {
6336 if (pass == 0) {
6337 tmp = neon_load_reg(rm, 0);
6338 } else {
6339 tmp = tmp4;
6340 }
0b36f4cd
CL
6341 gen_neon_shift_narrow(size, tmp, tmp2, q,
6342 input_unsigned);
92cdfaeb
PM
6343 if (pass == 0) {
6344 tmp3 = neon_load_reg(rm, 1);
6345 } else {
6346 tmp3 = tmp5;
6347 }
0b36f4cd
CL
6348 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6349 input_unsigned);
36aa55dc 6350 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6351 tcg_temp_free_i32(tmp);
6352 tcg_temp_free_i32(tmp3);
6353 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6354 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6355 neon_store_reg(rd, pass, tmp);
6356 } /* for pass */
c6067f04 6357 tcg_temp_free_i32(tmp2);
b75263d6 6358 }
9ee6e8bb 6359 } else if (op == 10) {
cc13115b
PM
6360 /* VSHLL, VMOVL */
6361 if (q || (rd & 1)) {
9ee6e8bb 6362 return 1;
cc13115b 6363 }
ad69471c
PB
6364 tmp = neon_load_reg(rm, 0);
6365 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6366 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6367 if (pass == 1)
6368 tmp = tmp2;
6369
6370 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6371
9ee6e8bb
PB
6372 if (shift != 0) {
6373 /* The shift is less than the width of the source
ad69471c
PB
6374 type, so we can just shift the whole register. */
6375 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6376 /* Widen the result of shift: we need to clear
6377 * the potential overflow bits resulting from
6378 * left bits of the narrow input appearing as
6379 * right bits of left the neighbour narrow
6380 * input. */
ad69471c
PB
6381 if (size < 2 || !u) {
6382 uint64_t imm64;
6383 if (size == 0) {
6384 imm = (0xffu >> (8 - shift));
6385 imm |= imm << 16;
acdf01ef 6386 } else if (size == 1) {
ad69471c 6387 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6388 } else {
6389 /* size == 2 */
6390 imm = 0xffffffff >> (32 - shift);
6391 }
6392 if (size < 2) {
6393 imm64 = imm | (((uint64_t)imm) << 32);
6394 } else {
6395 imm64 = imm;
9ee6e8bb 6396 }
acdf01ef 6397 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6398 }
6399 }
ad69471c 6400 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6401 }
f73534a5 6402 } else if (op >= 14) {
9ee6e8bb 6403 /* VCVT fixed-point. */
cc13115b
PM
6404 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6405 return 1;
6406 }
f73534a5
PM
6407 /* We have already masked out the must-be-1 top bit of imm6,
6408 * hence this 32-shift where the ARM ARM has 64-imm6.
6409 */
6410 shift = 32 - shift;
9ee6e8bb 6411 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6412 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6413 if (!(op & 1)) {
9ee6e8bb 6414 if (u)
5500b06c 6415 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6416 else
5500b06c 6417 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6418 } else {
6419 if (u)
5500b06c 6420 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6421 else
5500b06c 6422 gen_vfp_tosl(0, shift, 1);
2c0262af 6423 }
4373f3ce 6424 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6425 }
6426 } else {
9ee6e8bb
PB
6427 return 1;
6428 }
6429 } else { /* (insn & 0x00380080) == 0 */
6430 int invert;
7d80fee5
PM
6431 if (q && (rd & 1)) {
6432 return 1;
6433 }
9ee6e8bb
PB
6434
6435 op = (insn >> 8) & 0xf;
6436 /* One register and immediate. */
6437 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6438 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6439 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6440 * We choose to not special-case this and will behave as if a
6441 * valid constant encoding of 0 had been given.
6442 */
9ee6e8bb
PB
6443 switch (op) {
6444 case 0: case 1:
6445 /* no-op */
6446 break;
6447 case 2: case 3:
6448 imm <<= 8;
6449 break;
6450 case 4: case 5:
6451 imm <<= 16;
6452 break;
6453 case 6: case 7:
6454 imm <<= 24;
6455 break;
6456 case 8: case 9:
6457 imm |= imm << 16;
6458 break;
6459 case 10: case 11:
6460 imm = (imm << 8) | (imm << 24);
6461 break;
6462 case 12:
8e31209e 6463 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6464 break;
6465 case 13:
6466 imm = (imm << 16) | 0xffff;
6467 break;
6468 case 14:
6469 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6470 if (invert)
6471 imm = ~imm;
6472 break;
6473 case 15:
7d80fee5
PM
6474 if (invert) {
6475 return 1;
6476 }
9ee6e8bb
PB
6477 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6478 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6479 break;
6480 }
6481 if (invert)
6482 imm = ~imm;
6483
9ee6e8bb
PB
6484 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6485 if (op & 1 && op < 12) {
ad69471c 6486 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6487 if (invert) {
6488 /* The immediate value has already been inverted, so
6489 BIC becomes AND. */
ad69471c 6490 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6491 } else {
ad69471c 6492 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6493 }
9ee6e8bb 6494 } else {
ad69471c 6495 /* VMOV, VMVN. */
7d1b0095 6496 tmp = tcg_temp_new_i32();
9ee6e8bb 6497 if (op == 14 && invert) {
a5a14945 6498 int n;
ad69471c
PB
6499 uint32_t val;
6500 val = 0;
9ee6e8bb
PB
6501 for (n = 0; n < 4; n++) {
6502 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6503 val |= 0xff << (n * 8);
9ee6e8bb 6504 }
ad69471c
PB
6505 tcg_gen_movi_i32(tmp, val);
6506 } else {
6507 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6508 }
9ee6e8bb 6509 }
ad69471c 6510 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6511 }
6512 }
e4b3861d 6513 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6514 if (size != 3) {
6515 op = (insn >> 8) & 0xf;
6516 if ((insn & (1 << 6)) == 0) {
6517 /* Three registers of different lengths. */
6518 int src1_wide;
6519 int src2_wide;
6520 int prewiden;
526d0096
PM
6521 /* undefreq: bit 0 : UNDEF if size == 0
6522 * bit 1 : UNDEF if size == 1
6523 * bit 2 : UNDEF if size == 2
6524 * bit 3 : UNDEF if U == 1
6525 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6526 */
6527 int undefreq;
6528 /* prewiden, src1_wide, src2_wide, undefreq */
6529 static const int neon_3reg_wide[16][4] = {
6530 {1, 0, 0, 0}, /* VADDL */
6531 {1, 1, 0, 0}, /* VADDW */
6532 {1, 0, 0, 0}, /* VSUBL */
6533 {1, 1, 0, 0}, /* VSUBW */
6534 {0, 1, 1, 0}, /* VADDHN */
6535 {0, 0, 0, 0}, /* VABAL */
6536 {0, 1, 1, 0}, /* VSUBHN */
6537 {0, 0, 0, 0}, /* VABDL */
6538 {0, 0, 0, 0}, /* VMLAL */
526d0096 6539 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6540 {0, 0, 0, 0}, /* VMLSL */
526d0096 6541 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6542 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6543 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6544 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6545 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6546 };
6547
6548 prewiden = neon_3reg_wide[op][0];
6549 src1_wide = neon_3reg_wide[op][1];
6550 src2_wide = neon_3reg_wide[op][2];
695272dc 6551 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6552
526d0096
PM
6553 if ((undefreq & (1 << size)) ||
6554 ((undefreq & 8) && u)) {
695272dc
PM
6555 return 1;
6556 }
6557 if ((src1_wide && (rn & 1)) ||
6558 (src2_wide && (rm & 1)) ||
6559 (!src2_wide && (rd & 1))) {
ad69471c 6560 return 1;
695272dc 6561 }
ad69471c 6562
4e624eda
PM
6563 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6564 * outside the loop below as it only performs a single pass.
6565 */
6566 if (op == 14 && size == 2) {
6567 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6568
d614a513 6569 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6570 return 1;
6571 }
6572 tcg_rn = tcg_temp_new_i64();
6573 tcg_rm = tcg_temp_new_i64();
6574 tcg_rd = tcg_temp_new_i64();
6575 neon_load_reg64(tcg_rn, rn);
6576 neon_load_reg64(tcg_rm, rm);
6577 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6578 neon_store_reg64(tcg_rd, rd);
6579 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6580 neon_store_reg64(tcg_rd, rd + 1);
6581 tcg_temp_free_i64(tcg_rn);
6582 tcg_temp_free_i64(tcg_rm);
6583 tcg_temp_free_i64(tcg_rd);
6584 return 0;
6585 }
6586
9ee6e8bb
PB
6587 /* Avoid overlapping operands. Wide source operands are
6588 always aligned so will never overlap with wide
6589 destinations in problematic ways. */
8f8e3aa4 6590 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6591 tmp = neon_load_reg(rm, 1);
6592 neon_store_scratch(2, tmp);
8f8e3aa4 6593 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6594 tmp = neon_load_reg(rn, 1);
6595 neon_store_scratch(2, tmp);
9ee6e8bb 6596 }
39d5492a 6597 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6598 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6599 if (src1_wide) {
6600 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6601 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6602 } else {
ad69471c 6603 if (pass == 1 && rd == rn) {
dd8fbd78 6604 tmp = neon_load_scratch(2);
9ee6e8bb 6605 } else {
ad69471c
PB
6606 tmp = neon_load_reg(rn, pass);
6607 }
6608 if (prewiden) {
6609 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6610 }
6611 }
ad69471c
PB
6612 if (src2_wide) {
6613 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6614 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6615 } else {
ad69471c 6616 if (pass == 1 && rd == rm) {
dd8fbd78 6617 tmp2 = neon_load_scratch(2);
9ee6e8bb 6618 } else {
ad69471c
PB
6619 tmp2 = neon_load_reg(rm, pass);
6620 }
6621 if (prewiden) {
6622 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6623 }
9ee6e8bb
PB
6624 }
6625 switch (op) {
6626 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6627 gen_neon_addl(size);
9ee6e8bb 6628 break;
79b0e534 6629 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6630 gen_neon_subl(size);
9ee6e8bb
PB
6631 break;
6632 case 5: case 7: /* VABAL, VABDL */
6633 switch ((size << 1) | u) {
ad69471c
PB
6634 case 0:
6635 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6636 break;
6637 case 1:
6638 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6639 break;
6640 case 2:
6641 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6642 break;
6643 case 3:
6644 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6645 break;
6646 case 4:
6647 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6648 break;
6649 case 5:
6650 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6651 break;
9ee6e8bb
PB
6652 default: abort();
6653 }
7d1b0095
PM
6654 tcg_temp_free_i32(tmp2);
6655 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6656 break;
6657 case 8: case 9: case 10: case 11: case 12: case 13:
6658 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6659 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6660 break;
6661 case 14: /* Polynomial VMULL */
e5ca24cb 6662 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6663 tcg_temp_free_i32(tmp2);
6664 tcg_temp_free_i32(tmp);
e5ca24cb 6665 break;
695272dc
PM
6666 default: /* 15 is RESERVED: caught earlier */
6667 abort();
9ee6e8bb 6668 }
ebcd88ce
PM
6669 if (op == 13) {
6670 /* VQDMULL */
6671 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6672 neon_store_reg64(cpu_V0, rd + pass);
6673 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6674 /* Accumulate. */
ebcd88ce 6675 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6676 switch (op) {
4dc064e6
PM
6677 case 10: /* VMLSL */
6678 gen_neon_negl(cpu_V0, size);
6679 /* Fall through */
6680 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6681 gen_neon_addl(size);
9ee6e8bb
PB
6682 break;
6683 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6684 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6685 if (op == 11) {
6686 gen_neon_negl(cpu_V0, size);
6687 }
ad69471c
PB
6688 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6689 break;
9ee6e8bb
PB
6690 default:
6691 abort();
6692 }
ad69471c 6693 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6694 } else if (op == 4 || op == 6) {
6695 /* Narrowing operation. */
7d1b0095 6696 tmp = tcg_temp_new_i32();
79b0e534 6697 if (!u) {
9ee6e8bb 6698 switch (size) {
ad69471c
PB
6699 case 0:
6700 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6701 break;
6702 case 1:
6703 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6704 break;
6705 case 2:
6706 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6707 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6708 break;
9ee6e8bb
PB
6709 default: abort();
6710 }
6711 } else {
6712 switch (size) {
ad69471c
PB
6713 case 0:
6714 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6715 break;
6716 case 1:
6717 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6718 break;
6719 case 2:
6720 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6721 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6722 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6723 break;
9ee6e8bb
PB
6724 default: abort();
6725 }
6726 }
ad69471c
PB
6727 if (pass == 0) {
6728 tmp3 = tmp;
6729 } else {
6730 neon_store_reg(rd, 0, tmp3);
6731 neon_store_reg(rd, 1, tmp);
6732 }
9ee6e8bb
PB
6733 } else {
6734 /* Write back the result. */
ad69471c 6735 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6736 }
6737 }
6738 } else {
3e3326df
PM
6739 /* Two registers and a scalar. NB that for ops of this form
6740 * the ARM ARM labels bit 24 as Q, but it is in our variable
6741 * 'u', not 'q'.
6742 */
6743 if (size == 0) {
6744 return 1;
6745 }
9ee6e8bb 6746 switch (op) {
9ee6e8bb 6747 case 1: /* Float VMLA scalar */
9ee6e8bb 6748 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6749 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6750 if (size == 1) {
6751 return 1;
6752 }
6753 /* fall through */
6754 case 0: /* Integer VMLA scalar */
6755 case 4: /* Integer VMLS scalar */
6756 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6757 case 12: /* VQDMULH scalar */
6758 case 13: /* VQRDMULH scalar */
3e3326df
PM
6759 if (u && ((rd | rn) & 1)) {
6760 return 1;
6761 }
dd8fbd78
FN
6762 tmp = neon_get_scalar(size, rm);
6763 neon_store_scratch(0, tmp);
9ee6e8bb 6764 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6765 tmp = neon_load_scratch(0);
6766 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6767 if (op == 12) {
6768 if (size == 1) {
02da0b2d 6769 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6770 } else {
02da0b2d 6771 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6772 }
6773 } else if (op == 13) {
6774 if (size == 1) {
02da0b2d 6775 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6776 } else {
02da0b2d 6777 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6778 }
6779 } else if (op & 1) {
aa47cfdd
PM
6780 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6781 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6782 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6783 } else {
6784 switch (size) {
dd8fbd78
FN
6785 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6786 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6787 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6788 default: abort();
9ee6e8bb
PB
6789 }
6790 }
7d1b0095 6791 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6792 if (op < 8) {
6793 /* Accumulate. */
dd8fbd78 6794 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6795 switch (op) {
6796 case 0:
dd8fbd78 6797 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6798 break;
6799 case 1:
aa47cfdd
PM
6800 {
6801 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6802 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6803 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6804 break;
aa47cfdd 6805 }
9ee6e8bb 6806 case 4:
dd8fbd78 6807 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6808 break;
6809 case 5:
aa47cfdd
PM
6810 {
6811 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6812 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6813 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6814 break;
aa47cfdd 6815 }
9ee6e8bb
PB
6816 default:
6817 abort();
6818 }
7d1b0095 6819 tcg_temp_free_i32(tmp2);
9ee6e8bb 6820 }
dd8fbd78 6821 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6822 }
6823 break;
9ee6e8bb 6824 case 3: /* VQDMLAL scalar */
9ee6e8bb 6825 case 7: /* VQDMLSL scalar */
9ee6e8bb 6826 case 11: /* VQDMULL scalar */
3e3326df 6827 if (u == 1) {
ad69471c 6828 return 1;
3e3326df
PM
6829 }
6830 /* fall through */
6831 case 2: /* VMLAL sclar */
6832 case 6: /* VMLSL scalar */
6833 case 10: /* VMULL scalar */
6834 if (rd & 1) {
6835 return 1;
6836 }
dd8fbd78 6837 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6838 /* We need a copy of tmp2 because gen_neon_mull
6839 * deletes it during pass 0. */
7d1b0095 6840 tmp4 = tcg_temp_new_i32();
c6067f04 6841 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6842 tmp3 = neon_load_reg(rn, 1);
ad69471c 6843
9ee6e8bb 6844 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6845 if (pass == 0) {
6846 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6847 } else {
dd8fbd78 6848 tmp = tmp3;
c6067f04 6849 tmp2 = tmp4;
9ee6e8bb 6850 }
ad69471c 6851 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6852 if (op != 11) {
6853 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6854 }
9ee6e8bb 6855 switch (op) {
4dc064e6
PM
6856 case 6:
6857 gen_neon_negl(cpu_V0, size);
6858 /* Fall through */
6859 case 2:
ad69471c 6860 gen_neon_addl(size);
9ee6e8bb
PB
6861 break;
6862 case 3: case 7:
ad69471c 6863 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6864 if (op == 7) {
6865 gen_neon_negl(cpu_V0, size);
6866 }
ad69471c 6867 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6868 break;
6869 case 10:
6870 /* no-op */
6871 break;
6872 case 11:
ad69471c 6873 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6874 break;
6875 default:
6876 abort();
6877 }
ad69471c 6878 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6879 }
dd8fbd78 6880
dd8fbd78 6881
9ee6e8bb
PB
6882 break;
6883 default: /* 14 and 15 are RESERVED */
6884 return 1;
6885 }
6886 }
6887 } else { /* size == 3 */
6888 if (!u) {
6889 /* Extract. */
9ee6e8bb 6890 imm = (insn >> 8) & 0xf;
ad69471c
PB
6891
6892 if (imm > 7 && !q)
6893 return 1;
6894
52579ea1
PM
6895 if (q && ((rd | rn | rm) & 1)) {
6896 return 1;
6897 }
6898
ad69471c
PB
6899 if (imm == 0) {
6900 neon_load_reg64(cpu_V0, rn);
6901 if (q) {
6902 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6903 }
ad69471c
PB
6904 } else if (imm == 8) {
6905 neon_load_reg64(cpu_V0, rn + 1);
6906 if (q) {
6907 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6908 }
ad69471c 6909 } else if (q) {
a7812ae4 6910 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6911 if (imm < 8) {
6912 neon_load_reg64(cpu_V0, rn);
a7812ae4 6913 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6914 } else {
6915 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6916 neon_load_reg64(tmp64, rm);
ad69471c
PB
6917 }
6918 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6919 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6920 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6921 if (imm < 8) {
6922 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6923 } else {
ad69471c
PB
6924 neon_load_reg64(cpu_V1, rm + 1);
6925 imm -= 8;
9ee6e8bb 6926 }
ad69471c 6927 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6928 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6929 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6930 tcg_temp_free_i64(tmp64);
ad69471c 6931 } else {
a7812ae4 6932 /* BUGFIX */
ad69471c 6933 neon_load_reg64(cpu_V0, rn);
a7812ae4 6934 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6935 neon_load_reg64(cpu_V1, rm);
a7812ae4 6936 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6937 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6938 }
6939 neon_store_reg64(cpu_V0, rd);
6940 if (q) {
6941 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6942 }
6943 } else if ((insn & (1 << 11)) == 0) {
6944 /* Two register misc. */
6945 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6946 size = (insn >> 18) & 3;
600b828c
PM
6947 /* UNDEF for unknown op values and bad op-size combinations */
6948 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6949 return 1;
6950 }
fe8fcf3d
PM
6951 if (neon_2rm_is_v8_op(op) &&
6952 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6953 return 1;
6954 }
fc2a9b37
PM
6955 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6956 q && ((rm | rd) & 1)) {
6957 return 1;
6958 }
9ee6e8bb 6959 switch (op) {
600b828c 6960 case NEON_2RM_VREV64:
9ee6e8bb 6961 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6962 tmp = neon_load_reg(rm, pass * 2);
6963 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6964 switch (size) {
dd8fbd78
FN
6965 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6966 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6967 case 2: /* no-op */ break;
6968 default: abort();
6969 }
dd8fbd78 6970 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6971 if (size == 2) {
dd8fbd78 6972 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6973 } else {
9ee6e8bb 6974 switch (size) {
dd8fbd78
FN
6975 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6976 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6977 default: abort();
6978 }
dd8fbd78 6979 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6980 }
6981 }
6982 break;
600b828c
PM
6983 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6984 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6985 for (pass = 0; pass < q + 1; pass++) {
6986 tmp = neon_load_reg(rm, pass * 2);
6987 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6988 tmp = neon_load_reg(rm, pass * 2 + 1);
6989 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6990 switch (size) {
6991 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6992 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6993 case 2: tcg_gen_add_i64(CPU_V001); break;
6994 default: abort();
6995 }
600b828c 6996 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6997 /* Accumulate. */
ad69471c
PB
6998 neon_load_reg64(cpu_V1, rd + pass);
6999 gen_neon_addl(size);
9ee6e8bb 7000 }
ad69471c 7001 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7002 }
7003 break;
600b828c 7004 case NEON_2RM_VTRN:
9ee6e8bb 7005 if (size == 2) {
a5a14945 7006 int n;
9ee6e8bb 7007 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7008 tmp = neon_load_reg(rm, n);
7009 tmp2 = neon_load_reg(rd, n + 1);
7010 neon_store_reg(rm, n, tmp2);
7011 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7012 }
7013 } else {
7014 goto elementwise;
7015 }
7016 break;
600b828c 7017 case NEON_2RM_VUZP:
02acedf9 7018 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7019 return 1;
9ee6e8bb
PB
7020 }
7021 break;
600b828c 7022 case NEON_2RM_VZIP:
d68a6f3a 7023 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7024 return 1;
9ee6e8bb
PB
7025 }
7026 break;
600b828c
PM
7027 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7028 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7029 if (rm & 1) {
7030 return 1;
7031 }
39d5492a 7032 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 7033 for (pass = 0; pass < 2; pass++) {
ad69471c 7034 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7035 tmp = tcg_temp_new_i32();
600b828c
PM
7036 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7037 tmp, cpu_V0);
ad69471c
PB
7038 if (pass == 0) {
7039 tmp2 = tmp;
7040 } else {
7041 neon_store_reg(rd, 0, tmp2);
7042 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7043 }
9ee6e8bb
PB
7044 }
7045 break;
600b828c 7046 case NEON_2RM_VSHLL:
fc2a9b37 7047 if (q || (rd & 1)) {
9ee6e8bb 7048 return 1;
600b828c 7049 }
ad69471c
PB
7050 tmp = neon_load_reg(rm, 0);
7051 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7052 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7053 if (pass == 1)
7054 tmp = tmp2;
7055 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7056 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7057 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7058 }
7059 break;
600b828c 7060 case NEON_2RM_VCVT_F16_F32:
d614a513 7061 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7062 q || (rm & 1)) {
7063 return 1;
7064 }
7d1b0095
PM
7065 tmp = tcg_temp_new_i32();
7066 tmp2 = tcg_temp_new_i32();
60011498 7067 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7068 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7069 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7070 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7071 tcg_gen_shli_i32(tmp2, tmp2, 16);
7072 tcg_gen_or_i32(tmp2, tmp2, tmp);
7073 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7074 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7075 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7076 neon_store_reg(rd, 0, tmp2);
7d1b0095 7077 tmp2 = tcg_temp_new_i32();
2d981da7 7078 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7079 tcg_gen_shli_i32(tmp2, tmp2, 16);
7080 tcg_gen_or_i32(tmp2, tmp2, tmp);
7081 neon_store_reg(rd, 1, tmp2);
7d1b0095 7082 tcg_temp_free_i32(tmp);
60011498 7083 break;
600b828c 7084 case NEON_2RM_VCVT_F32_F16:
d614a513 7085 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7086 q || (rd & 1)) {
7087 return 1;
7088 }
7d1b0095 7089 tmp3 = tcg_temp_new_i32();
60011498
PB
7090 tmp = neon_load_reg(rm, 0);
7091 tmp2 = neon_load_reg(rm, 1);
7092 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7093 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7094 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7095 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7096 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7097 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7098 tcg_temp_free_i32(tmp);
60011498 7099 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7100 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7101 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7102 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7103 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7104 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7105 tcg_temp_free_i32(tmp2);
7106 tcg_temp_free_i32(tmp3);
60011498 7107 break;
9d935509 7108 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7109 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7110 || ((rm | rd) & 1)) {
7111 return 1;
7112 }
7113 tmp = tcg_const_i32(rd);
7114 tmp2 = tcg_const_i32(rm);
7115
7116 /* Bit 6 is the lowest opcode bit; it distinguishes between
7117 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7118 */
7119 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7120
7121 if (op == NEON_2RM_AESE) {
7122 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7123 } else {
7124 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7125 }
7126 tcg_temp_free_i32(tmp);
7127 tcg_temp_free_i32(tmp2);
7128 tcg_temp_free_i32(tmp3);
7129 break;
f1ecb913 7130 case NEON_2RM_SHA1H:
d614a513 7131 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7132 || ((rm | rd) & 1)) {
7133 return 1;
7134 }
7135 tmp = tcg_const_i32(rd);
7136 tmp2 = tcg_const_i32(rm);
7137
7138 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7139
7140 tcg_temp_free_i32(tmp);
7141 tcg_temp_free_i32(tmp2);
7142 break;
7143 case NEON_2RM_SHA1SU1:
7144 if ((rm | rd) & 1) {
7145 return 1;
7146 }
7147 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7148 if (q) {
d614a513 7149 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7150 return 1;
7151 }
d614a513 7152 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7153 return 1;
7154 }
7155 tmp = tcg_const_i32(rd);
7156 tmp2 = tcg_const_i32(rm);
7157 if (q) {
7158 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7159 } else {
7160 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7161 }
7162 tcg_temp_free_i32(tmp);
7163 tcg_temp_free_i32(tmp2);
7164 break;
9ee6e8bb
PB
7165 default:
7166 elementwise:
7167 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7168 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7169 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7170 neon_reg_offset(rm, pass));
39d5492a 7171 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7172 } else {
dd8fbd78 7173 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7174 }
7175 switch (op) {
600b828c 7176 case NEON_2RM_VREV32:
9ee6e8bb 7177 switch (size) {
dd8fbd78
FN
7178 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7179 case 1: gen_swap_half(tmp); break;
600b828c 7180 default: abort();
9ee6e8bb
PB
7181 }
7182 break;
600b828c 7183 case NEON_2RM_VREV16:
dd8fbd78 7184 gen_rev16(tmp);
9ee6e8bb 7185 break;
600b828c 7186 case NEON_2RM_VCLS:
9ee6e8bb 7187 switch (size) {
dd8fbd78
FN
7188 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7189 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7190 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7191 default: abort();
9ee6e8bb
PB
7192 }
7193 break;
600b828c 7194 case NEON_2RM_VCLZ:
9ee6e8bb 7195 switch (size) {
dd8fbd78
FN
7196 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7197 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7198 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7199 default: abort();
9ee6e8bb
PB
7200 }
7201 break;
600b828c 7202 case NEON_2RM_VCNT:
dd8fbd78 7203 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7204 break;
600b828c 7205 case NEON_2RM_VMVN:
dd8fbd78 7206 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7207 break;
600b828c 7208 case NEON_2RM_VQABS:
9ee6e8bb 7209 switch (size) {
02da0b2d
PM
7210 case 0:
7211 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7212 break;
7213 case 1:
7214 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7215 break;
7216 case 2:
7217 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7218 break;
600b828c 7219 default: abort();
9ee6e8bb
PB
7220 }
7221 break;
600b828c 7222 case NEON_2RM_VQNEG:
9ee6e8bb 7223 switch (size) {
02da0b2d
PM
7224 case 0:
7225 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7226 break;
7227 case 1:
7228 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7229 break;
7230 case 2:
7231 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7232 break;
600b828c 7233 default: abort();
9ee6e8bb
PB
7234 }
7235 break;
600b828c 7236 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7237 tmp2 = tcg_const_i32(0);
9ee6e8bb 7238 switch(size) {
dd8fbd78
FN
7239 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7240 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7241 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7242 default: abort();
9ee6e8bb 7243 }
39d5492a 7244 tcg_temp_free_i32(tmp2);
600b828c 7245 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7246 tcg_gen_not_i32(tmp, tmp);
600b828c 7247 }
9ee6e8bb 7248 break;
600b828c 7249 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7250 tmp2 = tcg_const_i32(0);
9ee6e8bb 7251 switch(size) {
dd8fbd78
FN
7252 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7253 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7254 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7255 default: abort();
9ee6e8bb 7256 }
39d5492a 7257 tcg_temp_free_i32(tmp2);
600b828c 7258 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7259 tcg_gen_not_i32(tmp, tmp);
600b828c 7260 }
9ee6e8bb 7261 break;
600b828c 7262 case NEON_2RM_VCEQ0:
dd8fbd78 7263 tmp2 = tcg_const_i32(0);
9ee6e8bb 7264 switch(size) {
dd8fbd78
FN
7265 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7266 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7267 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7268 default: abort();
9ee6e8bb 7269 }
39d5492a 7270 tcg_temp_free_i32(tmp2);
9ee6e8bb 7271 break;
600b828c 7272 case NEON_2RM_VABS:
9ee6e8bb 7273 switch(size) {
dd8fbd78
FN
7274 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7275 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7276 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7277 default: abort();
9ee6e8bb
PB
7278 }
7279 break;
600b828c 7280 case NEON_2RM_VNEG:
dd8fbd78
FN
7281 tmp2 = tcg_const_i32(0);
7282 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7283 tcg_temp_free_i32(tmp2);
9ee6e8bb 7284 break;
600b828c 7285 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7286 {
7287 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7288 tmp2 = tcg_const_i32(0);
aa47cfdd 7289 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7290 tcg_temp_free_i32(tmp2);
aa47cfdd 7291 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7292 break;
aa47cfdd 7293 }
600b828c 7294 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7295 {
7296 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7297 tmp2 = tcg_const_i32(0);
aa47cfdd 7298 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7299 tcg_temp_free_i32(tmp2);
aa47cfdd 7300 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7301 break;
aa47cfdd 7302 }
600b828c 7303 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7304 {
7305 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7306 tmp2 = tcg_const_i32(0);
aa47cfdd 7307 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7308 tcg_temp_free_i32(tmp2);
aa47cfdd 7309 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7310 break;
aa47cfdd 7311 }
600b828c 7312 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7313 {
7314 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7315 tmp2 = tcg_const_i32(0);
aa47cfdd 7316 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7317 tcg_temp_free_i32(tmp2);
aa47cfdd 7318 tcg_temp_free_ptr(fpstatus);
0e326109 7319 break;
aa47cfdd 7320 }
600b828c 7321 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7322 {
7323 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7324 tmp2 = tcg_const_i32(0);
aa47cfdd 7325 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7326 tcg_temp_free_i32(tmp2);
aa47cfdd 7327 tcg_temp_free_ptr(fpstatus);
0e326109 7328 break;
aa47cfdd 7329 }
600b828c 7330 case NEON_2RM_VABS_F:
4373f3ce 7331 gen_vfp_abs(0);
9ee6e8bb 7332 break;
600b828c 7333 case NEON_2RM_VNEG_F:
4373f3ce 7334 gen_vfp_neg(0);
9ee6e8bb 7335 break;
600b828c 7336 case NEON_2RM_VSWP:
dd8fbd78
FN
7337 tmp2 = neon_load_reg(rd, pass);
7338 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7339 break;
600b828c 7340 case NEON_2RM_VTRN:
dd8fbd78 7341 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7342 switch (size) {
dd8fbd78
FN
7343 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7344 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7345 default: abort();
9ee6e8bb 7346 }
dd8fbd78 7347 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7348 break;
34f7b0a2
WN
7349 case NEON_2RM_VRINTN:
7350 case NEON_2RM_VRINTA:
7351 case NEON_2RM_VRINTM:
7352 case NEON_2RM_VRINTP:
7353 case NEON_2RM_VRINTZ:
7354 {
7355 TCGv_i32 tcg_rmode;
7356 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7357 int rmode;
7358
7359 if (op == NEON_2RM_VRINTZ) {
7360 rmode = FPROUNDING_ZERO;
7361 } else {
7362 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7363 }
7364
7365 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7366 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7367 cpu_env);
7368 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7369 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7370 cpu_env);
7371 tcg_temp_free_ptr(fpstatus);
7372 tcg_temp_free_i32(tcg_rmode);
7373 break;
7374 }
2ce70625
WN
7375 case NEON_2RM_VRINTX:
7376 {
7377 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7378 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7379 tcg_temp_free_ptr(fpstatus);
7380 break;
7381 }
901ad525
WN
7382 case NEON_2RM_VCVTAU:
7383 case NEON_2RM_VCVTAS:
7384 case NEON_2RM_VCVTNU:
7385 case NEON_2RM_VCVTNS:
7386 case NEON_2RM_VCVTPU:
7387 case NEON_2RM_VCVTPS:
7388 case NEON_2RM_VCVTMU:
7389 case NEON_2RM_VCVTMS:
7390 {
7391 bool is_signed = !extract32(insn, 7, 1);
7392 TCGv_ptr fpst = get_fpstatus_ptr(1);
7393 TCGv_i32 tcg_rmode, tcg_shift;
7394 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7395
7396 tcg_shift = tcg_const_i32(0);
7397 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7398 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7399 cpu_env);
7400
7401 if (is_signed) {
7402 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7403 tcg_shift, fpst);
7404 } else {
7405 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7406 tcg_shift, fpst);
7407 }
7408
7409 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7410 cpu_env);
7411 tcg_temp_free_i32(tcg_rmode);
7412 tcg_temp_free_i32(tcg_shift);
7413 tcg_temp_free_ptr(fpst);
7414 break;
7415 }
600b828c 7416 case NEON_2RM_VRECPE:
b6d4443a
AB
7417 {
7418 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7419 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7420 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7421 break;
b6d4443a 7422 }
600b828c 7423 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7424 {
7425 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7426 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7427 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7428 break;
c2fb418e 7429 }
600b828c 7430 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7431 {
7432 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7433 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7434 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7435 break;
b6d4443a 7436 }
600b828c 7437 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7438 {
7439 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7440 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7441 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7442 break;
c2fb418e 7443 }
600b828c 7444 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7445 gen_vfp_sito(0, 1);
9ee6e8bb 7446 break;
600b828c 7447 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7448 gen_vfp_uito(0, 1);
9ee6e8bb 7449 break;
600b828c 7450 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7451 gen_vfp_tosiz(0, 1);
9ee6e8bb 7452 break;
600b828c 7453 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7454 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7455 break;
7456 default:
600b828c
PM
7457 /* Reserved op values were caught by the
7458 * neon_2rm_sizes[] check earlier.
7459 */
7460 abort();
9ee6e8bb 7461 }
600b828c 7462 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7463 tcg_gen_st_f32(cpu_F0s, cpu_env,
7464 neon_reg_offset(rd, pass));
9ee6e8bb 7465 } else {
dd8fbd78 7466 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7467 }
7468 }
7469 break;
7470 }
7471 } else if ((insn & (1 << 10)) == 0) {
7472 /* VTBL, VTBX. */
56907d77
PM
7473 int n = ((insn >> 8) & 3) + 1;
7474 if ((rn + n) > 32) {
7475 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7476 * helper function running off the end of the register file.
7477 */
7478 return 1;
7479 }
7480 n <<= 3;
9ee6e8bb 7481 if (insn & (1 << 6)) {
8f8e3aa4 7482 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7483 } else {
7d1b0095 7484 tmp = tcg_temp_new_i32();
8f8e3aa4 7485 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7486 }
8f8e3aa4 7487 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7488 tmp4 = tcg_const_i32(rn);
7489 tmp5 = tcg_const_i32(n);
9ef39277 7490 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7491 tcg_temp_free_i32(tmp);
9ee6e8bb 7492 if (insn & (1 << 6)) {
8f8e3aa4 7493 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7494 } else {
7d1b0095 7495 tmp = tcg_temp_new_i32();
8f8e3aa4 7496 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7497 }
8f8e3aa4 7498 tmp3 = neon_load_reg(rm, 1);
9ef39277 7499 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7500 tcg_temp_free_i32(tmp5);
7501 tcg_temp_free_i32(tmp4);
8f8e3aa4 7502 neon_store_reg(rd, 0, tmp2);
3018f259 7503 neon_store_reg(rd, 1, tmp3);
7d1b0095 7504 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7505 } else if ((insn & 0x380) == 0) {
7506 /* VDUP */
133da6aa
JR
7507 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7508 return 1;
7509 }
9ee6e8bb 7510 if (insn & (1 << 19)) {
dd8fbd78 7511 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7512 } else {
dd8fbd78 7513 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7514 }
7515 if (insn & (1 << 16)) {
dd8fbd78 7516 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7517 } else if (insn & (1 << 17)) {
7518 if ((insn >> 18) & 1)
dd8fbd78 7519 gen_neon_dup_high16(tmp);
9ee6e8bb 7520 else
dd8fbd78 7521 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7522 }
7523 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7524 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7525 tcg_gen_mov_i32(tmp2, tmp);
7526 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7527 }
7d1b0095 7528 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7529 } else {
7530 return 1;
7531 }
7532 }
7533 }
7534 return 0;
7535}
7536
/* Decode and translate a coprocessor access instruction
 * (CDP / MRC / MCR / MRRC / MCRR encodings, plus the XScale/iwMMXt
 * coprocessor spaces).  Returns 0 if the access was translated,
 * 1 if it is invalid or unhandled here.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* CPAR bit clear for this coprocessor: access is not permitted */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-identifying fields; the 64-bit (MRRC/MCRR)
     * form has no crn/opc2 and carries the second transfer register
     * in bits [19:16].
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync condexec/PC state so a helper-raised exception reports
             * the correct guest state.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O register accesses under icount must be bracketed by
         * gen_io_start()/gen_io_end().
         */
        if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into rt (low) and rt2 (high) */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7788
5e3f878a
PB
7789
7790/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7791static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7792{
39d5492a 7793 TCGv_i32 tmp;
7d1b0095 7794 tmp = tcg_temp_new_i32();
ecc7b3aa 7795 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7796 store_reg(s, rlow, tmp);
7d1b0095 7797 tmp = tcg_temp_new_i32();
5e3f878a 7798 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7799 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7800 store_reg(s, rhigh, tmp);
7801}
7802
7803/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7804static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7805{
a7812ae4 7806 TCGv_i64 tmp;
39d5492a 7807 TCGv_i32 tmp2;
5e3f878a 7808
36aa55dc 7809 /* Load value and extend to 64 bits. */
a7812ae4 7810 tmp = tcg_temp_new_i64();
5e3f878a
PB
7811 tmp2 = load_reg(s, rlow);
7812 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7813 tcg_temp_free_i32(tmp2);
5e3f878a 7814 tcg_gen_add_i64(val, val, tmp);
b75263d6 7815 tcg_temp_free_i64(tmp);
5e3f878a
PB
7816}
7817
7818/* load and add a 64-bit value from a register pair. */
a7812ae4 7819static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7820{
a7812ae4 7821 TCGv_i64 tmp;
39d5492a
PM
7822 TCGv_i32 tmpl;
7823 TCGv_i32 tmph;
5e3f878a
PB
7824
7825 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7826 tmpl = load_reg(s, rlow);
7827 tmph = load_reg(s, rhigh);
a7812ae4 7828 tmp = tcg_temp_new_i64();
36aa55dc 7829 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7830 tcg_temp_free_i32(tmpl);
7831 tcg_temp_free_i32(tmph);
5e3f878a 7832 tcg_gen_add_i64(val, val, tmp);
b75263d6 7833 tcg_temp_free_i64(tmp);
5e3f878a
PB
7834}
7835
c9f10124 7836/* Set N and Z flags from hi|lo. */
39d5492a 7837static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7838{
c9f10124
RH
7839 tcg_gen_mov_i32(cpu_NF, hi);
7840 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7841}
7842
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */
/* Emit a load-exclusive of @size (log2 bytes; 3 == 64-bit pair into
 * rt/rt2) from @addr, recording the address and loaded value in
 * cpu_exclusive_addr/cpu_exclusive_val for the later store-exclusive.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Exclusives are required to be naturally aligned */
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* 64-bit case: single aligned 64-bit load, remembered whole,
         * then split into the rt (low) / rt2 (high) register pair.
         */
        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        /* Remember the (zero-extended) loaded value for the cmpxchg */
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Mark the exclusive monitor with the transaction address */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7875
7876static void gen_clrex(DisasContext *s)
7877{
03d05e2d 7878 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7879}
7880
/* Emit a store-exclusive of rt (and rt2 when @size == 3) to @addr,
 * writing 0 to rd on success and 1 on failure.  The address check is
 * done inline against cpu_exclusive_addr; the value check is folded
 * into an atomic cmpxchg against cpu_exclusive_val.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address comparison must be done manually (the cmpxchg below only
     * compares the remembered value).
     */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        /* 64-bit: cmpxchg the rt2:rt pair against the remembered value */
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        tcg_gen_concat_i32_i64(n64, t1, t2);
        tcg_temp_free_i32(t2);
        gen_aa32_frob64(s, n64);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* o64 holds the old memory value; success iff it matched the
         * remembered exclusive value.
         */
        gen_aa32_frob64(s, o64);
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        /* t0 = 0 if the old value matched (store happened), 1 otherwise */
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive access has ended: clear the monitor */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 7943
81465888
PM
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap to EL3: target EL argument is 3 */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the target mode number */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from banked SP to the first of the two stored words */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR then SPSR at consecutive words */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust from the second store address back to the final SP value
         * (net effect: SP moves by +/-8 as per the addressing mode).
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE;
}
8070
f4df2210 8071static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8072{
f4df2210 8073 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8074 TCGv_i32 tmp;
8075 TCGv_i32 tmp2;
8076 TCGv_i32 tmp3;
8077 TCGv_i32 addr;
a7812ae4 8078 TCGv_i64 tmp64;
9ee6e8bb 8079
e13886e3
PM
8080 /* M variants do not implement ARM mode; this must raise the INVSTATE
8081 * UsageFault exception.
8082 */
b53d8923 8083 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8084 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8085 default_exception_el(s));
8086 return;
b53d8923 8087 }
9ee6e8bb
PB
8088 cond = insn >> 28;
8089 if (cond == 0xf){
be5e7a76
DES
8090 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8091 * choose to UNDEF. In ARMv5 and above the space is used
8092 * for miscellaneous unconditional instructions.
8093 */
8094 ARCH(5);
8095
9ee6e8bb
PB
8096 /* Unconditional instructions. */
8097 if (((insn >> 25) & 7) == 1) {
8098 /* NEON Data processing. */
d614a513 8099 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8100 goto illegal_op;
d614a513 8101 }
9ee6e8bb 8102
7dcc1f89 8103 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8104 goto illegal_op;
7dcc1f89 8105 }
9ee6e8bb
PB
8106 return;
8107 }
8108 if ((insn & 0x0f100000) == 0x04000000) {
8109 /* NEON load/store. */
d614a513 8110 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8111 goto illegal_op;
d614a513 8112 }
9ee6e8bb 8113
7dcc1f89 8114 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8115 goto illegal_op;
7dcc1f89 8116 }
9ee6e8bb
PB
8117 return;
8118 }
6a57f3eb
WN
8119 if ((insn & 0x0f000e10) == 0x0e000a00) {
8120 /* VFP. */
7dcc1f89 8121 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8122 goto illegal_op;
8123 }
8124 return;
8125 }
3d185e5d
PM
8126 if (((insn & 0x0f30f000) == 0x0510f000) ||
8127 ((insn & 0x0f30f010) == 0x0710f000)) {
8128 if ((insn & (1 << 22)) == 0) {
8129 /* PLDW; v7MP */
d614a513 8130 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8131 goto illegal_op;
8132 }
8133 }
8134 /* Otherwise PLD; v5TE+ */
be5e7a76 8135 ARCH(5TE);
3d185e5d
PM
8136 return;
8137 }
8138 if (((insn & 0x0f70f000) == 0x0450f000) ||
8139 ((insn & 0x0f70f010) == 0x0650f000)) {
8140 ARCH(7);
8141 return; /* PLI; V7 */
8142 }
8143 if (((insn & 0x0f700000) == 0x04100000) ||
8144 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8145 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8146 goto illegal_op;
8147 }
8148 return; /* v7MP: Unallocated memory hint: must NOP */
8149 }
8150
8151 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8152 ARCH(6);
8153 /* setend */
9886ecdf
PB
8154 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8155 gen_helper_setend(cpu_env);
dcba3a8d 8156 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8157 }
8158 return;
8159 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8160 switch ((insn >> 4) & 0xf) {
8161 case 1: /* clrex */
8162 ARCH(6K);
426f5abc 8163 gen_clrex(s);
9ee6e8bb
PB
8164 return;
8165 case 4: /* dsb */
8166 case 5: /* dmb */
9ee6e8bb 8167 ARCH(7);
61e4c432 8168 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8169 return;
6df99dec
SS
8170 case 6: /* isb */
8171 /* We need to break the TB after this insn to execute
8172 * self-modifying code correctly and also to take
8173 * any pending interrupts immediately.
8174 */
0b609cc1 8175 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8176 return;
9ee6e8bb
PB
8177 default:
8178 goto illegal_op;
8179 }
8180 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8181 /* srs */
81465888
PM
8182 ARCH(6);
8183 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8184 return;
ea825eee 8185 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8186 /* rfe */
c67b6b71 8187 int32_t offset;
9ee6e8bb
PB
8188 if (IS_USER(s))
8189 goto illegal_op;
8190 ARCH(6);
8191 rn = (insn >> 16) & 0xf;
b0109805 8192 addr = load_reg(s, rn);
9ee6e8bb
PB
8193 i = (insn >> 23) & 3;
8194 switch (i) {
b0109805 8195 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8196 case 1: offset = 0; break; /* IA */
8197 case 2: offset = -8; break; /* DB */
b0109805 8198 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8199 default: abort();
8200 }
8201 if (offset)
b0109805
PB
8202 tcg_gen_addi_i32(addr, addr, offset);
8203 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8204 tmp = tcg_temp_new_i32();
12dcc321 8205 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8206 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8207 tmp2 = tcg_temp_new_i32();
12dcc321 8208 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8209 if (insn & (1 << 21)) {
8210 /* Base writeback. */
8211 switch (i) {
b0109805 8212 case 0: offset = -8; break;
c67b6b71
FN
8213 case 1: offset = 4; break;
8214 case 2: offset = -4; break;
b0109805 8215 case 3: offset = 0; break;
9ee6e8bb
PB
8216 default: abort();
8217 }
8218 if (offset)
b0109805
PB
8219 tcg_gen_addi_i32(addr, addr, offset);
8220 store_reg(s, rn, addr);
8221 } else {
7d1b0095 8222 tcg_temp_free_i32(addr);
9ee6e8bb 8223 }
b0109805 8224 gen_rfe(s, tmp, tmp2);
c67b6b71 8225 return;
9ee6e8bb
PB
8226 } else if ((insn & 0x0e000000) == 0x0a000000) {
8227 /* branch link and change to thumb (blx <offset>) */
8228 int32_t offset;
8229
8230 val = (uint32_t)s->pc;
7d1b0095 8231 tmp = tcg_temp_new_i32();
d9ba4830
PB
8232 tcg_gen_movi_i32(tmp, val);
8233 store_reg(s, 14, tmp);
9ee6e8bb
PB
8234 /* Sign-extend the 24-bit offset */
8235 offset = (((int32_t)insn) << 8) >> 8;
8236 /* offset * 4 + bit24 * 2 + (thumb bit) */
8237 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8238 /* pipeline offset */
8239 val += 4;
be5e7a76 8240 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8241 gen_bx_im(s, val);
9ee6e8bb
PB
8242 return;
8243 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8244 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8245 /* iWMMXt register transfer. */
c0f4af17 8246 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8247 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8248 return;
c0f4af17
PM
8249 }
8250 }
9ee6e8bb
PB
8251 }
8252 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8253 /* Coprocessor double register transfer. */
be5e7a76 8254 ARCH(5TE);
9ee6e8bb
PB
8255 } else if ((insn & 0x0f000010) == 0x0e000010) {
8256 /* Additional coprocessor register transfer. */
7997d92f 8257 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8258 uint32_t mask;
8259 uint32_t val;
8260 /* cps (privileged) */
8261 if (IS_USER(s))
8262 return;
8263 mask = val = 0;
8264 if (insn & (1 << 19)) {
8265 if (insn & (1 << 8))
8266 mask |= CPSR_A;
8267 if (insn & (1 << 7))
8268 mask |= CPSR_I;
8269 if (insn & (1 << 6))
8270 mask |= CPSR_F;
8271 if (insn & (1 << 18))
8272 val |= mask;
8273 }
7997d92f 8274 if (insn & (1 << 17)) {
9ee6e8bb
PB
8275 mask |= CPSR_M;
8276 val |= (insn & 0x1f);
8277 }
8278 if (mask) {
2fbac54b 8279 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8280 }
8281 return;
8282 }
8283 goto illegal_op;
8284 }
8285 if (cond != 0xe) {
8286 /* if not always execute, we generate a conditional jump to
8287 next instruction */
8288 s->condlabel = gen_new_label();
39fb730a 8289 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8290 s->condjmp = 1;
8291 }
8292 if ((insn & 0x0f900000) == 0x03000000) {
8293 if ((insn & (1 << 21)) == 0) {
8294 ARCH(6T2);
8295 rd = (insn >> 12) & 0xf;
8296 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8297 if ((insn & (1 << 22)) == 0) {
8298 /* MOVW */
7d1b0095 8299 tmp = tcg_temp_new_i32();
5e3f878a 8300 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8301 } else {
8302 /* MOVT */
5e3f878a 8303 tmp = load_reg(s, rd);
86831435 8304 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8305 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8306 }
5e3f878a 8307 store_reg(s, rd, tmp);
9ee6e8bb
PB
8308 } else {
8309 if (((insn >> 12) & 0xf) != 0xf)
8310 goto illegal_op;
8311 if (((insn >> 16) & 0xf) == 0) {
8312 gen_nop_hint(s, insn & 0xff);
8313 } else {
8314 /* CPSR = immediate */
8315 val = insn & 0xff;
8316 shift = ((insn >> 8) & 0xf) * 2;
8317 if (shift)
8318 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8319 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8320 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8321 i, val)) {
9ee6e8bb 8322 goto illegal_op;
7dcc1f89 8323 }
9ee6e8bb
PB
8324 }
8325 }
8326 } else if ((insn & 0x0f900000) == 0x01000000
8327 && (insn & 0x00000090) != 0x00000090) {
8328 /* miscellaneous instructions */
8329 op1 = (insn >> 21) & 3;
8330 sh = (insn >> 4) & 0xf;
8331 rm = insn & 0xf;
8332 switch (sh) {
8bfd0550
PM
8333 case 0x0: /* MSR, MRS */
8334 if (insn & (1 << 9)) {
8335 /* MSR (banked) and MRS (banked) */
8336 int sysm = extract32(insn, 16, 4) |
8337 (extract32(insn, 8, 1) << 4);
8338 int r = extract32(insn, 22, 1);
8339
8340 if (op1 & 1) {
8341 /* MSR (banked) */
8342 gen_msr_banked(s, r, sysm, rm);
8343 } else {
8344 /* MRS (banked) */
8345 int rd = extract32(insn, 12, 4);
8346
8347 gen_mrs_banked(s, r, sysm, rd);
8348 }
8349 break;
8350 }
8351
8352 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8353 if (op1 & 1) {
8354 /* PSR = reg */
2fbac54b 8355 tmp = load_reg(s, rm);
9ee6e8bb 8356 i = ((op1 & 2) != 0);
7dcc1f89 8357 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8358 goto illegal_op;
8359 } else {
8360 /* reg = PSR */
8361 rd = (insn >> 12) & 0xf;
8362 if (op1 & 2) {
8363 if (IS_USER(s))
8364 goto illegal_op;
d9ba4830 8365 tmp = load_cpu_field(spsr);
9ee6e8bb 8366 } else {
7d1b0095 8367 tmp = tcg_temp_new_i32();
9ef39277 8368 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8369 }
d9ba4830 8370 store_reg(s, rd, tmp);
9ee6e8bb
PB
8371 }
8372 break;
8373 case 0x1:
8374 if (op1 == 1) {
8375 /* branch/exchange thumb (bx). */
be5e7a76 8376 ARCH(4T);
d9ba4830
PB
8377 tmp = load_reg(s, rm);
8378 gen_bx(s, tmp);
9ee6e8bb
PB
8379 } else if (op1 == 3) {
8380 /* clz */
be5e7a76 8381 ARCH(5);
9ee6e8bb 8382 rd = (insn >> 12) & 0xf;
1497c961 8383 tmp = load_reg(s, rm);
7539a012 8384 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8385 store_reg(s, rd, tmp);
9ee6e8bb
PB
8386 } else {
8387 goto illegal_op;
8388 }
8389 break;
8390 case 0x2:
8391 if (op1 == 1) {
8392 ARCH(5J); /* bxj */
8393 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8394 tmp = load_reg(s, rm);
8395 gen_bx(s, tmp);
9ee6e8bb
PB
8396 } else {
8397 goto illegal_op;
8398 }
8399 break;
8400 case 0x3:
8401 if (op1 != 1)
8402 goto illegal_op;
8403
be5e7a76 8404 ARCH(5);
9ee6e8bb 8405 /* branch link/exchange thumb (blx) */
d9ba4830 8406 tmp = load_reg(s, rm);
7d1b0095 8407 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8408 tcg_gen_movi_i32(tmp2, s->pc);
8409 store_reg(s, 14, tmp2);
8410 gen_bx(s, tmp);
9ee6e8bb 8411 break;
eb0ecd5a
WN
8412 case 0x4:
8413 {
8414 /* crc32/crc32c */
8415 uint32_t c = extract32(insn, 8, 4);
8416
8417 /* Check this CPU supports ARMv8 CRC instructions.
8418 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8419 * Bits 8, 10 and 11 should be zero.
8420 */
d614a513 8421 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8422 (c & 0xd) != 0) {
8423 goto illegal_op;
8424 }
8425
8426 rn = extract32(insn, 16, 4);
8427 rd = extract32(insn, 12, 4);
8428
8429 tmp = load_reg(s, rn);
8430 tmp2 = load_reg(s, rm);
aa633469
PM
8431 if (op1 == 0) {
8432 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8433 } else if (op1 == 1) {
8434 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8435 }
eb0ecd5a
WN
8436 tmp3 = tcg_const_i32(1 << op1);
8437 if (c & 0x2) {
8438 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8439 } else {
8440 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8441 }
8442 tcg_temp_free_i32(tmp2);
8443 tcg_temp_free_i32(tmp3);
8444 store_reg(s, rd, tmp);
8445 break;
8446 }
9ee6e8bb 8447 case 0x5: /* saturating add/subtract */
be5e7a76 8448 ARCH(5TE);
9ee6e8bb
PB
8449 rd = (insn >> 12) & 0xf;
8450 rn = (insn >> 16) & 0xf;
b40d0353 8451 tmp = load_reg(s, rm);
5e3f878a 8452 tmp2 = load_reg(s, rn);
9ee6e8bb 8453 if (op1 & 2)
9ef39277 8454 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8455 if (op1 & 1)
9ef39277 8456 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8457 else
9ef39277 8458 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8459 tcg_temp_free_i32(tmp2);
5e3f878a 8460 store_reg(s, rd, tmp);
9ee6e8bb 8461 break;
49e14940 8462 case 7:
d4a2dc67
PM
8463 {
8464 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8465 switch (op1) {
19a6e31c
PM
8466 case 0:
8467 /* HLT */
8468 gen_hlt(s, imm16);
8469 break;
37e6456e
PM
8470 case 1:
8471 /* bkpt */
8472 ARCH(5);
8473 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8474 syn_aa32_bkpt(imm16, false),
8475 default_exception_el(s));
37e6456e
PM
8476 break;
8477 case 2:
8478 /* Hypervisor call (v7) */
8479 ARCH(7);
8480 if (IS_USER(s)) {
8481 goto illegal_op;
8482 }
8483 gen_hvc(s, imm16);
8484 break;
8485 case 3:
8486 /* Secure monitor call (v6+) */
8487 ARCH(6K);
8488 if (IS_USER(s)) {
8489 goto illegal_op;
8490 }
8491 gen_smc(s);
8492 break;
8493 default:
19a6e31c 8494 g_assert_not_reached();
49e14940 8495 }
9ee6e8bb 8496 break;
d4a2dc67 8497 }
9ee6e8bb
PB
8498 case 0x8: /* signed multiply */
8499 case 0xa:
8500 case 0xc:
8501 case 0xe:
be5e7a76 8502 ARCH(5TE);
9ee6e8bb
PB
8503 rs = (insn >> 8) & 0xf;
8504 rn = (insn >> 12) & 0xf;
8505 rd = (insn >> 16) & 0xf;
8506 if (op1 == 1) {
8507 /* (32 * 16) >> 16 */
5e3f878a
PB
8508 tmp = load_reg(s, rm);
8509 tmp2 = load_reg(s, rs);
9ee6e8bb 8510 if (sh & 4)
5e3f878a 8511 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8512 else
5e3f878a 8513 gen_sxth(tmp2);
a7812ae4
PB
8514 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8515 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8516 tmp = tcg_temp_new_i32();
ecc7b3aa 8517 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8518 tcg_temp_free_i64(tmp64);
9ee6e8bb 8519 if ((sh & 2) == 0) {
5e3f878a 8520 tmp2 = load_reg(s, rn);
9ef39277 8521 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8522 tcg_temp_free_i32(tmp2);
9ee6e8bb 8523 }
5e3f878a 8524 store_reg(s, rd, tmp);
9ee6e8bb
PB
8525 } else {
8526 /* 16 * 16 */
5e3f878a
PB
8527 tmp = load_reg(s, rm);
8528 tmp2 = load_reg(s, rs);
8529 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8530 tcg_temp_free_i32(tmp2);
9ee6e8bb 8531 if (op1 == 2) {
a7812ae4
PB
8532 tmp64 = tcg_temp_new_i64();
8533 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8534 tcg_temp_free_i32(tmp);
a7812ae4
PB
8535 gen_addq(s, tmp64, rn, rd);
8536 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8537 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8538 } else {
8539 if (op1 == 0) {
5e3f878a 8540 tmp2 = load_reg(s, rn);
9ef39277 8541 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8542 tcg_temp_free_i32(tmp2);
9ee6e8bb 8543 }
5e3f878a 8544 store_reg(s, rd, tmp);
9ee6e8bb
PB
8545 }
8546 }
8547 break;
8548 default:
8549 goto illegal_op;
8550 }
8551 } else if (((insn & 0x0e000000) == 0 &&
8552 (insn & 0x00000090) != 0x90) ||
8553 ((insn & 0x0e000000) == (1 << 25))) {
8554 int set_cc, logic_cc, shiftop;
8555
8556 op1 = (insn >> 21) & 0xf;
8557 set_cc = (insn >> 20) & 1;
8558 logic_cc = table_logic_cc[op1] & set_cc;
8559
8560 /* data processing instruction */
8561 if (insn & (1 << 25)) {
8562 /* immediate operand */
8563 val = insn & 0xff;
8564 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8565 if (shift) {
9ee6e8bb 8566 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8567 }
7d1b0095 8568 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8569 tcg_gen_movi_i32(tmp2, val);
8570 if (logic_cc && shift) {
8571 gen_set_CF_bit31(tmp2);
8572 }
9ee6e8bb
PB
8573 } else {
8574 /* register */
8575 rm = (insn) & 0xf;
e9bb4aa9 8576 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8577 shiftop = (insn >> 5) & 3;
8578 if (!(insn & (1 << 4))) {
8579 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8580 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8581 } else {
8582 rs = (insn >> 8) & 0xf;
8984bd2e 8583 tmp = load_reg(s, rs);
e9bb4aa9 8584 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8585 }
8586 }
8587 if (op1 != 0x0f && op1 != 0x0d) {
8588 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8589 tmp = load_reg(s, rn);
8590 } else {
39d5492a 8591 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8592 }
8593 rd = (insn >> 12) & 0xf;
8594 switch(op1) {
8595 case 0x00:
e9bb4aa9
JR
8596 tcg_gen_and_i32(tmp, tmp, tmp2);
8597 if (logic_cc) {
8598 gen_logic_CC(tmp);
8599 }
7dcc1f89 8600 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8601 break;
8602 case 0x01:
e9bb4aa9
JR
8603 tcg_gen_xor_i32(tmp, tmp, tmp2);
8604 if (logic_cc) {
8605 gen_logic_CC(tmp);
8606 }
7dcc1f89 8607 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8608 break;
8609 case 0x02:
8610 if (set_cc && rd == 15) {
8611 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8612 if (IS_USER(s)) {
9ee6e8bb 8613 goto illegal_op;
e9bb4aa9 8614 }
72485ec4 8615 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8616 gen_exception_return(s, tmp);
9ee6e8bb 8617 } else {
e9bb4aa9 8618 if (set_cc) {
72485ec4 8619 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8620 } else {
8621 tcg_gen_sub_i32(tmp, tmp, tmp2);
8622 }
7dcc1f89 8623 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8624 }
8625 break;
8626 case 0x03:
e9bb4aa9 8627 if (set_cc) {
72485ec4 8628 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8629 } else {
8630 tcg_gen_sub_i32(tmp, tmp2, tmp);
8631 }
7dcc1f89 8632 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8633 break;
8634 case 0x04:
e9bb4aa9 8635 if (set_cc) {
72485ec4 8636 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8637 } else {
8638 tcg_gen_add_i32(tmp, tmp, tmp2);
8639 }
7dcc1f89 8640 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8641 break;
8642 case 0x05:
e9bb4aa9 8643 if (set_cc) {
49b4c31e 8644 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8645 } else {
8646 gen_add_carry(tmp, tmp, tmp2);
8647 }
7dcc1f89 8648 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8649 break;
8650 case 0x06:
e9bb4aa9 8651 if (set_cc) {
2de68a49 8652 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8653 } else {
8654 gen_sub_carry(tmp, tmp, tmp2);
8655 }
7dcc1f89 8656 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8657 break;
8658 case 0x07:
e9bb4aa9 8659 if (set_cc) {
2de68a49 8660 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8661 } else {
8662 gen_sub_carry(tmp, tmp2, tmp);
8663 }
7dcc1f89 8664 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8665 break;
8666 case 0x08:
8667 if (set_cc) {
e9bb4aa9
JR
8668 tcg_gen_and_i32(tmp, tmp, tmp2);
8669 gen_logic_CC(tmp);
9ee6e8bb 8670 }
7d1b0095 8671 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8672 break;
8673 case 0x09:
8674 if (set_cc) {
e9bb4aa9
JR
8675 tcg_gen_xor_i32(tmp, tmp, tmp2);
8676 gen_logic_CC(tmp);
9ee6e8bb 8677 }
7d1b0095 8678 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8679 break;
8680 case 0x0a:
8681 if (set_cc) {
72485ec4 8682 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8683 }
7d1b0095 8684 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8685 break;
8686 case 0x0b:
8687 if (set_cc) {
72485ec4 8688 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8689 }
7d1b0095 8690 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8691 break;
8692 case 0x0c:
e9bb4aa9
JR
8693 tcg_gen_or_i32(tmp, tmp, tmp2);
8694 if (logic_cc) {
8695 gen_logic_CC(tmp);
8696 }
7dcc1f89 8697 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8698 break;
8699 case 0x0d:
8700 if (logic_cc && rd == 15) {
8701 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8702 if (IS_USER(s)) {
9ee6e8bb 8703 goto illegal_op;
e9bb4aa9
JR
8704 }
8705 gen_exception_return(s, tmp2);
9ee6e8bb 8706 } else {
e9bb4aa9
JR
8707 if (logic_cc) {
8708 gen_logic_CC(tmp2);
8709 }
7dcc1f89 8710 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8711 }
8712 break;
8713 case 0x0e:
f669df27 8714 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8715 if (logic_cc) {
8716 gen_logic_CC(tmp);
8717 }
7dcc1f89 8718 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8719 break;
8720 default:
8721 case 0x0f:
e9bb4aa9
JR
8722 tcg_gen_not_i32(tmp2, tmp2);
8723 if (logic_cc) {
8724 gen_logic_CC(tmp2);
8725 }
7dcc1f89 8726 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8727 break;
8728 }
e9bb4aa9 8729 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8730 tcg_temp_free_i32(tmp2);
e9bb4aa9 8731 }
9ee6e8bb
PB
8732 } else {
8733 /* other instructions */
8734 op1 = (insn >> 24) & 0xf;
8735 switch(op1) {
8736 case 0x0:
8737 case 0x1:
8738 /* multiplies, extra load/stores */
8739 sh = (insn >> 5) & 3;
8740 if (sh == 0) {
8741 if (op1 == 0x0) {
8742 rd = (insn >> 16) & 0xf;
8743 rn = (insn >> 12) & 0xf;
8744 rs = (insn >> 8) & 0xf;
8745 rm = (insn) & 0xf;
8746 op1 = (insn >> 20) & 0xf;
8747 switch (op1) {
8748 case 0: case 1: case 2: case 3: case 6:
8749 /* 32 bit mul */
5e3f878a
PB
8750 tmp = load_reg(s, rs);
8751 tmp2 = load_reg(s, rm);
8752 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8753 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8754 if (insn & (1 << 22)) {
8755 /* Subtract (mls) */
8756 ARCH(6T2);
5e3f878a
PB
8757 tmp2 = load_reg(s, rn);
8758 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8759 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8760 } else if (insn & (1 << 21)) {
8761 /* Add */
5e3f878a
PB
8762 tmp2 = load_reg(s, rn);
8763 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8764 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8765 }
8766 if (insn & (1 << 20))
5e3f878a
PB
8767 gen_logic_CC(tmp);
8768 store_reg(s, rd, tmp);
9ee6e8bb 8769 break;
8aac08b1
AJ
8770 case 4:
8771 /* 64 bit mul double accumulate (UMAAL) */
8772 ARCH(6);
8773 tmp = load_reg(s, rs);
8774 tmp2 = load_reg(s, rm);
8775 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8776 gen_addq_lo(s, tmp64, rn);
8777 gen_addq_lo(s, tmp64, rd);
8778 gen_storeq_reg(s, rn, rd, tmp64);
8779 tcg_temp_free_i64(tmp64);
8780 break;
8781 case 8: case 9: case 10: case 11:
8782 case 12: case 13: case 14: case 15:
8783 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8784 tmp = load_reg(s, rs);
8785 tmp2 = load_reg(s, rm);
8aac08b1 8786 if (insn & (1 << 22)) {
c9f10124 8787 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8788 } else {
c9f10124 8789 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8790 }
8791 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8792 TCGv_i32 al = load_reg(s, rn);
8793 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8794 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8795 tcg_temp_free_i32(al);
8796 tcg_temp_free_i32(ah);
9ee6e8bb 8797 }
8aac08b1 8798 if (insn & (1 << 20)) {
c9f10124 8799 gen_logicq_cc(tmp, tmp2);
8aac08b1 8800 }
c9f10124
RH
8801 store_reg(s, rn, tmp);
8802 store_reg(s, rd, tmp2);
9ee6e8bb 8803 break;
8aac08b1
AJ
8804 default:
8805 goto illegal_op;
9ee6e8bb
PB
8806 }
8807 } else {
8808 rn = (insn >> 16) & 0xf;
8809 rd = (insn >> 12) & 0xf;
8810 if (insn & (1 << 23)) {
8811 /* load/store exclusive */
2359bf80 8812 int op2 = (insn >> 8) & 3;
86753403 8813 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8814
8815 switch (op2) {
8816 case 0: /* lda/stl */
8817 if (op1 == 1) {
8818 goto illegal_op;
8819 }
8820 ARCH(8);
8821 break;
8822 case 1: /* reserved */
8823 goto illegal_op;
8824 case 2: /* ldaex/stlex */
8825 ARCH(8);
8826 break;
8827 case 3: /* ldrex/strex */
8828 if (op1) {
8829 ARCH(6K);
8830 } else {
8831 ARCH(6);
8832 }
8833 break;
8834 }
8835
3174f8e9 8836 addr = tcg_temp_local_new_i32();
98a46317 8837 load_reg_var(s, addr, rn);
2359bf80
MR
8838
8839 /* Since the emulation does not have barriers,
8840 the acquire/release semantics need no special
8841 handling */
8842 if (op2 == 0) {
8843 if (insn & (1 << 20)) {
8844 tmp = tcg_temp_new_i32();
8845 switch (op1) {
8846 case 0: /* lda */
9bb6558a
PM
8847 gen_aa32_ld32u_iss(s, tmp, addr,
8848 get_mem_index(s),
8849 rd | ISSIsAcqRel);
2359bf80
MR
8850 break;
8851 case 2: /* ldab */
9bb6558a
PM
8852 gen_aa32_ld8u_iss(s, tmp, addr,
8853 get_mem_index(s),
8854 rd | ISSIsAcqRel);
2359bf80
MR
8855 break;
8856 case 3: /* ldah */
9bb6558a
PM
8857 gen_aa32_ld16u_iss(s, tmp, addr,
8858 get_mem_index(s),
8859 rd | ISSIsAcqRel);
2359bf80
MR
8860 break;
8861 default:
8862 abort();
8863 }
8864 store_reg(s, rd, tmp);
8865 } else {
8866 rm = insn & 0xf;
8867 tmp = load_reg(s, rm);
8868 switch (op1) {
8869 case 0: /* stl */
9bb6558a
PM
8870 gen_aa32_st32_iss(s, tmp, addr,
8871 get_mem_index(s),
8872 rm | ISSIsAcqRel);
2359bf80
MR
8873 break;
8874 case 2: /* stlb */
9bb6558a
PM
8875 gen_aa32_st8_iss(s, tmp, addr,
8876 get_mem_index(s),
8877 rm | ISSIsAcqRel);
2359bf80
MR
8878 break;
8879 case 3: /* stlh */
9bb6558a
PM
8880 gen_aa32_st16_iss(s, tmp, addr,
8881 get_mem_index(s),
8882 rm | ISSIsAcqRel);
2359bf80
MR
8883 break;
8884 default:
8885 abort();
8886 }
8887 tcg_temp_free_i32(tmp);
8888 }
8889 } else if (insn & (1 << 20)) {
86753403
PB
8890 switch (op1) {
8891 case 0: /* ldrex */
426f5abc 8892 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8893 break;
8894 case 1: /* ldrexd */
426f5abc 8895 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8896 break;
8897 case 2: /* ldrexb */
426f5abc 8898 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8899 break;
8900 case 3: /* ldrexh */
426f5abc 8901 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8902 break;
8903 default:
8904 abort();
8905 }
9ee6e8bb
PB
8906 } else {
8907 rm = insn & 0xf;
86753403
PB
8908 switch (op1) {
8909 case 0: /* strex */
426f5abc 8910 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8911 break;
8912 case 1: /* strexd */
502e64fe 8913 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8914 break;
8915 case 2: /* strexb */
426f5abc 8916 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8917 break;
8918 case 3: /* strexh */
426f5abc 8919 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8920 break;
8921 default:
8922 abort();
8923 }
9ee6e8bb 8924 }
39d5492a 8925 tcg_temp_free_i32(addr);
9ee6e8bb 8926 } else {
cf12bce0
EC
8927 TCGv taddr;
8928 TCGMemOp opc = s->be_data;
8929
9ee6e8bb
PB
8930 /* SWP instruction */
8931 rm = (insn) & 0xf;
8932
9ee6e8bb 8933 if (insn & (1 << 22)) {
cf12bce0 8934 opc |= MO_UB;
9ee6e8bb 8935 } else {
cf12bce0 8936 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8937 }
cf12bce0
EC
8938
8939 addr = load_reg(s, rn);
8940 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8941 tcg_temp_free_i32(addr);
cf12bce0
EC
8942
8943 tmp = load_reg(s, rm);
8944 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8945 get_mem_index(s), opc);
8946 tcg_temp_free(taddr);
8947 store_reg(s, rd, tmp);
9ee6e8bb
PB
8948 }
8949 }
8950 } else {
8951 int address_offset;
3960c336 8952 bool load = insn & (1 << 20);
63f26fcf
PM
8953 bool wbit = insn & (1 << 21);
8954 bool pbit = insn & (1 << 24);
3960c336 8955 bool doubleword = false;
9bb6558a
PM
8956 ISSInfo issinfo;
8957
9ee6e8bb
PB
8958 /* Misc load/store */
8959 rn = (insn >> 16) & 0xf;
8960 rd = (insn >> 12) & 0xf;
3960c336 8961
9bb6558a
PM
8962 /* ISS not valid if writeback */
8963 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8964
3960c336
PM
8965 if (!load && (sh & 2)) {
8966 /* doubleword */
8967 ARCH(5TE);
8968 if (rd & 1) {
8969 /* UNPREDICTABLE; we choose to UNDEF */
8970 goto illegal_op;
8971 }
8972 load = (sh & 1) == 0;
8973 doubleword = true;
8974 }
8975
b0109805 8976 addr = load_reg(s, rn);
63f26fcf 8977 if (pbit) {
b0109805 8978 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 8979 }
9ee6e8bb 8980 address_offset = 0;
3960c336
PM
8981
8982 if (doubleword) {
8983 if (!load) {
9ee6e8bb 8984 /* store */
b0109805 8985 tmp = load_reg(s, rd);
12dcc321 8986 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8987 tcg_temp_free_i32(tmp);
b0109805
PB
8988 tcg_gen_addi_i32(addr, addr, 4);
8989 tmp = load_reg(s, rd + 1);
12dcc321 8990 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8991 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8992 } else {
8993 /* load */
5a839c0d 8994 tmp = tcg_temp_new_i32();
12dcc321 8995 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8996 store_reg(s, rd, tmp);
8997 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8998 tmp = tcg_temp_new_i32();
12dcc321 8999 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9000 rd++;
9ee6e8bb
PB
9001 }
9002 address_offset = -4;
3960c336
PM
9003 } else if (load) {
9004 /* load */
9005 tmp = tcg_temp_new_i32();
9006 switch (sh) {
9007 case 1:
9bb6558a
PM
9008 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9009 issinfo);
3960c336
PM
9010 break;
9011 case 2:
9bb6558a
PM
9012 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9013 issinfo);
3960c336
PM
9014 break;
9015 default:
9016 case 3:
9bb6558a
PM
9017 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9018 issinfo);
3960c336
PM
9019 break;
9020 }
9ee6e8bb
PB
9021 } else {
9022 /* store */
b0109805 9023 tmp = load_reg(s, rd);
9bb6558a 9024 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9025 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9026 }
9027 /* Perform base writeback before the loaded value to
9028 ensure correct behavior with overlapping index registers.
b6af0975 9029 ldrd with base writeback is undefined if the
9ee6e8bb 9030 destination and index registers overlap. */
63f26fcf 9031 if (!pbit) {
b0109805
PB
9032 gen_add_datah_offset(s, insn, address_offset, addr);
9033 store_reg(s, rn, addr);
63f26fcf 9034 } else if (wbit) {
9ee6e8bb 9035 if (address_offset)
b0109805
PB
9036 tcg_gen_addi_i32(addr, addr, address_offset);
9037 store_reg(s, rn, addr);
9038 } else {
7d1b0095 9039 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9040 }
9041 if (load) {
9042 /* Complete the load. */
b0109805 9043 store_reg(s, rd, tmp);
9ee6e8bb
PB
9044 }
9045 }
9046 break;
9047 case 0x4:
9048 case 0x5:
9049 goto do_ldst;
9050 case 0x6:
9051 case 0x7:
9052 if (insn & (1 << 4)) {
9053 ARCH(6);
9054 /* Armv6 Media instructions. */
9055 rm = insn & 0xf;
9056 rn = (insn >> 16) & 0xf;
2c0262af 9057 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9058 rs = (insn >> 8) & 0xf;
9059 switch ((insn >> 23) & 3) {
9060 case 0: /* Parallel add/subtract. */
9061 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9062 tmp = load_reg(s, rn);
9063 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9064 sh = (insn >> 5) & 7;
9065 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9066 goto illegal_op;
6ddbc6e4 9067 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9068 tcg_temp_free_i32(tmp2);
6ddbc6e4 9069 store_reg(s, rd, tmp);
9ee6e8bb
PB
9070 break;
9071 case 1:
9072 if ((insn & 0x00700020) == 0) {
6c95676b 9073 /* Halfword pack. */
3670669c
PB
9074 tmp = load_reg(s, rn);
9075 tmp2 = load_reg(s, rm);
9ee6e8bb 9076 shift = (insn >> 7) & 0x1f;
3670669c
PB
9077 if (insn & (1 << 6)) {
9078 /* pkhtb */
22478e79
AZ
9079 if (shift == 0)
9080 shift = 31;
9081 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9082 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9083 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9084 } else {
9085 /* pkhbt */
22478e79
AZ
9086 if (shift)
9087 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9088 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9089 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9090 }
9091 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9092 tcg_temp_free_i32(tmp2);
3670669c 9093 store_reg(s, rd, tmp);
9ee6e8bb
PB
9094 } else if ((insn & 0x00200020) == 0x00200000) {
9095 /* [us]sat */
6ddbc6e4 9096 tmp = load_reg(s, rm);
9ee6e8bb
PB
9097 shift = (insn >> 7) & 0x1f;
9098 if (insn & (1 << 6)) {
9099 if (shift == 0)
9100 shift = 31;
6ddbc6e4 9101 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9102 } else {
6ddbc6e4 9103 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9104 }
9105 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9106 tmp2 = tcg_const_i32(sh);
9107 if (insn & (1 << 22))
9ef39277 9108 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9109 else
9ef39277 9110 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9111 tcg_temp_free_i32(tmp2);
6ddbc6e4 9112 store_reg(s, rd, tmp);
9ee6e8bb
PB
9113 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9114 /* [us]sat16 */
6ddbc6e4 9115 tmp = load_reg(s, rm);
9ee6e8bb 9116 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9117 tmp2 = tcg_const_i32(sh);
9118 if (insn & (1 << 22))
9ef39277 9119 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9120 else
9ef39277 9121 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9122 tcg_temp_free_i32(tmp2);
6ddbc6e4 9123 store_reg(s, rd, tmp);
9ee6e8bb
PB
9124 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9125 /* Select bytes. */
6ddbc6e4
PB
9126 tmp = load_reg(s, rn);
9127 tmp2 = load_reg(s, rm);
7d1b0095 9128 tmp3 = tcg_temp_new_i32();
0ecb72a5 9129 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9130 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9131 tcg_temp_free_i32(tmp3);
9132 tcg_temp_free_i32(tmp2);
6ddbc6e4 9133 store_reg(s, rd, tmp);
9ee6e8bb 9134 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9135 tmp = load_reg(s, rm);
9ee6e8bb 9136 shift = (insn >> 10) & 3;
1301f322 9137 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9138 rotate, a shift is sufficient. */
9139 if (shift != 0)
f669df27 9140 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9141 op1 = (insn >> 20) & 7;
9142 switch (op1) {
5e3f878a
PB
9143 case 0: gen_sxtb16(tmp); break;
9144 case 2: gen_sxtb(tmp); break;
9145 case 3: gen_sxth(tmp); break;
9146 case 4: gen_uxtb16(tmp); break;
9147 case 6: gen_uxtb(tmp); break;
9148 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9149 default: goto illegal_op;
9150 }
9151 if (rn != 15) {
5e3f878a 9152 tmp2 = load_reg(s, rn);
9ee6e8bb 9153 if ((op1 & 3) == 0) {
5e3f878a 9154 gen_add16(tmp, tmp2);
9ee6e8bb 9155 } else {
5e3f878a 9156 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9157 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9158 }
9159 }
6c95676b 9160 store_reg(s, rd, tmp);
9ee6e8bb
PB
9161 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9162 /* rev */
b0109805 9163 tmp = load_reg(s, rm);
9ee6e8bb
PB
9164 if (insn & (1 << 22)) {
9165 if (insn & (1 << 7)) {
b0109805 9166 gen_revsh(tmp);
9ee6e8bb
PB
9167 } else {
9168 ARCH(6T2);
b0109805 9169 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9170 }
9171 } else {
9172 if (insn & (1 << 7))
b0109805 9173 gen_rev16(tmp);
9ee6e8bb 9174 else
66896cb8 9175 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9176 }
b0109805 9177 store_reg(s, rd, tmp);
9ee6e8bb
PB
9178 } else {
9179 goto illegal_op;
9180 }
9181 break;
9182 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9183 switch ((insn >> 20) & 0x7) {
9184 case 5:
9185 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9186 /* op2 not 00x or 11x : UNDEF */
9187 goto illegal_op;
9188 }
838fa72d
AJ
9189 /* Signed multiply most significant [accumulate].
9190 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9191 tmp = load_reg(s, rm);
9192 tmp2 = load_reg(s, rs);
a7812ae4 9193 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9194
955a7dd5 9195 if (rd != 15) {
838fa72d 9196 tmp = load_reg(s, rd);
9ee6e8bb 9197 if (insn & (1 << 6)) {
838fa72d 9198 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9199 } else {
838fa72d 9200 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9201 }
9202 }
838fa72d
AJ
9203 if (insn & (1 << 5)) {
9204 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9205 }
9206 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9207 tmp = tcg_temp_new_i32();
ecc7b3aa 9208 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9209 tcg_temp_free_i64(tmp64);
955a7dd5 9210 store_reg(s, rn, tmp);
41e9564d
PM
9211 break;
9212 case 0:
9213 case 4:
9214 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9215 if (insn & (1 << 7)) {
9216 goto illegal_op;
9217 }
9218 tmp = load_reg(s, rm);
9219 tmp2 = load_reg(s, rs);
9ee6e8bb 9220 if (insn & (1 << 5))
5e3f878a
PB
9221 gen_swap_half(tmp2);
9222 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9223 if (insn & (1 << 22)) {
5e3f878a 9224 /* smlald, smlsld */
33bbd75a
PC
9225 TCGv_i64 tmp64_2;
9226
a7812ae4 9227 tmp64 = tcg_temp_new_i64();
33bbd75a 9228 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9229 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9230 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9231 tcg_temp_free_i32(tmp);
33bbd75a
PC
9232 tcg_temp_free_i32(tmp2);
9233 if (insn & (1 << 6)) {
9234 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9235 } else {
9236 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9237 }
9238 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9239 gen_addq(s, tmp64, rd, rn);
9240 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9241 tcg_temp_free_i64(tmp64);
9ee6e8bb 9242 } else {
5e3f878a 9243 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9244 if (insn & (1 << 6)) {
9245 /* This subtraction cannot overflow. */
9246 tcg_gen_sub_i32(tmp, tmp, tmp2);
9247 } else {
9248 /* This addition cannot overflow 32 bits;
9249 * however it may overflow considered as a
9250 * signed operation, in which case we must set
9251 * the Q flag.
9252 */
9253 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9254 }
9255 tcg_temp_free_i32(tmp2);
22478e79 9256 if (rd != 15)
9ee6e8bb 9257 {
22478e79 9258 tmp2 = load_reg(s, rd);
9ef39277 9259 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9260 tcg_temp_free_i32(tmp2);
9ee6e8bb 9261 }
22478e79 9262 store_reg(s, rn, tmp);
9ee6e8bb 9263 }
41e9564d 9264 break;
b8b8ea05
PM
9265 case 1:
9266 case 3:
9267 /* SDIV, UDIV */
d614a513 9268 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9269 goto illegal_op;
9270 }
9271 if (((insn >> 5) & 7) || (rd != 15)) {
9272 goto illegal_op;
9273 }
9274 tmp = load_reg(s, rm);
9275 tmp2 = load_reg(s, rs);
9276 if (insn & (1 << 21)) {
9277 gen_helper_udiv(tmp, tmp, tmp2);
9278 } else {
9279 gen_helper_sdiv(tmp, tmp, tmp2);
9280 }
9281 tcg_temp_free_i32(tmp2);
9282 store_reg(s, rn, tmp);
9283 break;
41e9564d
PM
9284 default:
9285 goto illegal_op;
9ee6e8bb
PB
9286 }
9287 break;
9288 case 3:
9289 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9290 switch (op1) {
9291 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9292 ARCH(6);
9293 tmp = load_reg(s, rm);
9294 tmp2 = load_reg(s, rs);
9295 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9296 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9297 if (rd != 15) {
9298 tmp2 = load_reg(s, rd);
6ddbc6e4 9299 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9300 tcg_temp_free_i32(tmp2);
9ee6e8bb 9301 }
ded9d295 9302 store_reg(s, rn, tmp);
9ee6e8bb
PB
9303 break;
9304 case 0x20: case 0x24: case 0x28: case 0x2c:
9305 /* Bitfield insert/clear. */
9306 ARCH(6T2);
9307 shift = (insn >> 7) & 0x1f;
9308 i = (insn >> 16) & 0x1f;
45140a57
KB
9309 if (i < shift) {
9310 /* UNPREDICTABLE; we choose to UNDEF */
9311 goto illegal_op;
9312 }
9ee6e8bb
PB
9313 i = i + 1 - shift;
9314 if (rm == 15) {
7d1b0095 9315 tmp = tcg_temp_new_i32();
5e3f878a 9316 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9317 } else {
5e3f878a 9318 tmp = load_reg(s, rm);
9ee6e8bb
PB
9319 }
9320 if (i != 32) {
5e3f878a 9321 tmp2 = load_reg(s, rd);
d593c48e 9322 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9323 tcg_temp_free_i32(tmp2);
9ee6e8bb 9324 }
5e3f878a 9325 store_reg(s, rd, tmp);
9ee6e8bb
PB
9326 break;
9327 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9328 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9329 ARCH(6T2);
5e3f878a 9330 tmp = load_reg(s, rm);
9ee6e8bb
PB
9331 shift = (insn >> 7) & 0x1f;
9332 i = ((insn >> 16) & 0x1f) + 1;
9333 if (shift + i > 32)
9334 goto illegal_op;
9335 if (i < 32) {
9336 if (op1 & 0x20) {
59a71b4c 9337 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9338 } else {
59a71b4c 9339 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9340 }
9341 }
5e3f878a 9342 store_reg(s, rd, tmp);
9ee6e8bb
PB
9343 break;
9344 default:
9345 goto illegal_op;
9346 }
9347 break;
9348 }
9349 break;
9350 }
9351 do_ldst:
9352 /* Check for undefined extension instructions
9353 * per the ARM Bible IE:
9354 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9355 */
9356 sh = (0xf << 20) | (0xf << 4);
9357 if (op1 == 0x7 && ((insn & sh) == sh))
9358 {
9359 goto illegal_op;
9360 }
9361 /* load/store byte/word */
9362 rn = (insn >> 16) & 0xf;
9363 rd = (insn >> 12) & 0xf;
b0109805 9364 tmp2 = load_reg(s, rn);
a99caa48
PM
9365 if ((insn & 0x01200000) == 0x00200000) {
9366 /* ldrt/strt */
579d21cc 9367 i = get_a32_user_mem_index(s);
a99caa48
PM
9368 } else {
9369 i = get_mem_index(s);
9370 }
9ee6e8bb 9371 if (insn & (1 << 24))
b0109805 9372 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9373 if (insn & (1 << 20)) {
9374 /* load */
5a839c0d 9375 tmp = tcg_temp_new_i32();
9ee6e8bb 9376 if (insn & (1 << 22)) {
9bb6558a 9377 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9378 } else {
9bb6558a 9379 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9380 }
9ee6e8bb
PB
9381 } else {
9382 /* store */
b0109805 9383 tmp = load_reg(s, rd);
5a839c0d 9384 if (insn & (1 << 22)) {
9bb6558a 9385 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9386 } else {
9bb6558a 9387 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9388 }
9389 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9390 }
9391 if (!(insn & (1 << 24))) {
b0109805
PB
9392 gen_add_data_offset(s, insn, tmp2);
9393 store_reg(s, rn, tmp2);
9394 } else if (insn & (1 << 21)) {
9395 store_reg(s, rn, tmp2);
9396 } else {
7d1b0095 9397 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9398 }
9399 if (insn & (1 << 20)) {
9400 /* Complete the load. */
7dcc1f89 9401 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9402 }
9403 break;
9404 case 0x08:
9405 case 0x09:
9406 {
da3e53dd
PM
9407 int j, n, loaded_base;
9408 bool exc_return = false;
9409 bool is_load = extract32(insn, 20, 1);
9410 bool user = false;
39d5492a 9411 TCGv_i32 loaded_var;
9ee6e8bb
PB
9412 /* load/store multiple words */
9413 /* XXX: store correct base if write back */
9ee6e8bb 9414 if (insn & (1 << 22)) {
da3e53dd 9415 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9416 if (IS_USER(s))
9417 goto illegal_op; /* only usable in supervisor mode */
9418
da3e53dd
PM
9419 if (is_load && extract32(insn, 15, 1)) {
9420 exc_return = true;
9421 } else {
9422 user = true;
9423 }
9ee6e8bb
PB
9424 }
9425 rn = (insn >> 16) & 0xf;
b0109805 9426 addr = load_reg(s, rn);
9ee6e8bb
PB
9427
9428 /* compute total size */
9429 loaded_base = 0;
39d5492a 9430 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9431 n = 0;
9432 for(i=0;i<16;i++) {
9433 if (insn & (1 << i))
9434 n++;
9435 }
9436 /* XXX: test invalid n == 0 case ? */
9437 if (insn & (1 << 23)) {
9438 if (insn & (1 << 24)) {
9439 /* pre increment */
b0109805 9440 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9441 } else {
9442 /* post increment */
9443 }
9444 } else {
9445 if (insn & (1 << 24)) {
9446 /* pre decrement */
b0109805 9447 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9448 } else {
9449 /* post decrement */
9450 if (n != 1)
b0109805 9451 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9452 }
9453 }
9454 j = 0;
9455 for(i=0;i<16;i++) {
9456 if (insn & (1 << i)) {
da3e53dd 9457 if (is_load) {
9ee6e8bb 9458 /* load */
5a839c0d 9459 tmp = tcg_temp_new_i32();
12dcc321 9460 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9461 if (user) {
b75263d6 9462 tmp2 = tcg_const_i32(i);
1ce94f81 9463 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9464 tcg_temp_free_i32(tmp2);
7d1b0095 9465 tcg_temp_free_i32(tmp);
9ee6e8bb 9466 } else if (i == rn) {
b0109805 9467 loaded_var = tmp;
9ee6e8bb 9468 loaded_base = 1;
fb0e8e79
PM
9469 } else if (rn == 15 && exc_return) {
9470 store_pc_exc_ret(s, tmp);
9ee6e8bb 9471 } else {
7dcc1f89 9472 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9473 }
9474 } else {
9475 /* store */
9476 if (i == 15) {
9477 /* special case: r15 = PC + 8 */
9478 val = (long)s->pc + 4;
7d1b0095 9479 tmp = tcg_temp_new_i32();
b0109805 9480 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9481 } else if (user) {
7d1b0095 9482 tmp = tcg_temp_new_i32();
b75263d6 9483 tmp2 = tcg_const_i32(i);
9ef39277 9484 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9485 tcg_temp_free_i32(tmp2);
9ee6e8bb 9486 } else {
b0109805 9487 tmp = load_reg(s, i);
9ee6e8bb 9488 }
12dcc321 9489 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9490 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9491 }
9492 j++;
9493 /* no need to add after the last transfer */
9494 if (j != n)
b0109805 9495 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9496 }
9497 }
9498 if (insn & (1 << 21)) {
9499 /* write back */
9500 if (insn & (1 << 23)) {
9501 if (insn & (1 << 24)) {
9502 /* pre increment */
9503 } else {
9504 /* post increment */
b0109805 9505 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9506 }
9507 } else {
9508 if (insn & (1 << 24)) {
9509 /* pre decrement */
9510 if (n != 1)
b0109805 9511 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9512 } else {
9513 /* post decrement */
b0109805 9514 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9515 }
9516 }
b0109805
PB
9517 store_reg(s, rn, addr);
9518 } else {
7d1b0095 9519 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9520 }
9521 if (loaded_base) {
b0109805 9522 store_reg(s, rn, loaded_var);
9ee6e8bb 9523 }
da3e53dd 9524 if (exc_return) {
9ee6e8bb 9525 /* Restore CPSR from SPSR. */
d9ba4830 9526 tmp = load_cpu_field(spsr);
235ea1f5 9527 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9528 tcg_temp_free_i32(tmp);
b29fd33d 9529 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9530 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9531 }
9532 }
9533 break;
9534 case 0xa:
9535 case 0xb:
9536 {
9537 int32_t offset;
9538
9539 /* branch (and link) */
9540 val = (int32_t)s->pc;
9541 if (insn & (1 << 24)) {
7d1b0095 9542 tmp = tcg_temp_new_i32();
5e3f878a
PB
9543 tcg_gen_movi_i32(tmp, val);
9544 store_reg(s, 14, tmp);
9ee6e8bb 9545 }
534df156
PM
9546 offset = sextract32(insn << 2, 0, 26);
9547 val += offset + 4;
9ee6e8bb
PB
9548 gen_jmp(s, val);
9549 }
9550 break;
9551 case 0xc:
9552 case 0xd:
9553 case 0xe:
6a57f3eb
WN
9554 if (((insn >> 8) & 0xe) == 10) {
9555 /* VFP. */
7dcc1f89 9556 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9557 goto illegal_op;
9558 }
7dcc1f89 9559 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9560 /* Coprocessor. */
9ee6e8bb 9561 goto illegal_op;
6a57f3eb 9562 }
9ee6e8bb
PB
9563 break;
9564 case 0xf:
9565 /* swi */
eaed129d 9566 gen_set_pc_im(s, s->pc);
d4a2dc67 9567 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9568 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9569 break;
9570 default:
9571 illegal_op:
73710361
GB
9572 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9573 default_exception_el(s));
9ee6e8bb
PB
9574 break;
9575 }
9576 }
9577}
9578
/* Return nonzero if OP (a Thumb-2 data-processing opcode field) selects
 * one of the logical operations, i.e. lies below 8; arithmetic ops are
 * 8 and above. */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
9585
9586/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9587 then set condition code flags based on the result of the operation.
9588 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9589 to the high bit of T1.
9590 Returns zero if the opcode is valid. */
9591
9592static int
39d5492a
PM
9593gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9594 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9595{
9596 int logic_cc;
9597
9598 logic_cc = 0;
9599 switch (op) {
9600 case 0: /* and */
396e467c 9601 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9602 logic_cc = conds;
9603 break;
9604 case 1: /* bic */
f669df27 9605 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9606 logic_cc = conds;
9607 break;
9608 case 2: /* orr */
396e467c 9609 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9610 logic_cc = conds;
9611 break;
9612 case 3: /* orn */
29501f1b 9613 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9614 logic_cc = conds;
9615 break;
9616 case 4: /* eor */
396e467c 9617 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9618 logic_cc = conds;
9619 break;
9620 case 8: /* add */
9621 if (conds)
72485ec4 9622 gen_add_CC(t0, t0, t1);
9ee6e8bb 9623 else
396e467c 9624 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9625 break;
9626 case 10: /* adc */
9627 if (conds)
49b4c31e 9628 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9629 else
396e467c 9630 gen_adc(t0, t1);
9ee6e8bb
PB
9631 break;
9632 case 11: /* sbc */
2de68a49
RH
9633 if (conds) {
9634 gen_sbc_CC(t0, t0, t1);
9635 } else {
396e467c 9636 gen_sub_carry(t0, t0, t1);
2de68a49 9637 }
9ee6e8bb
PB
9638 break;
9639 case 13: /* sub */
9640 if (conds)
72485ec4 9641 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9642 else
396e467c 9643 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9644 break;
9645 case 14: /* rsb */
9646 if (conds)
72485ec4 9647 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9648 else
396e467c 9649 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9650 break;
9651 default: /* 5, 6, 7, 9, 12, 15. */
9652 return 1;
9653 }
9654 if (logic_cc) {
396e467c 9655 gen_logic_CC(t0);
9ee6e8bb 9656 if (shifter_out)
396e467c 9657 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9658 }
9659 return 0;
9660}
9661
9662/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9663 is not legal. */
0ecb72a5 9664static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9665{
b0109805 9666 uint32_t insn, imm, shift, offset;
9ee6e8bb 9667 uint32_t rd, rn, rm, rs;
39d5492a
PM
9668 TCGv_i32 tmp;
9669 TCGv_i32 tmp2;
9670 TCGv_i32 tmp3;
9671 TCGv_i32 addr;
a7812ae4 9672 TCGv_i64 tmp64;
9ee6e8bb
PB
9673 int op;
9674 int shiftop;
9675 int conds;
9676 int logic_cc;
9677
d614a513
PM
9678 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9679 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9680 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9681 16-bit instructions to get correct prefetch abort behavior. */
9682 insn = insn_hw1;
9683 if ((insn & (1 << 12)) == 0) {
be5e7a76 9684 ARCH(5);
9ee6e8bb
PB
9685 /* Second half of blx. */
9686 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9687 tmp = load_reg(s, 14);
9688 tcg_gen_addi_i32(tmp, tmp, offset);
9689 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9690
7d1b0095 9691 tmp2 = tcg_temp_new_i32();
b0109805 9692 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9693 store_reg(s, 14, tmp2);
9694 gen_bx(s, tmp);
9ee6e8bb
PB
9695 return 0;
9696 }
9697 if (insn & (1 << 11)) {
9698 /* Second half of bl. */
9699 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9700 tmp = load_reg(s, 14);
6a0d8a1d 9701 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9702
7d1b0095 9703 tmp2 = tcg_temp_new_i32();
b0109805 9704 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9705 store_reg(s, 14, tmp2);
9706 gen_bx(s, tmp);
9ee6e8bb
PB
9707 return 0;
9708 }
9709 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9710 /* Instruction spans a page boundary. Implement it as two
9711 16-bit instructions in case the second half causes an
9712 prefetch abort. */
9713 offset = ((int32_t)insn << 21) >> 9;
396e467c 9714 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9715 return 0;
9716 }
9717 /* Fall through to 32-bit decode. */
9718 }
9719
f9fd40eb 9720 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9721 s->pc += 2;
9722 insn |= (uint32_t)insn_hw1 << 16;
9723
9724 if ((insn & 0xf800e800) != 0xf000e800) {
9725 ARCH(6T2);
9726 }
9727
9728 rn = (insn >> 16) & 0xf;
9729 rs = (insn >> 12) & 0xf;
9730 rd = (insn >> 8) & 0xf;
9731 rm = insn & 0xf;
9732 switch ((insn >> 25) & 0xf) {
9733 case 0: case 1: case 2: case 3:
9734 /* 16-bit instructions. Should never happen. */
9735 abort();
9736 case 4:
9737 if (insn & (1 << 22)) {
ebfe27c5
PM
9738 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9739 * - load/store doubleword, load/store exclusive, ldacq/strel,
9740 * table branch.
9741 */
9ee6e8bb 9742 if (insn & 0x01200000) {
ebfe27c5
PM
9743 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9744 * - load/store dual (post-indexed)
9745 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9746 * - load/store dual (literal and immediate)
9747 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9748 * - load/store dual (pre-indexed)
9749 */
9ee6e8bb 9750 if (rn == 15) {
ebfe27c5
PM
9751 if (insn & (1 << 21)) {
9752 /* UNPREDICTABLE */
9753 goto illegal_op;
9754 }
7d1b0095 9755 addr = tcg_temp_new_i32();
b0109805 9756 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9757 } else {
b0109805 9758 addr = load_reg(s, rn);
9ee6e8bb
PB
9759 }
9760 offset = (insn & 0xff) * 4;
9761 if ((insn & (1 << 23)) == 0)
9762 offset = -offset;
9763 if (insn & (1 << 24)) {
b0109805 9764 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9765 offset = 0;
9766 }
9767 if (insn & (1 << 20)) {
9768 /* ldrd */
e2592fad 9769 tmp = tcg_temp_new_i32();
12dcc321 9770 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9771 store_reg(s, rs, tmp);
9772 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9773 tmp = tcg_temp_new_i32();
12dcc321 9774 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9775 store_reg(s, rd, tmp);
9ee6e8bb
PB
9776 } else {
9777 /* strd */
b0109805 9778 tmp = load_reg(s, rs);
12dcc321 9779 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9780 tcg_temp_free_i32(tmp);
b0109805
PB
9781 tcg_gen_addi_i32(addr, addr, 4);
9782 tmp = load_reg(s, rd);
12dcc321 9783 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9784 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9785 }
9786 if (insn & (1 << 21)) {
9787 /* Base writeback. */
b0109805
PB
9788 tcg_gen_addi_i32(addr, addr, offset - 4);
9789 store_reg(s, rn, addr);
9790 } else {
7d1b0095 9791 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9792 }
9793 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
9794 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9795 * - load/store exclusive word
9796 */
9797 if (rs == 15) {
9798 goto illegal_op;
9799 }
39d5492a 9800 addr = tcg_temp_local_new_i32();
98a46317 9801 load_reg_var(s, addr, rn);
426f5abc 9802 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9803 if (insn & (1 << 20)) {
426f5abc 9804 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9805 } else {
426f5abc 9806 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9807 }
39d5492a 9808 tcg_temp_free_i32(addr);
2359bf80 9809 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9810 /* Table Branch. */
9811 if (rn == 15) {
7d1b0095 9812 addr = tcg_temp_new_i32();
b0109805 9813 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9814 } else {
b0109805 9815 addr = load_reg(s, rn);
9ee6e8bb 9816 }
b26eefb6 9817 tmp = load_reg(s, rm);
b0109805 9818 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9819 if (insn & (1 << 4)) {
9820 /* tbh */
b0109805 9821 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9822 tcg_temp_free_i32(tmp);
e2592fad 9823 tmp = tcg_temp_new_i32();
12dcc321 9824 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9825 } else { /* tbb */
7d1b0095 9826 tcg_temp_free_i32(tmp);
e2592fad 9827 tmp = tcg_temp_new_i32();
12dcc321 9828 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9829 }
7d1b0095 9830 tcg_temp_free_i32(addr);
b0109805
PB
9831 tcg_gen_shli_i32(tmp, tmp, 1);
9832 tcg_gen_addi_i32(tmp, tmp, s->pc);
9833 store_reg(s, 15, tmp);
9ee6e8bb 9834 } else {
2359bf80 9835 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9836 op = (insn >> 4) & 0x3;
2359bf80
MR
9837 switch (op2) {
9838 case 0:
426f5abc 9839 goto illegal_op;
2359bf80
MR
9840 case 1:
9841 /* Load/store exclusive byte/halfword/doubleword */
9842 if (op == 2) {
9843 goto illegal_op;
9844 }
9845 ARCH(7);
9846 break;
9847 case 2:
9848 /* Load-acquire/store-release */
9849 if (op == 3) {
9850 goto illegal_op;
9851 }
9852 /* Fall through */
9853 case 3:
9854 /* Load-acquire/store-release exclusive */
9855 ARCH(8);
9856 break;
426f5abc 9857 }
39d5492a 9858 addr = tcg_temp_local_new_i32();
98a46317 9859 load_reg_var(s, addr, rn);
2359bf80
MR
9860 if (!(op2 & 1)) {
9861 if (insn & (1 << 20)) {
9862 tmp = tcg_temp_new_i32();
9863 switch (op) {
9864 case 0: /* ldab */
9bb6558a
PM
9865 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9866 rs | ISSIsAcqRel);
2359bf80
MR
9867 break;
9868 case 1: /* ldah */
9bb6558a
PM
9869 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9870 rs | ISSIsAcqRel);
2359bf80
MR
9871 break;
9872 case 2: /* lda */
9bb6558a
PM
9873 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9874 rs | ISSIsAcqRel);
2359bf80
MR
9875 break;
9876 default:
9877 abort();
9878 }
9879 store_reg(s, rs, tmp);
9880 } else {
9881 tmp = load_reg(s, rs);
9882 switch (op) {
9883 case 0: /* stlb */
9bb6558a
PM
9884 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9885 rs | ISSIsAcqRel);
2359bf80
MR
9886 break;
9887 case 1: /* stlh */
9bb6558a
PM
9888 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9889 rs | ISSIsAcqRel);
2359bf80
MR
9890 break;
9891 case 2: /* stl */
9bb6558a
PM
9892 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9893 rs | ISSIsAcqRel);
2359bf80
MR
9894 break;
9895 default:
9896 abort();
9897 }
9898 tcg_temp_free_i32(tmp);
9899 }
9900 } else if (insn & (1 << 20)) {
426f5abc 9901 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9902 } else {
426f5abc 9903 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9904 }
39d5492a 9905 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9906 }
9907 } else {
9908 /* Load/store multiple, RFE, SRS. */
9909 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9910 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9911 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9912 goto illegal_op;
00115976 9913 }
9ee6e8bb
PB
9914 if (insn & (1 << 20)) {
9915 /* rfe */
b0109805
PB
9916 addr = load_reg(s, rn);
9917 if ((insn & (1 << 24)) == 0)
9918 tcg_gen_addi_i32(addr, addr, -8);
9919 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9920 tmp = tcg_temp_new_i32();
12dcc321 9921 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9922 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9923 tmp2 = tcg_temp_new_i32();
12dcc321 9924 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9925 if (insn & (1 << 21)) {
9926 /* Base writeback. */
b0109805
PB
9927 if (insn & (1 << 24)) {
9928 tcg_gen_addi_i32(addr, addr, 4);
9929 } else {
9930 tcg_gen_addi_i32(addr, addr, -4);
9931 }
9932 store_reg(s, rn, addr);
9933 } else {
7d1b0095 9934 tcg_temp_free_i32(addr);
9ee6e8bb 9935 }
b0109805 9936 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9937 } else {
9938 /* srs */
81465888
PM
9939 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9940 insn & (1 << 21));
9ee6e8bb
PB
9941 }
9942 } else {
5856d44e 9943 int i, loaded_base = 0;
39d5492a 9944 TCGv_i32 loaded_var;
9ee6e8bb 9945 /* Load/store multiple. */
b0109805 9946 addr = load_reg(s, rn);
9ee6e8bb
PB
9947 offset = 0;
9948 for (i = 0; i < 16; i++) {
9949 if (insn & (1 << i))
9950 offset += 4;
9951 }
9952 if (insn & (1 << 24)) {
b0109805 9953 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9954 }
9955
39d5492a 9956 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9957 for (i = 0; i < 16; i++) {
9958 if ((insn & (1 << i)) == 0)
9959 continue;
9960 if (insn & (1 << 20)) {
9961 /* Load. */
e2592fad 9962 tmp = tcg_temp_new_i32();
12dcc321 9963 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9964 if (i == 15) {
3bb8a96f 9965 gen_bx_excret(s, tmp);
5856d44e
YO
9966 } else if (i == rn) {
9967 loaded_var = tmp;
9968 loaded_base = 1;
9ee6e8bb 9969 } else {
b0109805 9970 store_reg(s, i, tmp);
9ee6e8bb
PB
9971 }
9972 } else {
9973 /* Store. */
b0109805 9974 tmp = load_reg(s, i);
12dcc321 9975 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9976 tcg_temp_free_i32(tmp);
9ee6e8bb 9977 }
b0109805 9978 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9979 }
5856d44e
YO
9980 if (loaded_base) {
9981 store_reg(s, rn, loaded_var);
9982 }
9ee6e8bb
PB
9983 if (insn & (1 << 21)) {
9984 /* Base register writeback. */
9985 if (insn & (1 << 24)) {
b0109805 9986 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9987 }
9988 /* Fault if writeback register is in register list. */
9989 if (insn & (1 << rn))
9990 goto illegal_op;
b0109805
PB
9991 store_reg(s, rn, addr);
9992 } else {
7d1b0095 9993 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9994 }
9995 }
9996 }
9997 break;
2af9ab77
JB
9998 case 5:
9999
9ee6e8bb 10000 op = (insn >> 21) & 0xf;
2af9ab77 10001 if (op == 6) {
62b44f05
AR
10002 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10003 goto illegal_op;
10004 }
2af9ab77
JB
10005 /* Halfword pack. */
10006 tmp = load_reg(s, rn);
10007 tmp2 = load_reg(s, rm);
10008 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10009 if (insn & (1 << 5)) {
10010 /* pkhtb */
10011 if (shift == 0)
10012 shift = 31;
10013 tcg_gen_sari_i32(tmp2, tmp2, shift);
10014 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10015 tcg_gen_ext16u_i32(tmp2, tmp2);
10016 } else {
10017 /* pkhbt */
10018 if (shift)
10019 tcg_gen_shli_i32(tmp2, tmp2, shift);
10020 tcg_gen_ext16u_i32(tmp, tmp);
10021 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10022 }
10023 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10024 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10025 store_reg(s, rd, tmp);
10026 } else {
2af9ab77
JB
10027 /* Data processing register constant shift. */
10028 if (rn == 15) {
7d1b0095 10029 tmp = tcg_temp_new_i32();
2af9ab77
JB
10030 tcg_gen_movi_i32(tmp, 0);
10031 } else {
10032 tmp = load_reg(s, rn);
10033 }
10034 tmp2 = load_reg(s, rm);
10035
10036 shiftop = (insn >> 4) & 3;
10037 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10038 conds = (insn & (1 << 20)) != 0;
10039 logic_cc = (conds && thumb2_logic_op(op));
10040 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10041 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10042 goto illegal_op;
7d1b0095 10043 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10044 if (rd != 15) {
10045 store_reg(s, rd, tmp);
10046 } else {
7d1b0095 10047 tcg_temp_free_i32(tmp);
2af9ab77 10048 }
3174f8e9 10049 }
9ee6e8bb
PB
10050 break;
10051 case 13: /* Misc data processing. */
10052 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10053 if (op < 4 && (insn & 0xf000) != 0xf000)
10054 goto illegal_op;
10055 switch (op) {
10056 case 0: /* Register controlled shift. */
8984bd2e
PB
10057 tmp = load_reg(s, rn);
10058 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10059 if ((insn & 0x70) != 0)
10060 goto illegal_op;
10061 op = (insn >> 21) & 3;
8984bd2e
PB
10062 logic_cc = (insn & (1 << 20)) != 0;
10063 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10064 if (logic_cc)
10065 gen_logic_CC(tmp);
bedb8a6b 10066 store_reg(s, rd, tmp);
9ee6e8bb
PB
10067 break;
10068 case 1: /* Sign/zero extend. */
62b44f05
AR
10069 op = (insn >> 20) & 7;
10070 switch (op) {
10071 case 0: /* SXTAH, SXTH */
10072 case 1: /* UXTAH, UXTH */
10073 case 4: /* SXTAB, SXTB */
10074 case 5: /* UXTAB, UXTB */
10075 break;
10076 case 2: /* SXTAB16, SXTB16 */
10077 case 3: /* UXTAB16, UXTB16 */
10078 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10079 goto illegal_op;
10080 }
10081 break;
10082 default:
10083 goto illegal_op;
10084 }
10085 if (rn != 15) {
10086 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10087 goto illegal_op;
10088 }
10089 }
5e3f878a 10090 tmp = load_reg(s, rm);
9ee6e8bb 10091 shift = (insn >> 4) & 3;
1301f322 10092 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10093 rotate, a shift is sufficient. */
10094 if (shift != 0)
f669df27 10095 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10096 op = (insn >> 20) & 7;
10097 switch (op) {
5e3f878a
PB
10098 case 0: gen_sxth(tmp); break;
10099 case 1: gen_uxth(tmp); break;
10100 case 2: gen_sxtb16(tmp); break;
10101 case 3: gen_uxtb16(tmp); break;
10102 case 4: gen_sxtb(tmp); break;
10103 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10104 default:
10105 g_assert_not_reached();
9ee6e8bb
PB
10106 }
10107 if (rn != 15) {
5e3f878a 10108 tmp2 = load_reg(s, rn);
9ee6e8bb 10109 if ((op >> 1) == 1) {
5e3f878a 10110 gen_add16(tmp, tmp2);
9ee6e8bb 10111 } else {
5e3f878a 10112 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10113 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10114 }
10115 }
5e3f878a 10116 store_reg(s, rd, tmp);
9ee6e8bb
PB
10117 break;
10118 case 2: /* SIMD add/subtract. */
62b44f05
AR
10119 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10120 goto illegal_op;
10121 }
9ee6e8bb
PB
10122 op = (insn >> 20) & 7;
10123 shift = (insn >> 4) & 7;
10124 if ((op & 3) == 3 || (shift & 3) == 3)
10125 goto illegal_op;
6ddbc6e4
PB
10126 tmp = load_reg(s, rn);
10127 tmp2 = load_reg(s, rm);
10128 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10129 tcg_temp_free_i32(tmp2);
6ddbc6e4 10130 store_reg(s, rd, tmp);
9ee6e8bb
PB
10131 break;
10132 case 3: /* Other data processing. */
10133 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10134 if (op < 4) {
10135 /* Saturating add/subtract. */
62b44f05
AR
10136 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10137 goto illegal_op;
10138 }
d9ba4830
PB
10139 tmp = load_reg(s, rn);
10140 tmp2 = load_reg(s, rm);
9ee6e8bb 10141 if (op & 1)
9ef39277 10142 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10143 if (op & 2)
9ef39277 10144 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10145 else
9ef39277 10146 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10147 tcg_temp_free_i32(tmp2);
9ee6e8bb 10148 } else {
62b44f05
AR
10149 switch (op) {
10150 case 0x0a: /* rbit */
10151 case 0x08: /* rev */
10152 case 0x09: /* rev16 */
10153 case 0x0b: /* revsh */
10154 case 0x18: /* clz */
10155 break;
10156 case 0x10: /* sel */
10157 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10158 goto illegal_op;
10159 }
10160 break;
10161 case 0x20: /* crc32/crc32c */
10162 case 0x21:
10163 case 0x22:
10164 case 0x28:
10165 case 0x29:
10166 case 0x2a:
10167 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10168 goto illegal_op;
10169 }
10170 break;
10171 default:
10172 goto illegal_op;
10173 }
d9ba4830 10174 tmp = load_reg(s, rn);
9ee6e8bb
PB
10175 switch (op) {
10176 case 0x0a: /* rbit */
d9ba4830 10177 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10178 break;
10179 case 0x08: /* rev */
66896cb8 10180 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10181 break;
10182 case 0x09: /* rev16 */
d9ba4830 10183 gen_rev16(tmp);
9ee6e8bb
PB
10184 break;
10185 case 0x0b: /* revsh */
d9ba4830 10186 gen_revsh(tmp);
9ee6e8bb
PB
10187 break;
10188 case 0x10: /* sel */
d9ba4830 10189 tmp2 = load_reg(s, rm);
7d1b0095 10190 tmp3 = tcg_temp_new_i32();
0ecb72a5 10191 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10192 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10193 tcg_temp_free_i32(tmp3);
10194 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10195 break;
10196 case 0x18: /* clz */
7539a012 10197 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10198 break;
eb0ecd5a
WN
10199 case 0x20:
10200 case 0x21:
10201 case 0x22:
10202 case 0x28:
10203 case 0x29:
10204 case 0x2a:
10205 {
10206 /* crc32/crc32c */
10207 uint32_t sz = op & 0x3;
10208 uint32_t c = op & 0x8;
10209
eb0ecd5a 10210 tmp2 = load_reg(s, rm);
aa633469
PM
10211 if (sz == 0) {
10212 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10213 } else if (sz == 1) {
10214 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10215 }
eb0ecd5a
WN
10216 tmp3 = tcg_const_i32(1 << sz);
10217 if (c) {
10218 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10219 } else {
10220 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10221 }
10222 tcg_temp_free_i32(tmp2);
10223 tcg_temp_free_i32(tmp3);
10224 break;
10225 }
9ee6e8bb 10226 default:
62b44f05 10227 g_assert_not_reached();
9ee6e8bb
PB
10228 }
10229 }
d9ba4830 10230 store_reg(s, rd, tmp);
9ee6e8bb
PB
10231 break;
10232 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10233 switch ((insn >> 20) & 7) {
10234 case 0: /* 32 x 32 -> 32 */
10235 case 7: /* Unsigned sum of absolute differences. */
10236 break;
10237 case 1: /* 16 x 16 -> 32 */
10238 case 2: /* Dual multiply add. */
10239 case 3: /* 32 * 16 -> 32msb */
10240 case 4: /* Dual multiply subtract. */
10241 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10242 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10243 goto illegal_op;
10244 }
10245 break;
10246 }
9ee6e8bb 10247 op = (insn >> 4) & 0xf;
d9ba4830
PB
10248 tmp = load_reg(s, rn);
10249 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10250 switch ((insn >> 20) & 7) {
10251 case 0: /* 32 x 32 -> 32 */
d9ba4830 10252 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10253 tcg_temp_free_i32(tmp2);
9ee6e8bb 10254 if (rs != 15) {
d9ba4830 10255 tmp2 = load_reg(s, rs);
9ee6e8bb 10256 if (op)
d9ba4830 10257 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10258 else
d9ba4830 10259 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10260 tcg_temp_free_i32(tmp2);
9ee6e8bb 10261 }
9ee6e8bb
PB
10262 break;
10263 case 1: /* 16 x 16 -> 32 */
d9ba4830 10264 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10265 tcg_temp_free_i32(tmp2);
9ee6e8bb 10266 if (rs != 15) {
d9ba4830 10267 tmp2 = load_reg(s, rs);
9ef39277 10268 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10269 tcg_temp_free_i32(tmp2);
9ee6e8bb 10270 }
9ee6e8bb
PB
10271 break;
10272 case 2: /* Dual multiply add. */
10273 case 4: /* Dual multiply subtract. */
10274 if (op)
d9ba4830
PB
10275 gen_swap_half(tmp2);
10276 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10277 if (insn & (1 << 22)) {
e1d177b9 10278 /* This subtraction cannot overflow. */
d9ba4830 10279 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10280 } else {
e1d177b9
PM
10281 /* This addition cannot overflow 32 bits;
10282 * however it may overflow considered as a signed
10283 * operation, in which case we must set the Q flag.
10284 */
9ef39277 10285 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10286 }
7d1b0095 10287 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10288 if (rs != 15)
10289 {
d9ba4830 10290 tmp2 = load_reg(s, rs);
9ef39277 10291 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10292 tcg_temp_free_i32(tmp2);
9ee6e8bb 10293 }
9ee6e8bb
PB
10294 break;
10295 case 3: /* 32 * 16 -> 32msb */
10296 if (op)
d9ba4830 10297 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10298 else
d9ba4830 10299 gen_sxth(tmp2);
a7812ae4
PB
10300 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10301 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10302 tmp = tcg_temp_new_i32();
ecc7b3aa 10303 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10304 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10305 if (rs != 15)
10306 {
d9ba4830 10307 tmp2 = load_reg(s, rs);
9ef39277 10308 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10309 tcg_temp_free_i32(tmp2);
9ee6e8bb 10310 }
9ee6e8bb 10311 break;
838fa72d
AJ
10312 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10313 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10314 if (rs != 15) {
838fa72d
AJ
10315 tmp = load_reg(s, rs);
10316 if (insn & (1 << 20)) {
10317 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10318 } else {
838fa72d 10319 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10320 }
2c0262af 10321 }
838fa72d
AJ
10322 if (insn & (1 << 4)) {
10323 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10324 }
10325 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10326 tmp = tcg_temp_new_i32();
ecc7b3aa 10327 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10328 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10329 break;
10330 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10331 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10332 tcg_temp_free_i32(tmp2);
9ee6e8bb 10333 if (rs != 15) {
d9ba4830
PB
10334 tmp2 = load_reg(s, rs);
10335 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10336 tcg_temp_free_i32(tmp2);
5fd46862 10337 }
9ee6e8bb 10338 break;
2c0262af 10339 }
d9ba4830 10340 store_reg(s, rd, tmp);
2c0262af 10341 break;
9ee6e8bb
PB
10342 case 6: case 7: /* 64-bit multiply, Divide. */
10343 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10344 tmp = load_reg(s, rn);
10345 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10346 if ((op & 0x50) == 0x10) {
10347 /* sdiv, udiv */
d614a513 10348 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10349 goto illegal_op;
47789990 10350 }
9ee6e8bb 10351 if (op & 0x20)
5e3f878a 10352 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10353 else
5e3f878a 10354 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10355 tcg_temp_free_i32(tmp2);
5e3f878a 10356 store_reg(s, rd, tmp);
9ee6e8bb
PB
10357 } else if ((op & 0xe) == 0xc) {
10358 /* Dual multiply accumulate long. */
62b44f05
AR
10359 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10360 tcg_temp_free_i32(tmp);
10361 tcg_temp_free_i32(tmp2);
10362 goto illegal_op;
10363 }
9ee6e8bb 10364 if (op & 1)
5e3f878a
PB
10365 gen_swap_half(tmp2);
10366 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10367 if (op & 0x10) {
5e3f878a 10368 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10369 } else {
5e3f878a 10370 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10371 }
7d1b0095 10372 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10373 /* BUGFIX */
10374 tmp64 = tcg_temp_new_i64();
10375 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10376 tcg_temp_free_i32(tmp);
a7812ae4
PB
10377 gen_addq(s, tmp64, rs, rd);
10378 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10379 tcg_temp_free_i64(tmp64);
2c0262af 10380 } else {
9ee6e8bb
PB
10381 if (op & 0x20) {
10382 /* Unsigned 64-bit multiply */
a7812ae4 10383 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10384 } else {
9ee6e8bb
PB
10385 if (op & 8) {
10386 /* smlalxy */
62b44f05
AR
10387 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10388 tcg_temp_free_i32(tmp2);
10389 tcg_temp_free_i32(tmp);
10390 goto illegal_op;
10391 }
5e3f878a 10392 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10393 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10394 tmp64 = tcg_temp_new_i64();
10395 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10396 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10397 } else {
10398 /* Signed 64-bit multiply */
a7812ae4 10399 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10400 }
b5ff1b31 10401 }
9ee6e8bb
PB
10402 if (op & 4) {
10403 /* umaal */
62b44f05
AR
10404 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10405 tcg_temp_free_i64(tmp64);
10406 goto illegal_op;
10407 }
a7812ae4
PB
10408 gen_addq_lo(s, tmp64, rs);
10409 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10410 } else if (op & 0x40) {
10411 /* 64-bit accumulate. */
a7812ae4 10412 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10413 }
a7812ae4 10414 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10415 tcg_temp_free_i64(tmp64);
5fd46862 10416 }
2c0262af 10417 break;
9ee6e8bb
PB
10418 }
10419 break;
10420 case 6: case 7: case 14: case 15:
10421 /* Coprocessor. */
7517748e
PM
10422 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10423 /* We don't currently implement M profile FP support,
10424 * so this entire space should give a NOCP fault.
10425 */
10426 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10427 default_exception_el(s));
10428 break;
10429 }
9ee6e8bb
PB
10430 if (((insn >> 24) & 3) == 3) {
10431 /* Translate into the equivalent ARM encoding. */
f06053e3 10432 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10433 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10434 goto illegal_op;
7dcc1f89 10435 }
6a57f3eb 10436 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10437 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10438 goto illegal_op;
10439 }
9ee6e8bb
PB
10440 } else {
10441 if (insn & (1 << 28))
10442 goto illegal_op;
7dcc1f89 10443 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10444 goto illegal_op;
7dcc1f89 10445 }
9ee6e8bb
PB
10446 }
10447 break;
10448 case 8: case 9: case 10: case 11:
10449 if (insn & (1 << 15)) {
10450 /* Branches, misc control. */
10451 if (insn & 0x5000) {
10452 /* Unconditional branch. */
10453 /* signextend(hw1[10:0]) -> offset[:12]. */
10454 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10455 /* hw1[10:0] -> offset[11:1]. */
10456 offset |= (insn & 0x7ff) << 1;
10457 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10458 offset[24:22] already have the same value because of the
10459 sign extension above. */
10460 offset ^= ((~insn) & (1 << 13)) << 10;
10461 offset ^= ((~insn) & (1 << 11)) << 11;
10462
9ee6e8bb
PB
10463 if (insn & (1 << 14)) {
10464 /* Branch and link. */
3174f8e9 10465 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10466 }
3b46e624 10467
b0109805 10468 offset += s->pc;
9ee6e8bb
PB
10469 if (insn & (1 << 12)) {
10470 /* b/bl */
b0109805 10471 gen_jmp(s, offset);
9ee6e8bb
PB
10472 } else {
10473 /* blx */
b0109805 10474 offset &= ~(uint32_t)2;
be5e7a76 10475 /* thumb2 bx, no need to check */
b0109805 10476 gen_bx_im(s, offset);
2c0262af 10477 }
9ee6e8bb
PB
10478 } else if (((insn >> 23) & 7) == 7) {
10479 /* Misc control */
10480 if (insn & (1 << 13))
10481 goto illegal_op;
10482
10483 if (insn & (1 << 26)) {
001b3cab
PM
10484 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10485 goto illegal_op;
10486 }
37e6456e
PM
10487 if (!(insn & (1 << 20))) {
10488 /* Hypervisor call (v7) */
10489 int imm16 = extract32(insn, 16, 4) << 12
10490 | extract32(insn, 0, 12);
10491 ARCH(7);
10492 if (IS_USER(s)) {
10493 goto illegal_op;
10494 }
10495 gen_hvc(s, imm16);
10496 } else {
10497 /* Secure monitor call (v6+) */
10498 ARCH(6K);
10499 if (IS_USER(s)) {
10500 goto illegal_op;
10501 }
10502 gen_smc(s);
10503 }
2c0262af 10504 } else {
9ee6e8bb
PB
10505 op = (insn >> 20) & 7;
10506 switch (op) {
10507 case 0: /* msr cpsr. */
b53d8923 10508 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10509 tmp = load_reg(s, rn);
b28b3377
PM
10510 /* the constant is the mask and SYSm fields */
10511 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10512 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10513 tcg_temp_free_i32(addr);
7d1b0095 10514 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10515 gen_lookup_tb(s);
10516 break;
10517 }
10518 /* fall through */
10519 case 1: /* msr spsr. */
b53d8923 10520 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10521 goto illegal_op;
b53d8923 10522 }
8bfd0550
PM
10523
10524 if (extract32(insn, 5, 1)) {
10525 /* MSR (banked) */
10526 int sysm = extract32(insn, 8, 4) |
10527 (extract32(insn, 4, 1) << 4);
10528 int r = op & 1;
10529
10530 gen_msr_banked(s, r, sysm, rm);
10531 break;
10532 }
10533
10534 /* MSR (for PSRs) */
2fbac54b
FN
10535 tmp = load_reg(s, rn);
10536 if (gen_set_psr(s,
7dcc1f89 10537 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10538 op == 1, tmp))
9ee6e8bb
PB
10539 goto illegal_op;
10540 break;
10541 case 2: /* cps, nop-hint. */
10542 if (((insn >> 8) & 7) == 0) {
10543 gen_nop_hint(s, insn & 0xff);
10544 }
10545 /* Implemented as NOP in user mode. */
10546 if (IS_USER(s))
10547 break;
10548 offset = 0;
10549 imm = 0;
10550 if (insn & (1 << 10)) {
10551 if (insn & (1 << 7))
10552 offset |= CPSR_A;
10553 if (insn & (1 << 6))
10554 offset |= CPSR_I;
10555 if (insn & (1 << 5))
10556 offset |= CPSR_F;
10557 if (insn & (1 << 9))
10558 imm = CPSR_A | CPSR_I | CPSR_F;
10559 }
10560 if (insn & (1 << 8)) {
10561 offset |= 0x1f;
10562 imm |= (insn & 0x1f);
10563 }
10564 if (offset) {
2fbac54b 10565 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10566 }
10567 break;
10568 case 3: /* Special control operations. */
426f5abc 10569 ARCH(7);
9ee6e8bb
PB
10570 op = (insn >> 4) & 0xf;
10571 switch (op) {
10572 case 2: /* clrex */
426f5abc 10573 gen_clrex(s);
9ee6e8bb
PB
10574 break;
10575 case 4: /* dsb */
10576 case 5: /* dmb */
61e4c432 10577 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10578 break;
6df99dec
SS
10579 case 6: /* isb */
10580 /* We need to break the TB after this insn
10581 * to execute self-modifying code correctly
10582 * and also to take any pending interrupts
10583 * immediately.
10584 */
0b609cc1 10585 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 10586 break;
9ee6e8bb
PB
10587 default:
10588 goto illegal_op;
10589 }
10590 break;
10591 case 4: /* bxj */
9d7c59c8
PM
10592 /* Trivial implementation equivalent to bx.
10593 * This instruction doesn't exist at all for M-profile.
10594 */
10595 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10596 goto illegal_op;
10597 }
d9ba4830
PB
10598 tmp = load_reg(s, rn);
10599 gen_bx(s, tmp);
9ee6e8bb
PB
10600 break;
10601 case 5: /* Exception return. */
b8b45b68
RV
10602 if (IS_USER(s)) {
10603 goto illegal_op;
10604 }
10605 if (rn != 14 || rd != 15) {
10606 goto illegal_op;
10607 }
10608 tmp = load_reg(s, rn);
10609 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10610 gen_exception_return(s, tmp);
10611 break;
8bfd0550 10612 case 6: /* MRS */
43ac6574
PM
10613 if (extract32(insn, 5, 1) &&
10614 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10615 /* MRS (banked) */
10616 int sysm = extract32(insn, 16, 4) |
10617 (extract32(insn, 4, 1) << 4);
10618
10619 gen_mrs_banked(s, 0, sysm, rd);
10620 break;
10621 }
10622
3d54026f
PM
10623 if (extract32(insn, 16, 4) != 0xf) {
10624 goto illegal_op;
10625 }
10626 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10627 extract32(insn, 0, 8) != 0) {
10628 goto illegal_op;
10629 }
10630
8bfd0550 10631 /* mrs cpsr */
7d1b0095 10632 tmp = tcg_temp_new_i32();
b53d8923 10633 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10634 addr = tcg_const_i32(insn & 0xff);
10635 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10636 tcg_temp_free_i32(addr);
9ee6e8bb 10637 } else {
9ef39277 10638 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10639 }
8984bd2e 10640 store_reg(s, rd, tmp);
9ee6e8bb 10641 break;
8bfd0550 10642 case 7: /* MRS */
43ac6574
PM
10643 if (extract32(insn, 5, 1) &&
10644 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10645 /* MRS (banked) */
10646 int sysm = extract32(insn, 16, 4) |
10647 (extract32(insn, 4, 1) << 4);
10648
10649 gen_mrs_banked(s, 1, sysm, rd);
10650 break;
10651 }
10652
10653 /* mrs spsr. */
9ee6e8bb 10654 /* Not accessible in user mode. */
b53d8923 10655 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10656 goto illegal_op;
b53d8923 10657 }
3d54026f
PM
10658
10659 if (extract32(insn, 16, 4) != 0xf ||
10660 extract32(insn, 0, 8) != 0) {
10661 goto illegal_op;
10662 }
10663
d9ba4830
PB
10664 tmp = load_cpu_field(spsr);
10665 store_reg(s, rd, tmp);
9ee6e8bb 10666 break;
2c0262af
FB
10667 }
10668 }
9ee6e8bb
PB
10669 } else {
10670 /* Conditional branch. */
10671 op = (insn >> 22) & 0xf;
10672 /* Generate a conditional jump to next instruction. */
10673 s->condlabel = gen_new_label();
39fb730a 10674 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10675 s->condjmp = 1;
10676
10677 /* offset[11:1] = insn[10:0] */
10678 offset = (insn & 0x7ff) << 1;
10679 /* offset[17:12] = insn[21:16]. */
10680 offset |= (insn & 0x003f0000) >> 4;
10681 /* offset[31:20] = insn[26]. */
10682 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10683 /* offset[18] = insn[13]. */
10684 offset |= (insn & (1 << 13)) << 5;
10685 /* offset[19] = insn[11]. */
10686 offset |= (insn & (1 << 11)) << 8;
10687
10688 /* jump to the offset */
b0109805 10689 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10690 }
10691 } else {
10692 /* Data processing immediate. */
10693 if (insn & (1 << 25)) {
10694 if (insn & (1 << 24)) {
10695 if (insn & (1 << 20))
10696 goto illegal_op;
10697 /* Bitfield/Saturate. */
10698 op = (insn >> 21) & 7;
10699 imm = insn & 0x1f;
10700 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10701 if (rn == 15) {
7d1b0095 10702 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10703 tcg_gen_movi_i32(tmp, 0);
10704 } else {
10705 tmp = load_reg(s, rn);
10706 }
9ee6e8bb
PB
10707 switch (op) {
10708 case 2: /* Signed bitfield extract. */
10709 imm++;
10710 if (shift + imm > 32)
10711 goto illegal_op;
59a71b4c
RH
10712 if (imm < 32) {
10713 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10714 }
9ee6e8bb
PB
10715 break;
10716 case 6: /* Unsigned bitfield extract. */
10717 imm++;
10718 if (shift + imm > 32)
10719 goto illegal_op;
59a71b4c
RH
10720 if (imm < 32) {
10721 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10722 }
9ee6e8bb
PB
10723 break;
10724 case 3: /* Bitfield insert/clear. */
10725 if (imm < shift)
10726 goto illegal_op;
10727 imm = imm + 1 - shift;
10728 if (imm != 32) {
6ddbc6e4 10729 tmp2 = load_reg(s, rd);
d593c48e 10730 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10731 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10732 }
10733 break;
10734 case 7:
10735 goto illegal_op;
10736 default: /* Saturate. */
9ee6e8bb
PB
10737 if (shift) {
10738 if (op & 1)
6ddbc6e4 10739 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10740 else
6ddbc6e4 10741 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10742 }
6ddbc6e4 10743 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10744 if (op & 4) {
10745 /* Unsigned. */
62b44f05
AR
10746 if ((op & 1) && shift == 0) {
10747 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10748 tcg_temp_free_i32(tmp);
10749 tcg_temp_free_i32(tmp2);
10750 goto illegal_op;
10751 }
9ef39277 10752 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10753 } else {
9ef39277 10754 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10755 }
2c0262af 10756 } else {
9ee6e8bb 10757 /* Signed. */
62b44f05
AR
10758 if ((op & 1) && shift == 0) {
10759 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10760 tcg_temp_free_i32(tmp);
10761 tcg_temp_free_i32(tmp2);
10762 goto illegal_op;
10763 }
9ef39277 10764 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10765 } else {
9ef39277 10766 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10767 }
2c0262af 10768 }
b75263d6 10769 tcg_temp_free_i32(tmp2);
9ee6e8bb 10770 break;
2c0262af 10771 }
6ddbc6e4 10772 store_reg(s, rd, tmp);
9ee6e8bb
PB
10773 } else {
10774 imm = ((insn & 0x04000000) >> 15)
10775 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10776 if (insn & (1 << 22)) {
10777 /* 16-bit immediate. */
10778 imm |= (insn >> 4) & 0xf000;
10779 if (insn & (1 << 23)) {
10780 /* movt */
5e3f878a 10781 tmp = load_reg(s, rd);
86831435 10782 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10783 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10784 } else {
9ee6e8bb 10785 /* movw */
7d1b0095 10786 tmp = tcg_temp_new_i32();
5e3f878a 10787 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10788 }
10789 } else {
9ee6e8bb
PB
10790 /* Add/sub 12-bit immediate. */
10791 if (rn == 15) {
b0109805 10792 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10793 if (insn & (1 << 23))
b0109805 10794 offset -= imm;
9ee6e8bb 10795 else
b0109805 10796 offset += imm;
7d1b0095 10797 tmp = tcg_temp_new_i32();
5e3f878a 10798 tcg_gen_movi_i32(tmp, offset);
2c0262af 10799 } else {
5e3f878a 10800 tmp = load_reg(s, rn);
9ee6e8bb 10801 if (insn & (1 << 23))
5e3f878a 10802 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10803 else
5e3f878a 10804 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10805 }
9ee6e8bb 10806 }
5e3f878a 10807 store_reg(s, rd, tmp);
191abaa2 10808 }
9ee6e8bb
PB
10809 } else {
10810 int shifter_out = 0;
10811 /* modified 12-bit immediate. */
10812 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10813 imm = (insn & 0xff);
10814 switch (shift) {
10815 case 0: /* XY */
10816 /* Nothing to do. */
10817 break;
10818 case 1: /* 00XY00XY */
10819 imm |= imm << 16;
10820 break;
10821 case 2: /* XY00XY00 */
10822 imm |= imm << 16;
10823 imm <<= 8;
10824 break;
10825 case 3: /* XYXYXYXY */
10826 imm |= imm << 16;
10827 imm |= imm << 8;
10828 break;
10829 default: /* Rotated constant. */
10830 shift = (shift << 1) | (imm >> 7);
10831 imm |= 0x80;
10832 imm = imm << (32 - shift);
10833 shifter_out = 1;
10834 break;
b5ff1b31 10835 }
7d1b0095 10836 tmp2 = tcg_temp_new_i32();
3174f8e9 10837 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10838 rn = (insn >> 16) & 0xf;
3174f8e9 10839 if (rn == 15) {
7d1b0095 10840 tmp = tcg_temp_new_i32();
3174f8e9
FN
10841 tcg_gen_movi_i32(tmp, 0);
10842 } else {
10843 tmp = load_reg(s, rn);
10844 }
9ee6e8bb
PB
10845 op = (insn >> 21) & 0xf;
10846 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10847 shifter_out, tmp, tmp2))
9ee6e8bb 10848 goto illegal_op;
7d1b0095 10849 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10850 rd = (insn >> 8) & 0xf;
10851 if (rd != 15) {
3174f8e9
FN
10852 store_reg(s, rd, tmp);
10853 } else {
7d1b0095 10854 tcg_temp_free_i32(tmp);
2c0262af 10855 }
2c0262af 10856 }
9ee6e8bb
PB
10857 }
10858 break;
10859 case 12: /* Load/store single data item. */
10860 {
10861 int postinc = 0;
10862 int writeback = 0;
a99caa48 10863 int memidx;
9bb6558a
PM
10864 ISSInfo issinfo;
10865
9ee6e8bb 10866 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10867 if (disas_neon_ls_insn(s, insn)) {
c1713132 10868 goto illegal_op;
7dcc1f89 10869 }
9ee6e8bb
PB
10870 break;
10871 }
a2fdc890
PM
10872 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10873 if (rs == 15) {
10874 if (!(insn & (1 << 20))) {
10875 goto illegal_op;
10876 }
10877 if (op != 2) {
10878 /* Byte or halfword load space with dest == r15 : memory hints.
10879 * Catch them early so we don't emit pointless addressing code.
10880 * This space is a mix of:
10881 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10882 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10883 * cores)
10884 * unallocated hints, which must be treated as NOPs
10885 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10886 * which is easiest for the decoding logic
10887 * Some space which must UNDEF
10888 */
10889 int op1 = (insn >> 23) & 3;
10890 int op2 = (insn >> 6) & 0x3f;
10891 if (op & 2) {
10892 goto illegal_op;
10893 }
10894 if (rn == 15) {
02afbf64
PM
10895 /* UNPREDICTABLE, unallocated hint or
10896 * PLD/PLDW/PLI (literal)
10897 */
a2fdc890
PM
10898 return 0;
10899 }
10900 if (op1 & 1) {
02afbf64 10901 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10902 }
10903 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10904 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10905 }
10906 /* UNDEF space, or an UNPREDICTABLE */
10907 return 1;
10908 }
10909 }
a99caa48 10910 memidx = get_mem_index(s);
9ee6e8bb 10911 if (rn == 15) {
7d1b0095 10912 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10913 /* PC relative. */
10914 /* s->pc has already been incremented by 4. */
10915 imm = s->pc & 0xfffffffc;
10916 if (insn & (1 << 23))
10917 imm += insn & 0xfff;
10918 else
10919 imm -= insn & 0xfff;
b0109805 10920 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10921 } else {
b0109805 10922 addr = load_reg(s, rn);
9ee6e8bb
PB
10923 if (insn & (1 << 23)) {
10924 /* Positive offset. */
10925 imm = insn & 0xfff;
b0109805 10926 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10927 } else {
9ee6e8bb 10928 imm = insn & 0xff;
2a0308c5
PM
10929 switch ((insn >> 8) & 0xf) {
10930 case 0x0: /* Shifted Register. */
9ee6e8bb 10931 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10932 if (shift > 3) {
10933 tcg_temp_free_i32(addr);
18c9b560 10934 goto illegal_op;
2a0308c5 10935 }
b26eefb6 10936 tmp = load_reg(s, rm);
9ee6e8bb 10937 if (shift)
b26eefb6 10938 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10939 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10940 tcg_temp_free_i32(tmp);
9ee6e8bb 10941 break;
2a0308c5 10942 case 0xc: /* Negative offset. */
b0109805 10943 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10944 break;
2a0308c5 10945 case 0xe: /* User privilege. */
b0109805 10946 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10947 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10948 break;
2a0308c5 10949 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10950 imm = -imm;
10951 /* Fall through. */
2a0308c5 10952 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10953 postinc = 1;
10954 writeback = 1;
10955 break;
2a0308c5 10956 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10957 imm = -imm;
10958 /* Fall through. */
2a0308c5 10959 case 0xf: /* Pre-increment. */
b0109805 10960 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10961 writeback = 1;
10962 break;
10963 default:
2a0308c5 10964 tcg_temp_free_i32(addr);
b7bcbe95 10965 goto illegal_op;
9ee6e8bb
PB
10966 }
10967 }
10968 }
9bb6558a
PM
10969
10970 issinfo = writeback ? ISSInvalid : rs;
10971
9ee6e8bb
PB
10972 if (insn & (1 << 20)) {
10973 /* Load. */
5a839c0d 10974 tmp = tcg_temp_new_i32();
a2fdc890 10975 switch (op) {
5a839c0d 10976 case 0:
9bb6558a 10977 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10978 break;
10979 case 4:
9bb6558a 10980 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10981 break;
10982 case 1:
9bb6558a 10983 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10984 break;
10985 case 5:
9bb6558a 10986 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10987 break;
10988 case 2:
9bb6558a 10989 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10990 break;
2a0308c5 10991 default:
5a839c0d 10992 tcg_temp_free_i32(tmp);
2a0308c5
PM
10993 tcg_temp_free_i32(addr);
10994 goto illegal_op;
a2fdc890
PM
10995 }
10996 if (rs == 15) {
3bb8a96f 10997 gen_bx_excret(s, tmp);
9ee6e8bb 10998 } else {
a2fdc890 10999 store_reg(s, rs, tmp);
9ee6e8bb
PB
11000 }
11001 } else {
11002 /* Store. */
b0109805 11003 tmp = load_reg(s, rs);
9ee6e8bb 11004 switch (op) {
5a839c0d 11005 case 0:
9bb6558a 11006 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11007 break;
11008 case 1:
9bb6558a 11009 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11010 break;
11011 case 2:
9bb6558a 11012 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11013 break;
2a0308c5 11014 default:
5a839c0d 11015 tcg_temp_free_i32(tmp);
2a0308c5
PM
11016 tcg_temp_free_i32(addr);
11017 goto illegal_op;
b7bcbe95 11018 }
5a839c0d 11019 tcg_temp_free_i32(tmp);
2c0262af 11020 }
9ee6e8bb 11021 if (postinc)
b0109805
PB
11022 tcg_gen_addi_i32(addr, addr, imm);
11023 if (writeback) {
11024 store_reg(s, rn, addr);
11025 } else {
7d1b0095 11026 tcg_temp_free_i32(addr);
b0109805 11027 }
9ee6e8bb
PB
11028 }
11029 break;
11030 default:
11031 goto illegal_op;
2c0262af 11032 }
9ee6e8bb
PB
11033 return 0;
11034illegal_op:
11035 return 1;
2c0262af
FB
11036}
11037
0ecb72a5 11038static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
11039{
11040 uint32_t val, insn, op, rm, rn, rd, shift, cond;
11041 int32_t offset;
11042 int i;
39d5492a
PM
11043 TCGv_i32 tmp;
11044 TCGv_i32 tmp2;
11045 TCGv_i32 addr;
99c475ab 11046
9ee6e8bb
PB
11047 if (s->condexec_mask) {
11048 cond = s->condexec_cond;
bedd2912
JB
11049 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
11050 s->condlabel = gen_new_label();
39fb730a 11051 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
11052 s->condjmp = 1;
11053 }
9ee6e8bb
PB
11054 }
11055
f9fd40eb 11056 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 11057 s->pc += 2;
b5ff1b31 11058
99c475ab
FB
11059 switch (insn >> 12) {
11060 case 0: case 1:
396e467c 11061
99c475ab
FB
11062 rd = insn & 7;
11063 op = (insn >> 11) & 3;
11064 if (op == 3) {
11065 /* add/subtract */
11066 rn = (insn >> 3) & 7;
396e467c 11067 tmp = load_reg(s, rn);
99c475ab
FB
11068 if (insn & (1 << 10)) {
11069 /* immediate */
7d1b0095 11070 tmp2 = tcg_temp_new_i32();
396e467c 11071 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11072 } else {
11073 /* reg */
11074 rm = (insn >> 6) & 7;
396e467c 11075 tmp2 = load_reg(s, rm);
99c475ab 11076 }
9ee6e8bb
PB
11077 if (insn & (1 << 9)) {
11078 if (s->condexec_mask)
396e467c 11079 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11080 else
72485ec4 11081 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11082 } else {
11083 if (s->condexec_mask)
396e467c 11084 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11085 else
72485ec4 11086 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11087 }
7d1b0095 11088 tcg_temp_free_i32(tmp2);
396e467c 11089 store_reg(s, rd, tmp);
99c475ab
FB
11090 } else {
11091 /* shift immediate */
11092 rm = (insn >> 3) & 7;
11093 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11094 tmp = load_reg(s, rm);
11095 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11096 if (!s->condexec_mask)
11097 gen_logic_CC(tmp);
11098 store_reg(s, rd, tmp);
99c475ab
FB
11099 }
11100 break;
11101 case 2: case 3:
11102 /* arithmetic large immediate */
11103 op = (insn >> 11) & 3;
11104 rd = (insn >> 8) & 0x7;
396e467c 11105 if (op == 0) { /* mov */
7d1b0095 11106 tmp = tcg_temp_new_i32();
396e467c 11107 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11108 if (!s->condexec_mask)
396e467c
FN
11109 gen_logic_CC(tmp);
11110 store_reg(s, rd, tmp);
11111 } else {
11112 tmp = load_reg(s, rd);
7d1b0095 11113 tmp2 = tcg_temp_new_i32();
396e467c
FN
11114 tcg_gen_movi_i32(tmp2, insn & 0xff);
11115 switch (op) {
11116 case 1: /* cmp */
72485ec4 11117 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11118 tcg_temp_free_i32(tmp);
11119 tcg_temp_free_i32(tmp2);
396e467c
FN
11120 break;
11121 case 2: /* add */
11122 if (s->condexec_mask)
11123 tcg_gen_add_i32(tmp, tmp, tmp2);
11124 else
72485ec4 11125 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11126 tcg_temp_free_i32(tmp2);
396e467c
FN
11127 store_reg(s, rd, tmp);
11128 break;
11129 case 3: /* sub */
11130 if (s->condexec_mask)
11131 tcg_gen_sub_i32(tmp, tmp, tmp2);
11132 else
72485ec4 11133 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11134 tcg_temp_free_i32(tmp2);
396e467c
FN
11135 store_reg(s, rd, tmp);
11136 break;
11137 }
99c475ab 11138 }
99c475ab
FB
11139 break;
11140 case 4:
11141 if (insn & (1 << 11)) {
11142 rd = (insn >> 8) & 7;
5899f386
FB
11143 /* load pc-relative. Bit 1 of PC is ignored. */
11144 val = s->pc + 2 + ((insn & 0xff) * 4);
11145 val &= ~(uint32_t)2;
7d1b0095 11146 addr = tcg_temp_new_i32();
b0109805 11147 tcg_gen_movi_i32(addr, val);
c40c8556 11148 tmp = tcg_temp_new_i32();
9bb6558a
PM
11149 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11150 rd | ISSIs16Bit);
7d1b0095 11151 tcg_temp_free_i32(addr);
b0109805 11152 store_reg(s, rd, tmp);
99c475ab
FB
11153 break;
11154 }
11155 if (insn & (1 << 10)) {
ebfe27c5
PM
11156 /* 0b0100_01xx_xxxx_xxxx
11157 * - data processing extended, branch and exchange
11158 */
99c475ab
FB
11159 rd = (insn & 7) | ((insn >> 4) & 8);
11160 rm = (insn >> 3) & 0xf;
11161 op = (insn >> 8) & 3;
11162 switch (op) {
11163 case 0: /* add */
396e467c
FN
11164 tmp = load_reg(s, rd);
11165 tmp2 = load_reg(s, rm);
11166 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11167 tcg_temp_free_i32(tmp2);
396e467c 11168 store_reg(s, rd, tmp);
99c475ab
FB
11169 break;
11170 case 1: /* cmp */
396e467c
FN
11171 tmp = load_reg(s, rd);
11172 tmp2 = load_reg(s, rm);
72485ec4 11173 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11174 tcg_temp_free_i32(tmp2);
11175 tcg_temp_free_i32(tmp);
99c475ab
FB
11176 break;
11177 case 2: /* mov/cpy */
396e467c
FN
11178 tmp = load_reg(s, rm);
11179 store_reg(s, rd, tmp);
99c475ab 11180 break;
ebfe27c5
PM
11181 case 3:
11182 {
11183 /* 0b0100_0111_xxxx_xxxx
11184 * - branch [and link] exchange thumb register
11185 */
11186 bool link = insn & (1 << 7);
11187
11188 if (insn & 7) {
11189 goto undef;
11190 }
11191 if (link) {
be5e7a76 11192 ARCH(5);
ebfe27c5
PM
11193 }
11194 tmp = load_reg(s, rm);
11195 if (link) {
99c475ab 11196 val = (uint32_t)s->pc | 1;
7d1b0095 11197 tmp2 = tcg_temp_new_i32();
b0109805
PB
11198 tcg_gen_movi_i32(tmp2, val);
11199 store_reg(s, 14, tmp2);
3bb8a96f
PM
11200 gen_bx(s, tmp);
11201 } else {
11202 /* Only BX works as exception-return, not BLX */
11203 gen_bx_excret(s, tmp);
99c475ab 11204 }
99c475ab
FB
11205 break;
11206 }
ebfe27c5 11207 }
99c475ab
FB
11208 break;
11209 }
11210
11211 /* data processing register */
11212 rd = insn & 7;
11213 rm = (insn >> 3) & 7;
11214 op = (insn >> 6) & 0xf;
11215 if (op == 2 || op == 3 || op == 4 || op == 7) {
11216 /* the shift/rotate ops want the operands backwards */
11217 val = rm;
11218 rm = rd;
11219 rd = val;
11220 val = 1;
11221 } else {
11222 val = 0;
11223 }
11224
396e467c 11225 if (op == 9) { /* neg */
7d1b0095 11226 tmp = tcg_temp_new_i32();
396e467c
FN
11227 tcg_gen_movi_i32(tmp, 0);
11228 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11229 tmp = load_reg(s, rd);
11230 } else {
39d5492a 11231 TCGV_UNUSED_I32(tmp);
396e467c 11232 }
99c475ab 11233
396e467c 11234 tmp2 = load_reg(s, rm);
5899f386 11235 switch (op) {
99c475ab 11236 case 0x0: /* and */
396e467c 11237 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11238 if (!s->condexec_mask)
396e467c 11239 gen_logic_CC(tmp);
99c475ab
FB
11240 break;
11241 case 0x1: /* eor */
396e467c 11242 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11243 if (!s->condexec_mask)
396e467c 11244 gen_logic_CC(tmp);
99c475ab
FB
11245 break;
11246 case 0x2: /* lsl */
9ee6e8bb 11247 if (s->condexec_mask) {
365af80e 11248 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11249 } else {
9ef39277 11250 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11251 gen_logic_CC(tmp2);
9ee6e8bb 11252 }
99c475ab
FB
11253 break;
11254 case 0x3: /* lsr */
9ee6e8bb 11255 if (s->condexec_mask) {
365af80e 11256 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11257 } else {
9ef39277 11258 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11259 gen_logic_CC(tmp2);
9ee6e8bb 11260 }
99c475ab
FB
11261 break;
11262 case 0x4: /* asr */
9ee6e8bb 11263 if (s->condexec_mask) {
365af80e 11264 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11265 } else {
9ef39277 11266 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11267 gen_logic_CC(tmp2);
9ee6e8bb 11268 }
99c475ab
FB
11269 break;
11270 case 0x5: /* adc */
49b4c31e 11271 if (s->condexec_mask) {
396e467c 11272 gen_adc(tmp, tmp2);
49b4c31e
RH
11273 } else {
11274 gen_adc_CC(tmp, tmp, tmp2);
11275 }
99c475ab
FB
11276 break;
11277 case 0x6: /* sbc */
2de68a49 11278 if (s->condexec_mask) {
396e467c 11279 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11280 } else {
11281 gen_sbc_CC(tmp, tmp, tmp2);
11282 }
99c475ab
FB
11283 break;
11284 case 0x7: /* ror */
9ee6e8bb 11285 if (s->condexec_mask) {
f669df27
AJ
11286 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11287 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11288 } else {
9ef39277 11289 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11290 gen_logic_CC(tmp2);
9ee6e8bb 11291 }
99c475ab
FB
11292 break;
11293 case 0x8: /* tst */
396e467c
FN
11294 tcg_gen_and_i32(tmp, tmp, tmp2);
11295 gen_logic_CC(tmp);
99c475ab 11296 rd = 16;
5899f386 11297 break;
99c475ab 11298 case 0x9: /* neg */
9ee6e8bb 11299 if (s->condexec_mask)
396e467c 11300 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11301 else
72485ec4 11302 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11303 break;
11304 case 0xa: /* cmp */
72485ec4 11305 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11306 rd = 16;
11307 break;
11308 case 0xb: /* cmn */
72485ec4 11309 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11310 rd = 16;
11311 break;
11312 case 0xc: /* orr */
396e467c 11313 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11314 if (!s->condexec_mask)
396e467c 11315 gen_logic_CC(tmp);
99c475ab
FB
11316 break;
11317 case 0xd: /* mul */
7b2919a0 11318 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11319 if (!s->condexec_mask)
396e467c 11320 gen_logic_CC(tmp);
99c475ab
FB
11321 break;
11322 case 0xe: /* bic */
f669df27 11323 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11324 if (!s->condexec_mask)
396e467c 11325 gen_logic_CC(tmp);
99c475ab
FB
11326 break;
11327 case 0xf: /* mvn */
396e467c 11328 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11329 if (!s->condexec_mask)
396e467c 11330 gen_logic_CC(tmp2);
99c475ab 11331 val = 1;
5899f386 11332 rm = rd;
99c475ab
FB
11333 break;
11334 }
11335 if (rd != 16) {
396e467c
FN
11336 if (val) {
11337 store_reg(s, rm, tmp2);
11338 if (op != 0xf)
7d1b0095 11339 tcg_temp_free_i32(tmp);
396e467c
FN
11340 } else {
11341 store_reg(s, rd, tmp);
7d1b0095 11342 tcg_temp_free_i32(tmp2);
396e467c
FN
11343 }
11344 } else {
7d1b0095
PM
11345 tcg_temp_free_i32(tmp);
11346 tcg_temp_free_i32(tmp2);
99c475ab
FB
11347 }
11348 break;
11349
11350 case 5:
11351 /* load/store register offset. */
11352 rd = insn & 7;
11353 rn = (insn >> 3) & 7;
11354 rm = (insn >> 6) & 7;
11355 op = (insn >> 9) & 7;
b0109805 11356 addr = load_reg(s, rn);
b26eefb6 11357 tmp = load_reg(s, rm);
b0109805 11358 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11359 tcg_temp_free_i32(tmp);
99c475ab 11360
c40c8556 11361 if (op < 3) { /* store */
b0109805 11362 tmp = load_reg(s, rd);
c40c8556
PM
11363 } else {
11364 tmp = tcg_temp_new_i32();
11365 }
99c475ab
FB
11366
11367 switch (op) {
11368 case 0: /* str */
9bb6558a 11369 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11370 break;
11371 case 1: /* strh */
9bb6558a 11372 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11373 break;
11374 case 2: /* strb */
9bb6558a 11375 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11376 break;
11377 case 3: /* ldrsb */
9bb6558a 11378 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11379 break;
11380 case 4: /* ldr */
9bb6558a 11381 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11382 break;
11383 case 5: /* ldrh */
9bb6558a 11384 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11385 break;
11386 case 6: /* ldrb */
9bb6558a 11387 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11388 break;
11389 case 7: /* ldrsh */
9bb6558a 11390 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11391 break;
11392 }
c40c8556 11393 if (op >= 3) { /* load */
b0109805 11394 store_reg(s, rd, tmp);
c40c8556
PM
11395 } else {
11396 tcg_temp_free_i32(tmp);
11397 }
7d1b0095 11398 tcg_temp_free_i32(addr);
99c475ab
FB
11399 break;
11400
11401 case 6:
11402 /* load/store word immediate offset */
11403 rd = insn & 7;
11404 rn = (insn >> 3) & 7;
b0109805 11405 addr = load_reg(s, rn);
99c475ab 11406 val = (insn >> 4) & 0x7c;
b0109805 11407 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11408
11409 if (insn & (1 << 11)) {
11410 /* load */
c40c8556 11411 tmp = tcg_temp_new_i32();
12dcc321 11412 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11413 store_reg(s, rd, tmp);
99c475ab
FB
11414 } else {
11415 /* store */
b0109805 11416 tmp = load_reg(s, rd);
12dcc321 11417 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11418 tcg_temp_free_i32(tmp);
99c475ab 11419 }
7d1b0095 11420 tcg_temp_free_i32(addr);
99c475ab
FB
11421 break;
11422
11423 case 7:
11424 /* load/store byte immediate offset */
11425 rd = insn & 7;
11426 rn = (insn >> 3) & 7;
b0109805 11427 addr = load_reg(s, rn);
99c475ab 11428 val = (insn >> 6) & 0x1f;
b0109805 11429 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11430
11431 if (insn & (1 << 11)) {
11432 /* load */
c40c8556 11433 tmp = tcg_temp_new_i32();
9bb6558a 11434 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11435 store_reg(s, rd, tmp);
99c475ab
FB
11436 } else {
11437 /* store */
b0109805 11438 tmp = load_reg(s, rd);
9bb6558a 11439 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11440 tcg_temp_free_i32(tmp);
99c475ab 11441 }
7d1b0095 11442 tcg_temp_free_i32(addr);
99c475ab
FB
11443 break;
11444
11445 case 8:
11446 /* load/store halfword immediate offset */
11447 rd = insn & 7;
11448 rn = (insn >> 3) & 7;
b0109805 11449 addr = load_reg(s, rn);
99c475ab 11450 val = (insn >> 5) & 0x3e;
b0109805 11451 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11452
11453 if (insn & (1 << 11)) {
11454 /* load */
c40c8556 11455 tmp = tcg_temp_new_i32();
9bb6558a 11456 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11457 store_reg(s, rd, tmp);
99c475ab
FB
11458 } else {
11459 /* store */
b0109805 11460 tmp = load_reg(s, rd);
9bb6558a 11461 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11462 tcg_temp_free_i32(tmp);
99c475ab 11463 }
7d1b0095 11464 tcg_temp_free_i32(addr);
99c475ab
FB
11465 break;
11466
11467 case 9:
11468 /* load/store from stack */
11469 rd = (insn >> 8) & 7;
b0109805 11470 addr = load_reg(s, 13);
99c475ab 11471 val = (insn & 0xff) * 4;
b0109805 11472 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11473
11474 if (insn & (1 << 11)) {
11475 /* load */
c40c8556 11476 tmp = tcg_temp_new_i32();
9bb6558a 11477 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11478 store_reg(s, rd, tmp);
99c475ab
FB
11479 } else {
11480 /* store */
b0109805 11481 tmp = load_reg(s, rd);
9bb6558a 11482 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11483 tcg_temp_free_i32(tmp);
99c475ab 11484 }
7d1b0095 11485 tcg_temp_free_i32(addr);
99c475ab
FB
11486 break;
11487
11488 case 10:
11489 /* add to high reg */
11490 rd = (insn >> 8) & 7;
5899f386
FB
11491 if (insn & (1 << 11)) {
11492 /* SP */
5e3f878a 11493 tmp = load_reg(s, 13);
5899f386
FB
11494 } else {
11495 /* PC. bit 1 is ignored. */
7d1b0095 11496 tmp = tcg_temp_new_i32();
5e3f878a 11497 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11498 }
99c475ab 11499 val = (insn & 0xff) * 4;
5e3f878a
PB
11500 tcg_gen_addi_i32(tmp, tmp, val);
11501 store_reg(s, rd, tmp);
99c475ab
FB
11502 break;
11503
11504 case 11:
11505 /* misc */
11506 op = (insn >> 8) & 0xf;
11507 switch (op) {
11508 case 0:
11509 /* adjust stack pointer */
b26eefb6 11510 tmp = load_reg(s, 13);
99c475ab
FB
11511 val = (insn & 0x7f) * 4;
11512 if (insn & (1 << 7))
6a0d8a1d 11513 val = -(int32_t)val;
b26eefb6
PB
11514 tcg_gen_addi_i32(tmp, tmp, val);
11515 store_reg(s, 13, tmp);
99c475ab
FB
11516 break;
11517
9ee6e8bb
PB
11518 case 2: /* sign/zero extend. */
11519 ARCH(6);
11520 rd = insn & 7;
11521 rm = (insn >> 3) & 7;
b0109805 11522 tmp = load_reg(s, rm);
9ee6e8bb 11523 switch ((insn >> 6) & 3) {
b0109805
PB
11524 case 0: gen_sxth(tmp); break;
11525 case 1: gen_sxtb(tmp); break;
11526 case 2: gen_uxth(tmp); break;
11527 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11528 }
b0109805 11529 store_reg(s, rd, tmp);
9ee6e8bb 11530 break;
99c475ab
FB
11531 case 4: case 5: case 0xc: case 0xd:
11532 /* push/pop */
b0109805 11533 addr = load_reg(s, 13);
5899f386
FB
11534 if (insn & (1 << 8))
11535 offset = 4;
99c475ab 11536 else
5899f386
FB
11537 offset = 0;
11538 for (i = 0; i < 8; i++) {
11539 if (insn & (1 << i))
11540 offset += 4;
11541 }
11542 if ((insn & (1 << 11)) == 0) {
b0109805 11543 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11544 }
99c475ab
FB
11545 for (i = 0; i < 8; i++) {
11546 if (insn & (1 << i)) {
11547 if (insn & (1 << 11)) {
11548 /* pop */
c40c8556 11549 tmp = tcg_temp_new_i32();
12dcc321 11550 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11551 store_reg(s, i, tmp);
99c475ab
FB
11552 } else {
11553 /* push */
b0109805 11554 tmp = load_reg(s, i);
12dcc321 11555 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11556 tcg_temp_free_i32(tmp);
99c475ab 11557 }
5899f386 11558 /* advance to the next address. */
b0109805 11559 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11560 }
11561 }
39d5492a 11562 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11563 if (insn & (1 << 8)) {
11564 if (insn & (1 << 11)) {
11565 /* pop pc */
c40c8556 11566 tmp = tcg_temp_new_i32();
12dcc321 11567 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11568 /* don't set the pc until the rest of the instruction
11569 has completed */
11570 } else {
11571 /* push lr */
b0109805 11572 tmp = load_reg(s, 14);
12dcc321 11573 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11574 tcg_temp_free_i32(tmp);
99c475ab 11575 }
b0109805 11576 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11577 }
5899f386 11578 if ((insn & (1 << 11)) == 0) {
b0109805 11579 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11580 }
99c475ab 11581 /* write back the new stack pointer */
b0109805 11582 store_reg(s, 13, addr);
99c475ab 11583 /* set the new PC value */
be5e7a76 11584 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11585 store_reg_from_load(s, 15, tmp);
be5e7a76 11586 }
99c475ab
FB
11587 break;
11588
9ee6e8bb
PB
11589 case 1: case 3: case 9: case 11: /* czb */
11590 rm = insn & 7;
d9ba4830 11591 tmp = load_reg(s, rm);
9ee6e8bb
PB
11592 s->condlabel = gen_new_label();
11593 s->condjmp = 1;
11594 if (insn & (1 << 11))
cb63669a 11595 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11596 else
cb63669a 11597 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11598 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11599 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11600 val = (uint32_t)s->pc + 2;
11601 val += offset;
11602 gen_jmp(s, val);
11603 break;
11604
11605 case 15: /* IT, nop-hint. */
11606 if ((insn & 0xf) == 0) {
11607 gen_nop_hint(s, (insn >> 4) & 0xf);
11608 break;
11609 }
11610 /* If Then. */
11611 s->condexec_cond = (insn >> 4) & 0xe;
11612 s->condexec_mask = insn & 0x1f;
11613 /* No actual code generated for this insn, just setup state. */
11614 break;
11615
06c949e6 11616 case 0xe: /* bkpt */
d4a2dc67
PM
11617 {
11618 int imm8 = extract32(insn, 0, 8);
be5e7a76 11619 ARCH(5);
73710361
GB
11620 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11621 default_exception_el(s));
06c949e6 11622 break;
d4a2dc67 11623 }
06c949e6 11624
19a6e31c
PM
11625 case 0xa: /* rev, and hlt */
11626 {
11627 int op1 = extract32(insn, 6, 2);
11628
11629 if (op1 == 2) {
11630 /* HLT */
11631 int imm6 = extract32(insn, 0, 6);
11632
11633 gen_hlt(s, imm6);
11634 break;
11635 }
11636
11637 /* Otherwise this is rev */
9ee6e8bb
PB
11638 ARCH(6);
11639 rn = (insn >> 3) & 0x7;
11640 rd = insn & 0x7;
b0109805 11641 tmp = load_reg(s, rn);
19a6e31c 11642 switch (op1) {
66896cb8 11643 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11644 case 1: gen_rev16(tmp); break;
11645 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11646 default:
11647 g_assert_not_reached();
9ee6e8bb 11648 }
b0109805 11649 store_reg(s, rd, tmp);
9ee6e8bb 11650 break;
19a6e31c 11651 }
9ee6e8bb 11652
d9e028c1
PM
11653 case 6:
11654 switch ((insn >> 5) & 7) {
11655 case 2:
11656 /* setend */
11657 ARCH(6);
9886ecdf
PB
11658 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11659 gen_helper_setend(cpu_env);
dcba3a8d 11660 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11661 }
9ee6e8bb 11662 break;
d9e028c1
PM
11663 case 3:
11664 /* cps */
11665 ARCH(6);
11666 if (IS_USER(s)) {
11667 break;
8984bd2e 11668 }
b53d8923 11669 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11670 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11671 /* FAULTMASK */
11672 if (insn & 1) {
11673 addr = tcg_const_i32(19);
11674 gen_helper_v7m_msr(cpu_env, addr, tmp);
11675 tcg_temp_free_i32(addr);
11676 }
11677 /* PRIMASK */
11678 if (insn & 2) {
11679 addr = tcg_const_i32(16);
11680 gen_helper_v7m_msr(cpu_env, addr, tmp);
11681 tcg_temp_free_i32(addr);
11682 }
11683 tcg_temp_free_i32(tmp);
11684 gen_lookup_tb(s);
11685 } else {
11686 if (insn & (1 << 4)) {
11687 shift = CPSR_A | CPSR_I | CPSR_F;
11688 } else {
11689 shift = 0;
11690 }
11691 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11692 }
d9e028c1
PM
11693 break;
11694 default:
11695 goto undef;
9ee6e8bb
PB
11696 }
11697 break;
11698
99c475ab
FB
11699 default:
11700 goto undef;
11701 }
11702 break;
11703
11704 case 12:
a7d3970d 11705 {
99c475ab 11706 /* load/store multiple */
39d5492a
PM
11707 TCGv_i32 loaded_var;
11708 TCGV_UNUSED_I32(loaded_var);
99c475ab 11709 rn = (insn >> 8) & 0x7;
b0109805 11710 addr = load_reg(s, rn);
99c475ab
FB
11711 for (i = 0; i < 8; i++) {
11712 if (insn & (1 << i)) {
99c475ab
FB
11713 if (insn & (1 << 11)) {
11714 /* load */
c40c8556 11715 tmp = tcg_temp_new_i32();
12dcc321 11716 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11717 if (i == rn) {
11718 loaded_var = tmp;
11719 } else {
11720 store_reg(s, i, tmp);
11721 }
99c475ab
FB
11722 } else {
11723 /* store */
b0109805 11724 tmp = load_reg(s, i);
12dcc321 11725 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11726 tcg_temp_free_i32(tmp);
99c475ab 11727 }
5899f386 11728 /* advance to the next address */
b0109805 11729 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11730 }
11731 }
b0109805 11732 if ((insn & (1 << rn)) == 0) {
a7d3970d 11733 /* base reg not in list: base register writeback */
b0109805
PB
11734 store_reg(s, rn, addr);
11735 } else {
a7d3970d
PM
11736 /* base reg in list: if load, complete it now */
11737 if (insn & (1 << 11)) {
11738 store_reg(s, rn, loaded_var);
11739 }
7d1b0095 11740 tcg_temp_free_i32(addr);
b0109805 11741 }
99c475ab 11742 break;
a7d3970d 11743 }
99c475ab
FB
11744 case 13:
11745 /* conditional branch or swi */
11746 cond = (insn >> 8) & 0xf;
11747 if (cond == 0xe)
11748 goto undef;
11749
11750 if (cond == 0xf) {
11751 /* swi */
eaed129d 11752 gen_set_pc_im(s, s->pc);
d4a2dc67 11753 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11754 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11755 break;
11756 }
11757 /* generate a conditional jump to next instruction */
e50e6a20 11758 s->condlabel = gen_new_label();
39fb730a 11759 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11760 s->condjmp = 1;
99c475ab
FB
11761
11762 /* jump to the offset */
5899f386 11763 val = (uint32_t)s->pc + 2;
99c475ab 11764 offset = ((int32_t)insn << 24) >> 24;
5899f386 11765 val += offset << 1;
8aaca4c0 11766 gen_jmp(s, val);
99c475ab
FB
11767 break;
11768
11769 case 14:
358bf29e 11770 if (insn & (1 << 11)) {
9ee6e8bb
PB
11771 if (disas_thumb2_insn(env, s, insn))
11772 goto undef32;
358bf29e
PB
11773 break;
11774 }
9ee6e8bb 11775 /* unconditional branch */
99c475ab
FB
11776 val = (uint32_t)s->pc;
11777 offset = ((int32_t)insn << 21) >> 21;
11778 val += (offset << 1) + 2;
8aaca4c0 11779 gen_jmp(s, val);
99c475ab
FB
11780 break;
11781
11782 case 15:
9ee6e8bb 11783 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11784 goto undef32;
9ee6e8bb 11785 break;
99c475ab
FB
11786 }
11787 return;
9ee6e8bb 11788undef32:
73710361
GB
11789 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11790 default_exception_el(s));
9ee6e8bb
PB
11791 return;
11792illegal_op:
99c475ab 11793undef:
73710361
GB
11794 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11795 default_exception_el(s));
99c475ab
FB
11796}
11797
541ebcd4
PM
11798static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11799{
11800 /* Return true if the insn at dc->pc might cross a page boundary.
11801 * (False positives are OK, false negatives are not.)
11802 */
11803 uint16_t insn;
11804
11805 if ((s->pc & 3) == 0) {
11806 /* At a 4-aligned address we can't be crossing a page */
11807 return false;
11808 }
11809
11810 /* This must be a Thumb insn */
f9fd40eb 11811 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11812
11813 if ((insn >> 11) >= 0x1d) {
11814 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11815 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11816 * end up actually treating this as two 16-bit insns (see the
11817 * code at the start of disas_thumb2_insn()) but we don't bother
11818 * to check for that as it is unlikely, and false positives here
11819 * are harmless.
11820 */
11821 return true;
11822 }
11823 /* Definitely a 16-bit insn, can't be crossing a page. */
11824 return false;
11825}
11826
1d8a5535
LV
/* Initialize the per-TB DisasContext for A32/T32 translation from the
 * TB flags, and allocate the global TCG temporaries used by the VFP and
 * Neon/iwMMXt code generators. Returns max_insns unchanged.
 */
static int arm_tr_init_disas_context(DisasContextBase *dcbase,
                                     CPUState *cs, int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Decode the translation-relevant state that was baked into the TB
     * flags when this TB was looked up.
     */
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    /* The IT-state field is split: low 4 bits are shifted into the mask,
     * the rest form the base condition (see the condexec notes in
     * arm_tr_tb_start()).
     */
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    /* current_el is derived from mmu_idx, so order matters here. */
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     * SS_ACTIVE == 0:
     *  generate code with no special handling for single-stepping (except
     *  that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *  this happens anyway because those changes are all system register or
     *  PSTATE writes).
     * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *  emit code for one insn
     *  emit code to clear PSTATE.SS
     *  emit code to generate software step exception for completed step
     *  end TB (as usual for having generated an exception)
     * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *  emit code to generate a software step exception
     *  end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */


    /* Allocate the global temporaries shared by the FP/vector decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();

    return max_insns;
}
11895
b1476854
LV
11896static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11897{
11898 DisasContext *dc = container_of(dcbase, DisasContext, base);
11899
11900 /* A note on handling of the condexec (IT) bits:
11901 *
11902 * We want to avoid the overhead of having to write the updated condexec
11903 * bits back to the CPUARMState for every instruction in an IT block. So:
11904 * (1) if the condexec bits are not already zero then we write
11905 * zero back into the CPUARMState now. This avoids complications trying
11906 * to do it at the end of the block. (For example if we don't do this
11907 * it's hard to identify whether we can safely skip writing condexec
11908 * at the end of the TB, which we definitely want to do for the case
11909 * where a TB doesn't do anything with the IT state at all.)
11910 * (2) if we are going to leave the TB then we call gen_set_condexec()
11911 * which will write the correct value into CPUARMState if zero is wrong.
11912 * This is done both for leaving the TB at the end, and for leaving
11913 * it because of an exception we know will happen, which is done in
11914 * gen_exception_insn(). The latter is necessary because we need to
11915 * leave the TB with the PC/IT state just prior to execution of the
11916 * instruction which caused the exception.
11917 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11918 * then the CPUARMState will be wrong and we need to reset it.
11919 * This is handled in the same way as restoration of the
11920 * PC in these situations; we save the value of the condexec bits
11921 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11922 * then uses this to restore them after an exception.
11923 *
11924 * Note that there are no instructions which can read the condexec
11925 * bits, and none which can write non-static values to them, so
11926 * we don't need to care about whether CPUARMState is correct in the
11927 * middle of a TB.
11928 */
11929
11930 /* Reset the conditional execution bits immediately. This avoids
11931 complications trying to do it at the end of the block. */
11932 if (dc->condexec_mask || dc->condexec_cond) {
11933 TCGv_i32 tmp = tcg_temp_new_i32();
11934 tcg_gen_movi_i32(tmp, 0);
11935 store_cpu_field(tmp, condexec_bits);
11936 }
11937}
11938
f62bd897
LV
11939static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11940{
11941 DisasContext *dc = container_of(dcbase, DisasContext, base);
11942
11943 dc->insn_start_idx = tcg_op_buf_count();
11944 tcg_gen_insn_start(dc->pc,
11945 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11946 0);
11947}
11948
a68956ad
LV
11949static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
11950 const CPUBreakpoint *bp)
11951{
11952 DisasContext *dc = container_of(dcbase, DisasContext, base);
11953
11954 if (bp->flags & BP_CPU) {
11955 gen_set_condexec(dc);
11956 gen_set_pc_im(dc, dc->pc);
11957 gen_helper_check_breakpoints(cpu_env);
11958 /* End the TB early; it's likely not going to be executed */
11959 dc->base.is_jmp = DISAS_TOO_MANY;
11960 } else {
11961 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11962 /* The address covered by the breakpoint must be
11963 included in [tb->pc, tb->pc + tb->size) in order
11964 to for it to be properly cleared -- thus we
11965 increment the PC here so that the logic setting
11966 tb->size below does the right thing. */
11967 /* TODO: Advance PC by correct instruction length to
11968 * avoid disassembler error messages */
11969 dc->pc += 2;
11970 dc->base.is_jmp = DISAS_NORETURN;
11971 }
11972
11973 return true;
11974}
11975
1d8a5535
LV
11976/* generate intermediate code for basic block 'tb'. */
11977void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
11978{
11979 CPUARMState *env = cs->env_ptr;
11980 DisasContext dc1, *dc = &dc1;
11981 target_ulong next_page_start;
11982 int max_insns;
11983 bool end_of_page;
11984
11985 /* generate intermediate code */
11986
11987 /* The A64 decoder has its own top level loop, because it doesn't need
11988 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11989 */
11990 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11991 gen_intermediate_code_a64(&dc->base, cs, tb);
11992 return;
11993 }
11994
11995 dc->base.tb = tb;
11996 dc->base.pc_first = dc->base.tb->pc;
11997 dc->base.pc_next = dc->base.pc_first;
11998 dc->base.is_jmp = DISAS_NEXT;
11999 dc->base.num_insns = 0;
12000 dc->base.singlestep_enabled = cs->singlestep_enabled;
12001
dcba3a8d 12002 next_page_start = (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2e70f6ef 12003 max_insns = tb->cflags & CF_COUNT_MASK;
190ce7fb 12004 if (max_insns == 0) {
2e70f6ef 12005 max_insns = CF_COUNT_MASK;
190ce7fb
RH
12006 }
12007 if (max_insns > TCG_MAX_INSNS) {
12008 max_insns = TCG_MAX_INSNS;
12009 }
1d8a5535 12010 max_insns = arm_tr_init_disas_context(&dc->base, cs, max_insns);
2e70f6ef 12011
cd42d5b2 12012 gen_tb_start(tb);
e12ce78d 12013
3849902c 12014 tcg_clear_temp_count();
b1476854 12015 arm_tr_tb_start(&dc->base, cs);
3849902c 12016
2c0262af 12017 do {
dcba3a8d 12018 dc->base.num_insns++;
f62bd897 12019 arm_tr_insn_start(&dc->base, cs);
b933066a 12020
f0c3c505 12021 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
b933066a 12022 CPUBreakpoint *bp;
f0c3c505 12023 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a68956ad
LV
12024 if (bp->pc == dc->base.pc_next) {
12025 if (arm_tr_breakpoint_check(&dc->base, cs, bp)) {
12026 break;
5d98bf8f 12027 }
1fddef4b
FB
12028 }
12029 }
a68956ad
LV
12030 if (dc->base.is_jmp > DISAS_TOO_MANY) {
12031 break;
12032 }
1fddef4b 12033 }
e50e6a20 12034
dcba3a8d 12035 if (dc->base.num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 12036 gen_io_start();
959082fc 12037 }
2e70f6ef 12038
3805c2eb
RH
12039#ifdef CONFIG_USER_ONLY
12040 /* Intercept jump to the magic kernel page. */
12041 if (dc->pc >= 0xffff0000) {
12042 /* We always get here via a jump, so know we are not in a
12043 conditional execution block. */
12044 gen_exception_internal(EXCP_KERNEL_TRAP);
dcba3a8d 12045 dc->base.is_jmp = DISAS_NORETURN;
3805c2eb
RH
12046 break;
12047 }
12048#endif
12049
50225ad0
PM
12050 if (dc->ss_active && !dc->pstate_ss) {
12051 /* Singlestep state is Active-pending.
12052 * If we're in this state at the start of a TB then either
12053 * a) we just took an exception to an EL which is being debugged
12054 * and this is the first insn in the exception handler
12055 * b) debug exceptions were masked and we just unmasked them
12056 * without changing EL (eg by clearing PSTATE.D)
12057 * In either case we're going to take a swstep exception in the
12058 * "did not step an insn" case, and so the syndrome ISV and EX
12059 * bits should be zero.
12060 */
dcba3a8d 12061 assert(dc->base.num_insns == 1);
73710361
GB
12062 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12063 default_exception_el(dc));
dcba3a8d
LV
12064 dc->base.is_jmp = DISAS_NORETURN;
12065 break;
50225ad0
PM
12066 }
12067
40f860cd 12068 if (dc->thumb) {
9ee6e8bb
PB
12069 disas_thumb_insn(env, dc);
12070 if (dc->condexec_mask) {
12071 dc->condexec_cond = (dc->condexec_cond & 0xe)
12072 | ((dc->condexec_mask >> 4) & 1);
12073 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12074 if (dc->condexec_mask == 0) {
12075 dc->condexec_cond = 0;
12076 }
12077 }
12078 } else {
f9fd40eb 12079 unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
f4df2210
PM
12080 dc->pc += 4;
12081 disas_arm_insn(dc, insn);
9ee6e8bb 12082 }
e50e6a20 12083
dcba3a8d 12084 if (dc->condjmp && !dc->base.is_jmp) {
e50e6a20
FB
12085 gen_set_label(dc->condlabel);
12086 dc->condjmp = 0;
12087 }
3849902c
PM
12088
12089 if (tcg_check_temp_count()) {
0a2461fa
AG
12090 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
12091 dc->pc);
3849902c
PM
12092 }
12093
aaf2d97d 12094 /* Translation stops when a conditional branch is encountered.
e50e6a20 12095 * Otherwise the subsequent code could get translated several times.
b5ff1b31 12096 * Also stop translation when a page boundary is reached. This
bf20dc07 12097 * ensures prefetch aborts occur at the right place. */
541ebcd4
PM
12098
12099 /* We want to stop the TB if the next insn starts in a new page,
12100 * or if it spans between this page and the next. This means that
12101 * if we're looking at the last halfword in the page we need to
12102 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12103 * or a 32-bit Thumb insn (which won't).
12104 * This is to avoid generating a silly TB with a single 16-bit insn
12105 * in it at the end of this page (which would execute correctly
12106 * but isn't very efficient).
12107 */
12108 end_of_page = (dc->pc >= next_page_start) ||
12109 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
12110
dcba3a8d 12111 } while (!dc->base.is_jmp && !tcg_op_buf_full() &&
b636649f 12112 !is_singlestepping(dc) &&
1b530a6d 12113 !singlestep &&
541ebcd4 12114 !end_of_page &&
dcba3a8d 12115 dc->base.num_insns < max_insns);
2e70f6ef
PB
12116
12117 if (tb->cflags & CF_LAST_IO) {
12118 if (dc->condjmp) {
12119 /* FIXME: This can theoretically happen with self-modifying
12120 code. */
a47dddd7 12121 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
12122 }
12123 gen_io_end();
12124 }
9ee6e8bb 12125
b5ff1b31 12126 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
12127 instruction was a conditional branch or trap, and the PC has
12128 already been written. */
f021b2c4 12129 gen_set_condexec(dc);
dcba3a8d 12130 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
3bb8a96f
PM
12131 /* Exception return branches need some special case code at the
12132 * end of the TB, which is complex enough that it has to
12133 * handle the single-step vs not and the condition-failed
12134 * insn codepath itself.
12135 */
12136 gen_bx_excret_final_code(dc);
12137 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 12138 /* Unconditional and "condition passed" instruction codepath. */
dcba3a8d 12139 switch (dc->base.is_jmp) {
7999a5c8 12140 case DISAS_SWI:
50225ad0 12141 gen_ss_advance(dc);
73710361
GB
12142 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12143 default_exception_el(dc));
7999a5c8
SF
12144 break;
12145 case DISAS_HVC:
37e6456e 12146 gen_ss_advance(dc);
73710361 12147 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
12148 break;
12149 case DISAS_SMC:
37e6456e 12150 gen_ss_advance(dc);
73710361 12151 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
12152 break;
12153 case DISAS_NEXT:
a68956ad 12154 case DISAS_TOO_MANY:
7999a5c8
SF
12155 case DISAS_UPDATE:
12156 gen_set_pc_im(dc, dc->pc);
12157 /* fall through */
12158 default:
5425415e
PM
12159 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12160 gen_singlestep_exception(dc);
a0c231e6
RH
12161 break;
12162 case DISAS_NORETURN:
12163 break;
7999a5c8 12164 }
8aaca4c0 12165 } else {
9ee6e8bb
PB
12166 /* While branches must always occur at the end of an IT block,
12167 there are a few other things that can cause us to terminate
65626741 12168 the TB in the middle of an IT block:
9ee6e8bb
PB
12169 - Exception generating instructions (bkpt, swi, undefined).
12170 - Page boundaries.
12171 - Hardware watchpoints.
12172 Hardware breakpoints have already been handled and skip this code.
12173 */
dcba3a8d 12174 switch(dc->base.is_jmp) {
8aaca4c0 12175 case DISAS_NEXT:
a68956ad 12176 case DISAS_TOO_MANY:
6e256c93 12177 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 12178 break;
577bf808 12179 case DISAS_JUMP:
8a6b28c7
EC
12180 gen_goto_ptr();
12181 break;
e8d52302
AB
12182 case DISAS_UPDATE:
12183 gen_set_pc_im(dc, dc->pc);
12184 /* fall through */
577bf808 12185 default:
8aaca4c0 12186 /* indicate that the hash table must be used to find the next TB */
57fec1fe 12187 tcg_gen_exit_tb(0);
8aaca4c0 12188 break;
a0c231e6 12189 case DISAS_NORETURN:
8aaca4c0
FB
12190 /* nothing more to generate */
12191 break;
9ee6e8bb 12192 case DISAS_WFI:
1ce94f81 12193 gen_helper_wfi(cpu_env);
84549b6d
PM
12194 /* The helper doesn't necessarily throw an exception, but we
12195 * must go back to the main loop to check for interrupts anyway.
12196 */
12197 tcg_gen_exit_tb(0);
9ee6e8bb 12198 break;
72c1d3af
PM
12199 case DISAS_WFE:
12200 gen_helper_wfe(cpu_env);
12201 break;
c87e5a61
PM
12202 case DISAS_YIELD:
12203 gen_helper_yield(cpu_env);
12204 break;
9ee6e8bb 12205 case DISAS_SWI:
73710361
GB
12206 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12207 default_exception_el(dc));
9ee6e8bb 12208 break;
37e6456e 12209 case DISAS_HVC:
73710361 12210 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
12211 break;
12212 case DISAS_SMC:
73710361 12213 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 12214 break;
8aaca4c0 12215 }
f021b2c4
PM
12216 }
12217
12218 if (dc->condjmp) {
12219 /* "Condition failed" instruction codepath for the branch/trap insn */
12220 gen_set_label(dc->condlabel);
12221 gen_set_condexec(dc);
b636649f 12222 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
12223 gen_set_pc_im(dc, dc->pc);
12224 gen_singlestep_exception(dc);
12225 } else {
6e256c93 12226 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 12227 }
2c0262af 12228 }
2e70f6ef 12229
dcba3a8d 12230 gen_tb_end(tb, dc->base.num_insns);
2c0262af
FB
12231
12232#ifdef DEBUG_DISAS
06486077 12233 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
dcba3a8d 12234 qemu_log_in_addr_range(dc->base.pc_first)) {
1ee73216 12235 qemu_log_lock();
93fcfe39 12236 qemu_log("----------------\n");
dcba3a8d
LV
12237 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12238 log_target_disas(cs, dc->base.pc_first, dc->pc - dc->base.pc_first,
f9fd40eb 12239 dc->thumb | (dc->sctlr_b << 1));
93fcfe39 12240 qemu_log("\n");
1ee73216 12241 qemu_log_unlock();
2c0262af
FB
12242 }
12243#endif
dcba3a8d
LV
12244 tb->size = dc->pc - dc->base.pc_first;
12245 tb->icount = dc->base.num_insns;
2c0262af
FB
12246}
12247
b5ff1b31 12248static const char *cpu_mode_names[16] = {
28c9457d
EI
12249 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12250 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 12251};
9ee6e8bb 12252
/* Dump the CPU state of @cs to @f via @cpu_fprintf (monitor "info registers").
 *
 * For a CPU currently in AArch64 state this delegates entirely to
 * aarch64_cpu_dump_state(); the rest of the function handles AArch32.
 * It prints R0-R15 four per line, then either the M-profile XPSR or the
 * A/R-profile CPSR, and — if @flags requests CPU_DUMP_FPU — the VFP
 * register file and FPSCR.
 */
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        /* AArch64 state has its own dumper; nothing below applies. */
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    /* Core registers R0..R15, four per output line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile: print XPSR plus handler/thread and privilege mode. */
        uint32_t xpsr = xpsr_read(env);
        const char *mode;

        if (xpsr & XPSR_EXCP) {
            /* Non-zero exception number means we are in Handler mode. */
            mode = "handler";
        } else {
            if (env->v7m.control & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    mode);
    } else {
        /* A/R profile: print CPSR with NZCV flags, Thumb bit, security
         * state (when EL3 exists and we are not in Monitor mode) and the
         * symbolic name of the current processor mode.
         */
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        /* Trailing "32"/"26" is the legacy PC-width indicator (bit 4 of
         * the mode field distinguishes 32-bit from 26-bit modes).
         */
        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        /* 16 D registers for base VFP, 32 with VFP3; each printed both as
         * a pair of 32-bit S registers and as the 64-bit D value.
         */
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
a6b025d3 12333
bad729e2
RH
12334void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12335 target_ulong *data)
d2856f1a 12336{
3926cc84 12337 if (is_a64(env)) {
bad729e2 12338 env->pc = data[0];
40f860cd 12339 env->condexec_bits = 0;
aaa1f954 12340 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12341 } else {
bad729e2
RH
12342 env->regs[15] = data[0];
12343 env->condexec_bits = data[1];
aaa1f954 12344 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12345 }
d2856f1a 12346}