]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
tcg/mips: constify tcg_target_callee_save_regs
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
2b51668f
PM
40#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 42/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 43#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
c99a55d3 44#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
2b51668f
PM
45#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 50
86753403 51#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 52
f570c61e 53#include "translate.h"
e12ce78d 54
b5ff1b31
FB
55#if defined(CONFIG_USER_ONLY)
56#define IS_USER(s) 1
57#else
58#define IS_USER(s) (s->user)
59#endif
60
1bcea73e 61TCGv_env cpu_env;
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
b26eefb6 69/* FIXME: These should be removed. */
39d5492a 70static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 71static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 72
022c62cb 73#include "exec/gen-icount.h"
2e70f6ef 74
155c3eac
FN
75static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
b26eefb6
PB
79/* initialize TCG globals. */
80void arm_translate_init(void)
81{
155c3eac
FN
82 int i;
83
a7812ae4 84 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7c255043 85 tcg_ctx.tcg_env = cpu_env;
a7812ae4 86
155c3eac 87 for (i = 0; i < 16; i++) {
e1ccc054 88 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 89 offsetof(CPUARMState, regs[i]),
155c3eac
FN
90 regnames[i]);
91 }
e1ccc054
RH
92 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
93 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
94 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
95 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 96
e1ccc054 97 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 98 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 99 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 100 offsetof(CPUARMState, exclusive_val), "exclusive_val");
155c3eac 101
14ade10f 102 a64_translate_init();
b26eefb6
PB
103}
104
9bb6558a
PM
105/* Flags for the disas_set_da_iss info argument:
106 * lower bits hold the Rt register number, higher bits are flags.
107 */
108typedef enum ISSInfo {
109 ISSNone = 0,
110 ISSRegMask = 0x1f,
111 ISSInvalid = (1 << 5),
112 ISSIsAcqRel = (1 << 6),
113 ISSIsWrite = (1 << 7),
114 ISSIs16Bit = (1 << 8),
115} ISSInfo;
116
117/* Save the syndrome information for a Data Abort */
118static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
119{
120 uint32_t syn;
121 int sas = memop & MO_SIZE;
122 bool sse = memop & MO_SIGN;
123 bool is_acqrel = issinfo & ISSIsAcqRel;
124 bool is_write = issinfo & ISSIsWrite;
125 bool is_16bit = issinfo & ISSIs16Bit;
126 int srt = issinfo & ISSRegMask;
127
128 if (issinfo & ISSInvalid) {
129 /* Some callsites want to conditionally provide ISS info,
130 * eg "only if this was not a writeback"
131 */
132 return;
133 }
134
135 if (srt == 15) {
136 /* For AArch32, insns where the src/dest is R15 never generate
137 * ISS information. Catching that here saves checking at all
138 * the call sites.
139 */
140 return;
141 }
142
143 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
144 0, 0, 0, is_write, 0, is_16bit);
145 disas_set_insn_syndrome(s, syn);
146}
147
8bd5c820 148static inline int get_a32_user_mem_index(DisasContext *s)
579d21cc 149{
8bd5c820 150 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
579d21cc
PM
151 * insns:
152 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
153 * otherwise, access as if at PL0.
154 */
155 switch (s->mmu_idx) {
156 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
157 case ARMMMUIdx_S12NSE0:
158 case ARMMMUIdx_S12NSE1:
8bd5c820 159 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
579d21cc
PM
160 case ARMMMUIdx_S1E3:
161 case ARMMMUIdx_S1SE0:
162 case ARMMMUIdx_S1SE1:
8bd5c820 163 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
e7b921c2
PM
164 case ARMMMUIdx_MUser:
165 case ARMMMUIdx_MPriv:
3bef7012 166 case ARMMMUIdx_MNegPri:
e7b921c2 167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
579d21cc
PM
168 case ARMMMUIdx_S2NS:
169 default:
170 g_assert_not_reached();
171 }
172}
173
39d5492a 174static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 175{
39d5492a 176 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
177 tcg_gen_ld_i32(tmp, cpu_env, offset);
178 return tmp;
179}
180
0ecb72a5 181#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 182
39d5492a 183static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
184{
185 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 186 tcg_temp_free_i32(var);
d9ba4830
PB
187}
188
189#define store_cpu_field(var, name) \
0ecb72a5 190 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 191
b26eefb6 192/* Set a variable to the value of a CPU register. */
39d5492a 193static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
194{
195 if (reg == 15) {
196 uint32_t addr;
b90372ad 197 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
198 if (s->thumb)
199 addr = (long)s->pc + 2;
200 else
201 addr = (long)s->pc + 4;
202 tcg_gen_movi_i32(var, addr);
203 } else {
155c3eac 204 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
205 }
206}
207
208/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 209static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 210{
39d5492a 211 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
212 load_reg_var(s, tmp, reg);
213 return tmp;
214}
215
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead (freed here; the caller must not reuse it). */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        /* A write to the PC is a branch: end translation of this TB. */
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
232
b26eefb6 233/* Value extensions. */
86831435
PB
234#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
235#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
236#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
237#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
238
1497c961
PB
239#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
240#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 241
b26eefb6 242
39d5492a 243static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 244{
39d5492a 245 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 246 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
247 tcg_temp_free_i32(tmp_mask);
248}
d9ba4830
PB
249/* Set NZCV flags from the high 4 bits of var. */
250#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
251
d4a2dc67 252static void gen_exception_internal(int excp)
d9ba4830 253{
d4a2dc67
PM
254 TCGv_i32 tcg_excp = tcg_const_i32(excp);
255
256 assert(excp_is_internal(excp));
257 gen_helper_exception_internal(cpu_env, tcg_excp);
258 tcg_temp_free_i32(tcg_excp);
259}
260
73710361 261static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
262{
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
264 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 265 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 266
73710361
GB
267 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
268 tcg_syn, tcg_el);
269
270 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
271 tcg_temp_free_i32(tcg_syn);
272 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
273}
274
50225ad0
PM
275static void gen_ss_advance(DisasContext *s)
276{
277 /* If the singlestep state is Active-not-pending, advance to
278 * Active-pending.
279 */
280 if (s->ss_active) {
281 s->pstate_ss = 0;
282 gen_helper_clear_pstate_ss(cpu_env);
283 }
284}
285
286static void gen_step_complete_exception(DisasContext *s)
287{
288 /* We just completed step of an insn. Move from Active-not-pending
289 * to Active-pending, and then also take the swstep exception.
290 * This corresponds to making the (IMPDEF) choice to prioritize
291 * swstep exceptions over asynchronous exceptions taken to an exception
292 * level where debug is disabled. This choice has the advantage that
293 * we do not need to maintain internal state corresponding to the
294 * ISV/EX syndrome bits between completion of the step and generation
295 * of the exception, and our syndrome information is always correct.
296 */
297 gen_ss_advance(s);
73710361
GB
298 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
299 default_exception_el(s));
dcba3a8d 300 s->base.is_jmp = DISAS_NORETURN;
50225ad0
PM
301}
302
5425415e
PM
303static void gen_singlestep_exception(DisasContext *s)
304{
305 /* Generate the right kind of exception for singlestep, which is
306 * either the architectural singlestep or EXCP_DEBUG for QEMU's
307 * gdb singlestepping.
308 */
309 if (s->ss_active) {
310 gen_step_complete_exception(s);
311 } else {
312 gen_exception_internal(EXCP_DEBUG);
313 }
314}
315
b636649f
PM
316static inline bool is_singlestepping(DisasContext *s)
317{
318 /* Return true if we are singlestepping either because of
319 * architectural singlestep or QEMU gdbstub singlestep. This does
320 * not include the command line '-singlestep' mode which is rather
321 * misnamed as it only means "one instruction per TB" and doesn't
322 * affect the code we generate.
323 */
dcba3a8d 324 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
325}
326
39d5492a 327static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 328{
39d5492a
PM
329 TCGv_i32 tmp1 = tcg_temp_new_i32();
330 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
331 tcg_gen_ext16s_i32(tmp1, a);
332 tcg_gen_ext16s_i32(tmp2, b);
3670669c 333 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 334 tcg_temp_free_i32(tmp2);
3670669c
PB
335 tcg_gen_sari_i32(a, a, 16);
336 tcg_gen_sari_i32(b, b, 16);
337 tcg_gen_mul_i32(b, b, a);
338 tcg_gen_mov_i32(a, tmp1);
7d1b0095 339 tcg_temp_free_i32(tmp1);
3670669c
PB
340}
341
/* Byteswap each halfword (REV16): swap the two bytes within each of the
 * two 16-bit lanes of var, in place.
 */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* 0x00ff00ff selects the low byte of each halfword. */
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    /* tmp = each lane's high byte, moved down into the low-byte slot. */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    /* var = each lane's low byte, moved up into the high-byte slot. */
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    /* Recombine the two halves of every lane. */
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
355
356/* Byteswap low halfword and sign extend. */
39d5492a 357static void gen_revsh(TCGv_i32 var)
3670669c 358{
1a855029
AJ
359 tcg_gen_ext16u_i32(var, var);
360 tcg_gen_bswap16_i32(var, var);
361 tcg_gen_ext16s_i32(var, var);
3670669c
PB
362}
363
838fa72d 364/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 365static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 366{
838fa72d
AJ
367 TCGv_i64 tmp64 = tcg_temp_new_i64();
368
369 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 370 tcg_temp_free_i32(b);
838fa72d
AJ
371 tcg_gen_shli_i64(tmp64, tmp64, 32);
372 tcg_gen_add_i64(a, tmp64, a);
373
374 tcg_temp_free_i64(tmp64);
375 return a;
376}
377
378/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 379static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
380{
381 TCGv_i64 tmp64 = tcg_temp_new_i64();
382
383 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 384 tcg_temp_free_i32(b);
838fa72d
AJ
385 tcg_gen_shli_i64(tmp64, tmp64, 32);
386 tcg_gen_sub_i64(a, tmp64, a);
387
388 tcg_temp_free_i64(tmp64);
389 return a;
3670669c
PB
390}
391
5e3f878a 392/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 393static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 394{
39d5492a
PM
395 TCGv_i32 lo = tcg_temp_new_i32();
396 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 397 TCGv_i64 ret;
5e3f878a 398
831d7fe8 399 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 400 tcg_temp_free_i32(a);
7d1b0095 401 tcg_temp_free_i32(b);
831d7fe8
RH
402
403 ret = tcg_temp_new_i64();
404 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
405 tcg_temp_free_i32(lo);
406 tcg_temp_free_i32(hi);
831d7fe8
RH
407
408 return ret;
5e3f878a
PB
409}
410
39d5492a 411static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 412{
39d5492a
PM
413 TCGv_i32 lo = tcg_temp_new_i32();
414 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 415 TCGv_i64 ret;
5e3f878a 416
831d7fe8 417 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 418 tcg_temp_free_i32(a);
7d1b0095 419 tcg_temp_free_i32(b);
831d7fe8
RH
420
421 ret = tcg_temp_new_i64();
422 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
423 tcg_temp_free_i32(lo);
424 tcg_temp_free_i32(hi);
831d7fe8
RH
425
426 return ret;
5e3f878a
PB
427}
428
8f01245e 429/* Swap low and high halfwords. */
39d5492a 430static void gen_swap_half(TCGv_i32 var)
8f01245e 431{
39d5492a 432 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
433 tcg_gen_shri_i32(tmp, var, 16);
434 tcg_gen_shli_i32(var, var, 16);
435 tcg_gen_or_i32(var, var, tmp);
7d1b0095 436 tcg_temp_free_i32(tmp);
8f01245e
PB
437}
438
b26eefb6
PB
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   Each halfword is summed independently; the trick below stops the low
   halfword's carry from propagating into the high halfword:
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Record what bit 15 would contribute without its carry-in... */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    /* ...clear bit 15 in both operands so the add cannot carry across
     * the halfword boundary...
     */
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    /* ...and fold the saved bit-15 contribution back in with XOR. */
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
458
459/* Set CF to the top bit of var. */
39d5492a 460static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 461{
66c374de 462 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
463}
464
465/* Set N and Z flags from var. */
39d5492a 466static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 467{
66c374de
AJ
468 tcg_gen_mov_i32(cpu_NF, var);
469 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
470}
471
472/* T0 += T1 + CF. */
39d5492a 473static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 474{
396e467c 475 tcg_gen_add_i32(t0, t0, t1);
66c374de 476 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
477}
478
e9bb4aa9 479/* dest = T0 + T1 + CF. */
39d5492a 480static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 481{
e9bb4aa9 482 tcg_gen_add_i32(dest, t0, t1);
66c374de 483 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
484}
485
3670669c 486/* dest = T0 - T1 + CF - 1. */
39d5492a 487static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 488{
3670669c 489 tcg_gen_sub_i32(dest, t0, t1);
66c374de 490 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 491 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
492}
493
72485ec4 494/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 495static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 496{
39d5492a 497 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
498 tcg_gen_movi_i32(tmp, 0);
499 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 500 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 501 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
502 tcg_gen_xor_i32(tmp, t0, t1);
503 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
504 tcg_temp_free_i32(tmp);
505 tcg_gen_mov_i32(dest, cpu_NF);
506}
507
49b4c31e 508/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 509static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 510{
39d5492a 511 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
512 if (TCG_TARGET_HAS_add2_i32) {
513 tcg_gen_movi_i32(tmp, 0);
514 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 515 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
516 } else {
517 TCGv_i64 q0 = tcg_temp_new_i64();
518 TCGv_i64 q1 = tcg_temp_new_i64();
519 tcg_gen_extu_i32_i64(q0, t0);
520 tcg_gen_extu_i32_i64(q1, t1);
521 tcg_gen_add_i64(q0, q0, q1);
522 tcg_gen_extu_i32_i64(q1, cpu_CF);
523 tcg_gen_add_i64(q0, q0, q1);
524 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
525 tcg_temp_free_i64(q0);
526 tcg_temp_free_i64(q1);
527 }
528 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
529 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
530 tcg_gen_xor_i32(tmp, t0, t1);
531 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
532 tcg_temp_free_i32(tmp);
533 tcg_gen_mov_i32(dest, cpu_NF);
534}
535
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM defines C for subtraction as NOT-borrow: set iff t0 >= t1
     * as unsigned values.
     */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* Signed overflow (kept in the sign bit of cpu_VF):
     * V = (result ^ t0) & (t0 ^ t1) — operands of differing sign and
     * result sign differing from t0.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
550
e77f0832 551/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 552static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 553{
39d5492a 554 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
555 tcg_gen_not_i32(tmp, t1);
556 gen_adc_CC(dest, t0, tmp);
39d5492a 557 tcg_temp_free_i32(tmp);
2de68a49
RH
558}
559
365af80e 560#define GEN_SHIFT(name) \
39d5492a 561static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 562{ \
39d5492a 563 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
564 tmp1 = tcg_temp_new_i32(); \
565 tcg_gen_andi_i32(tmp1, t1, 0xff); \
566 tmp2 = tcg_const_i32(0); \
567 tmp3 = tcg_const_i32(0x1f); \
568 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
569 tcg_temp_free_i32(tmp3); \
570 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
571 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
572 tcg_temp_free_i32(tmp2); \
573 tcg_temp_free_i32(tmp1); \
574}
575GEN_SHIFT(shl)
576GEN_SHIFT(shr)
577#undef GEN_SHIFT
578
39d5492a 579static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 580{
39d5492a 581 TCGv_i32 tmp1, tmp2;
365af80e
AJ
582 tmp1 = tcg_temp_new_i32();
583 tcg_gen_andi_i32(tmp1, t1, 0xff);
584 tmp2 = tcg_const_i32(0x1f);
585 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
586 tcg_temp_free_i32(tmp2);
587 tcg_gen_sar_i32(dest, t0, tmp1);
588 tcg_temp_free_i32(tmp1);
589}
590
39d5492a 591static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 592{
39d5492a
PM
593 TCGv_i32 c0 = tcg_const_i32(0);
594 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
595 tcg_gen_neg_i32(tmp, src);
596 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
597 tcg_temp_free_i32(c0);
598 tcg_temp_free_i32(tmp);
599}
ad69471c 600
/* Set cpu_CF to the shifter carry-out: bit (shift - 1) of var, or
 * bit 0 when shift == 0.
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* For shift == 31 the shift alone already isolated the bit,
             * so the masking AND can be skipped.
             */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 612
9a119ff6 613/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
614static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
615 int shift, int flags)
9a119ff6
PB
616{
617 switch (shiftop) {
618 case 0: /* LSL */
619 if (shift != 0) {
620 if (flags)
621 shifter_out_im(var, 32 - shift);
622 tcg_gen_shli_i32(var, var, shift);
623 }
624 break;
625 case 1: /* LSR */
626 if (shift == 0) {
627 if (flags) {
66c374de 628 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
629 }
630 tcg_gen_movi_i32(var, 0);
631 } else {
632 if (flags)
633 shifter_out_im(var, shift - 1);
634 tcg_gen_shri_i32(var, var, shift);
635 }
636 break;
637 case 2: /* ASR */
638 if (shift == 0)
639 shift = 32;
640 if (flags)
641 shifter_out_im(var, shift - 1);
642 if (shift == 32)
643 shift = 31;
644 tcg_gen_sari_i32(var, var, shift);
645 break;
646 case 3: /* ROR/RRX */
647 if (shift != 0) {
648 if (flags)
649 shifter_out_im(var, shift - 1);
f669df27 650 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 651 } else {
39d5492a 652 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 653 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
654 if (flags)
655 shifter_out_im(var, 0);
656 tcg_gen_shri_i32(var, var, 1);
b26eefb6 657 tcg_gen_or_i32(var, var, tmp);
7d1b0095 658 tcg_temp_free_i32(tmp);
b26eefb6
PB
659 }
660 }
661};
662
39d5492a
PM
663static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
664 TCGv_i32 shift, int flags)
8984bd2e
PB
665{
666 if (flags) {
667 switch (shiftop) {
9ef39277
BS
668 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
669 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
670 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
671 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
672 }
673 } else {
674 switch (shiftop) {
365af80e
AJ
675 case 0:
676 gen_shl(var, var, shift);
677 break;
678 case 1:
679 gen_shr(var, var, shift);
680 break;
681 case 2:
682 gen_sar(var, var, shift);
683 break;
f669df27
AJ
684 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
685 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
686 }
687 }
7d1b0095 688 tcg_temp_free_i32(shift);
8984bd2e
PB
689}
690
6ddbc6e4
PB
691#define PAS_OP(pfx) \
692 switch (op2) { \
693 case 0: gen_pas_helper(glue(pfx,add16)); break; \
694 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
695 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
696 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
697 case 4: gen_pas_helper(glue(pfx,add8)); break; \
698 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
699 }
39d5492a 700static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 701{
a7812ae4 702 TCGv_ptr tmp;
6ddbc6e4
PB
703
704 switch (op1) {
705#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
706 case 1:
a7812ae4 707 tmp = tcg_temp_new_ptr();
0ecb72a5 708 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 709 PAS_OP(s)
b75263d6 710 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
711 break;
712 case 5:
a7812ae4 713 tmp = tcg_temp_new_ptr();
0ecb72a5 714 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 715 PAS_OP(u)
b75263d6 716 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
717 break;
718#undef gen_pas_helper
719#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
720 case 2:
721 PAS_OP(q);
722 break;
723 case 3:
724 PAS_OP(sh);
725 break;
726 case 6:
727 PAS_OP(uq);
728 break;
729 case 7:
730 PAS_OP(uh);
731 break;
732#undef gen_pas_helper
733 }
734}
9ee6e8bb
PB
735#undef PAS_OP
736
6ddbc6e4
PB
737/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
738#define PAS_OP(pfx) \
ed89a2f1 739 switch (op1) { \
6ddbc6e4
PB
740 case 0: gen_pas_helper(glue(pfx,add8)); break; \
741 case 1: gen_pas_helper(glue(pfx,add16)); break; \
742 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
743 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
744 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
745 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
746 }
39d5492a 747static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 748{
a7812ae4 749 TCGv_ptr tmp;
6ddbc6e4 750
ed89a2f1 751 switch (op2) {
6ddbc6e4
PB
752#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
753 case 0:
a7812ae4 754 tmp = tcg_temp_new_ptr();
0ecb72a5 755 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 756 PAS_OP(s)
b75263d6 757 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
758 break;
759 case 4:
a7812ae4 760 tmp = tcg_temp_new_ptr();
0ecb72a5 761 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 762 PAS_OP(u)
b75263d6 763 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
764 break;
765#undef gen_pas_helper
766#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
767 case 1:
768 PAS_OP(q);
769 break;
770 case 2:
771 PAS_OP(sh);
772 break;
773 case 5:
774 PAS_OP(uq);
775 break;
776 case 6:
777 PAS_OP(uh);
778 break;
779#undef gen_pas_helper
780 }
781}
9ee6e8bb
PB
782#undef PAS_OP
783
39fb730a 784/*
6c2c63d3 785 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
786 * This is common between ARM and Aarch64 targets.
787 */
6c2c63d3 788void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 789{
6c2c63d3
RH
790 TCGv_i32 value;
791 TCGCond cond;
792 bool global = true;
d9ba4830 793
d9ba4830
PB
794 switch (cc) {
795 case 0: /* eq: Z */
d9ba4830 796 case 1: /* ne: !Z */
6c2c63d3
RH
797 cond = TCG_COND_EQ;
798 value = cpu_ZF;
d9ba4830 799 break;
6c2c63d3 800
d9ba4830 801 case 2: /* cs: C */
d9ba4830 802 case 3: /* cc: !C */
6c2c63d3
RH
803 cond = TCG_COND_NE;
804 value = cpu_CF;
d9ba4830 805 break;
6c2c63d3 806
d9ba4830 807 case 4: /* mi: N */
d9ba4830 808 case 5: /* pl: !N */
6c2c63d3
RH
809 cond = TCG_COND_LT;
810 value = cpu_NF;
d9ba4830 811 break;
6c2c63d3 812
d9ba4830 813 case 6: /* vs: V */
d9ba4830 814 case 7: /* vc: !V */
6c2c63d3
RH
815 cond = TCG_COND_LT;
816 value = cpu_VF;
d9ba4830 817 break;
6c2c63d3 818
d9ba4830 819 case 8: /* hi: C && !Z */
6c2c63d3
RH
820 case 9: /* ls: !C || Z -> !(C && !Z) */
821 cond = TCG_COND_NE;
822 value = tcg_temp_new_i32();
823 global = false;
824 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
825 ZF is non-zero for !Z; so AND the two subexpressions. */
826 tcg_gen_neg_i32(value, cpu_CF);
827 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 828 break;
6c2c63d3 829
d9ba4830 830 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 831 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
832 /* Since we're only interested in the sign bit, == 0 is >= 0. */
833 cond = TCG_COND_GE;
834 value = tcg_temp_new_i32();
835 global = false;
836 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 837 break;
6c2c63d3 838
d9ba4830 839 case 12: /* gt: !Z && N == V */
d9ba4830 840 case 13: /* le: Z || N != V */
6c2c63d3
RH
841 cond = TCG_COND_NE;
842 value = tcg_temp_new_i32();
843 global = false;
844 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
845 * the sign bit then AND with ZF to yield the result. */
846 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
847 tcg_gen_sari_i32(value, value, 31);
848 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 849 break;
6c2c63d3 850
9305eac0
RH
851 case 14: /* always */
852 case 15: /* always */
853 /* Use the ALWAYS condition, which will fold early.
854 * It doesn't matter what we use for the value. */
855 cond = TCG_COND_ALWAYS;
856 value = cpu_ZF;
857 goto no_invert;
858
d9ba4830
PB
859 default:
860 fprintf(stderr, "Bad condition code 0x%x\n", cc);
861 abort();
862 }
6c2c63d3
RH
863
864 if (cc & 1) {
865 cond = tcg_invert_cond(cond);
866 }
867
9305eac0 868 no_invert:
6c2c63d3
RH
869 cmp->cond = cond;
870 cmp->value = value;
871 cmp->value_global = global;
872}
873
874void arm_free_cc(DisasCompare *cmp)
875{
876 if (!cmp->value_global) {
877 tcg_temp_free_i32(cmp->value);
878 }
879}
880
881void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
882{
883 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
884}
885
886void arm_gen_test_cc(int cc, TCGLabel *label)
887{
888 DisasCompare cmp;
889 arm_test_cc(&cmp, cc);
890 arm_jump_cc(&cmp, label);
891 arm_free_cc(&cmp);
d9ba4830 892}
2c0262af 893
b1d8e52e 894static const uint8_t table_logic_cc[16] = {
2c0262af
FB
895 1, /* and */
896 1, /* xor */
897 0, /* sub */
898 0, /* rsb */
899 0, /* add */
900 0, /* adc */
901 0, /* sbc */
902 0, /* rsc */
903 1, /* andl */
904 1, /* xorl */
905 0, /* cmp */
906 0, /* cmn */
907 1, /* orr */
908 1, /* mov */
909 1, /* bic */
910 1, /* mvn */
911};
3b46e624 912
4d5e8c96
PM
913static inline void gen_set_condexec(DisasContext *s)
914{
915 if (s->condexec_mask) {
916 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
917 TCGv_i32 tmp = tcg_temp_new_i32();
918 tcg_gen_movi_i32(tmp, val);
919 store_cpu_field(tmp, condexec_bits);
920 }
921}
922
923static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
924{
925 tcg_gen_movi_i32(cpu_R[15], val);
926}
927
d9ba4830
PB
928/* Set PC and Thumb state from an immediate address. */
929static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 930{
39d5492a 931 TCGv_i32 tmp;
99c475ab 932
dcba3a8d 933 s->base.is_jmp = DISAS_JUMP;
d9ba4830 934 if (s->thumb != (addr & 1)) {
7d1b0095 935 tmp = tcg_temp_new_i32();
d9ba4830 936 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 937 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 938 tcg_temp_free_i32(tmp);
d9ba4830 939 }
155c3eac 940 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
941}
942
943/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 944static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 945{
dcba3a8d 946 s->base.is_jmp = DISAS_JUMP;
155c3eac
FN
947 tcg_gen_andi_i32(cpu_R[15], var, ~1);
948 tcg_gen_andi_i32(var, var, 1);
949 store_cpu_field(var, thumb);
d9ba4830
PB
950}
951
3bb8a96f
PM
952/* Set PC and Thumb state from var. var is marked as dead.
953 * For M-profile CPUs, include logic to detect exception-return
954 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
955 * and BX reg, and no others, and happens only for code in Handler mode.
956 */
957static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
958{
959 /* Generate the same code here as for a simple bx, but flag via
dcba3a8d 960 * s->base.is_jmp that we need to do the rest of the work later.
3bb8a96f
PM
961 */
962 gen_bx(s, var);
963 if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
dcba3a8d 964 s->base.is_jmp = DISAS_BX_EXCRET;
3bb8a96f
PM
965 }
966}
967
968static inline void gen_bx_excret_final_code(DisasContext *s)
969{
970 /* Generate the code to finish possible exception return and end the TB */
971 TCGLabel *excret_label = gen_new_label();
972
973 /* Is the new PC value in the magic range indicating exception return? */
974 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
975 /* No: end the TB as we would for a DISAS_JMP */
976 if (is_singlestepping(s)) {
977 gen_singlestep_exception(s);
978 } else {
979 tcg_gen_exit_tb(0);
980 }
981 gen_set_label(excret_label);
982 /* Yes: this is an exception return.
983 * At this point in runtime env->regs[15] and env->thumb will hold
984 * the exception-return magic number, which do_v7m_exception_exit()
985 * will read. Nothing else will be able to see those values because
986 * the cpu-exec main loop guarantees that we will always go straight
987 * from raising the exception to the exception-handling code.
988 *
989 * gen_ss_advance(s) does nothing on M profile currently but
990 * calling it is conceptually the right thing as we have executed
991 * this instruction (compare SWI, HVC, SMC handling).
992 */
993 gen_ss_advance(s);
994 gen_exception_internal(EXCP_EXCEPTION_EXIT);
995}
996
fb602cb7
PM
997static inline void gen_bxns(DisasContext *s, int rm)
998{
999 TCGv_i32 var = load_reg(s, rm);
1000
1001 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1002 * we need to sync state before calling it, but:
1003 * - we don't need to do gen_set_pc_im() because the bxns helper will
1004 * always set the PC itself
1005 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1006 * unless it's outside an IT block or the last insn in an IT block,
1007 * so we know that condexec == 0 (already set at the top of the TB)
1008 * is correct in the non-UNPREDICTABLE cases, and we can choose
1009 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1010 */
1011 gen_helper_v7m_bxns(cpu_env, var);
1012 tcg_temp_free_i32(var);
ef475b5d 1013 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
1014}
1015
21aeb343
JR
1016/* Variant of store_reg which uses branch&exchange logic when storing
1017 to r15 in ARM architecture v7 and above. The source must be a temporary
1018 and will be marked as dead. */
7dcc1f89 1019static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1020{
1021 if (reg == 15 && ENABLE_ARCH_7) {
1022 gen_bx(s, var);
1023 } else {
1024 store_reg(s, reg, var);
1025 }
1026}
1027
be5e7a76
DES
1028/* Variant of store_reg which uses branch&exchange logic when storing
1029 * to r15 in ARM architecture v5T and above. This is used for storing
1030 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1031 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1032static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1033{
1034 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1035 gen_bx_excret(s, var);
be5e7a76
DES
1036 } else {
1037 store_reg(s, reg, var);
1038 }
1039}
1040
/* Constant-foldable flag: 1 when building the user-mode-only emulator. */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
08307563
PM
1047/* Abstractions of "generate code to do a guest load/store for
1048 * AArch32", where a vaddr is always 32 bits (and is zero
1049 * extended if we're a 64 bit core) and data is also
1050 * 32 bits unless specifically doing a 64 bit access.
1051 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1052 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1053 */
08307563 1054
7f5616f5 1055static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
08307563 1056{
7f5616f5
RH
1057 TCGv addr = tcg_temp_new();
1058 tcg_gen_extu_i32_tl(addr, a32);
1059
e334bd31 1060 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1061 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1062 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1063 }
7f5616f5 1064 return addr;
08307563
PM
1065}
1066
7f5616f5
RH
1067static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1068 int index, TCGMemOp opc)
08307563 1069{
7f5616f5
RH
1070 TCGv addr = gen_aa32_addr(s, a32, opc);
1071 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1072 tcg_temp_free(addr);
08307563
PM
1073}
1074
7f5616f5
RH
1075static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1076 int index, TCGMemOp opc)
1077{
1078 TCGv addr = gen_aa32_addr(s, a32, opc);
1079 tcg_gen_qemu_st_i32(val, addr, index, opc);
1080 tcg_temp_free(addr);
1081}
08307563 1082
/* Generators for the sized AArch32 load/store helpers. Each expansion
 * produces a plain accessor plus an "_iss" variant that also records
 * syndrome information for instruction-syndrome-valid data aborts.
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
7f5616f5 1113static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1114{
e334bd31
PB
1115 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1116 if (!IS_USER_ONLY && s->sctlr_b) {
1117 tcg_gen_rotri_i64(val, val, 32);
1118 }
08307563
PM
1119}
1120
7f5616f5
RH
1121static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1122 int index, TCGMemOp opc)
08307563 1123{
7f5616f5
RH
1124 TCGv addr = gen_aa32_addr(s, a32, opc);
1125 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1126 gen_aa32_frob64(s, val);
1127 tcg_temp_free(addr);
1128}
1129
1130static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1131 TCGv_i32 a32, int index)
1132{
1133 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1134}
1135
1136static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1137 int index, TCGMemOp opc)
1138{
1139 TCGv addr = gen_aa32_addr(s, a32, opc);
e334bd31
PB
1140
1141 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1142 if (!IS_USER_ONLY && s->sctlr_b) {
7f5616f5 1143 TCGv_i64 tmp = tcg_temp_new_i64();
e334bd31 1144 tcg_gen_rotri_i64(tmp, val, 32);
7f5616f5
RH
1145 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1146 tcg_temp_free_i64(tmp);
e334bd31 1147 } else {
7f5616f5 1148 tcg_gen_qemu_st_i64(val, addr, index, opc);
e334bd31 1149 }
7f5616f5 1150 tcg_temp_free(addr);
08307563
PM
1151}
1152
7f5616f5
RH
1153static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1154 TCGv_i32 a32, int index)
1155{
1156 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1157}
08307563 1158
7f5616f5
RH
1159DO_GEN_LD(8s, MO_SB)
1160DO_GEN_LD(8u, MO_UB)
1161DO_GEN_LD(16s, MO_SW)
1162DO_GEN_LD(16u, MO_UW)
1163DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1164DO_GEN_ST(8, MO_UB)
1165DO_GEN_ST(16, MO_UW)
1166DO_GEN_ST(32, MO_UL)
08307563 1167
37e6456e
PM
1168static inline void gen_hvc(DisasContext *s, int imm16)
1169{
1170 /* The pre HVC helper handles cases when HVC gets trapped
1171 * as an undefined insn by runtime configuration (ie before
1172 * the insn really executes).
1173 */
1174 gen_set_pc_im(s, s->pc - 4);
1175 gen_helper_pre_hvc(cpu_env);
1176 /* Otherwise we will treat this as a real exception which
1177 * happens after execution of the insn. (The distinction matters
1178 * for the PC value reported to the exception handler and also
1179 * for single stepping.)
1180 */
1181 s->svc_imm = imm16;
1182 gen_set_pc_im(s, s->pc);
dcba3a8d 1183 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1184}
1185
1186static inline void gen_smc(DisasContext *s)
1187{
1188 /* As with HVC, we may take an exception either before or after
1189 * the insn executes.
1190 */
1191 TCGv_i32 tmp;
1192
1193 gen_set_pc_im(s, s->pc - 4);
1194 tmp = tcg_const_i32(syn_aa32_smc());
1195 gen_helper_pre_smc(cpu_env, tmp);
1196 tcg_temp_free_i32(tmp);
1197 gen_set_pc_im(s, s->pc);
dcba3a8d 1198 s->base.is_jmp = DISAS_SMC;
37e6456e
PM
1199}
1200
d4a2dc67
PM
1201static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1202{
1203 gen_set_condexec(s);
1204 gen_set_pc_im(s, s->pc - offset);
1205 gen_exception_internal(excp);
dcba3a8d 1206 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1207}
1208
73710361
GB
1209static void gen_exception_insn(DisasContext *s, int offset, int excp,
1210 int syn, uint32_t target_el)
d4a2dc67
PM
1211{
1212 gen_set_condexec(s);
1213 gen_set_pc_im(s, s->pc - offset);
73710361 1214 gen_exception(excp, syn, target_el);
dcba3a8d 1215 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1216}
1217
b5ff1b31
FB
1218/* Force a TB lookup after an instruction that changes the CPU state. */
1219static inline void gen_lookup_tb(DisasContext *s)
1220{
a6445c52 1221 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
dcba3a8d 1222 s->base.is_jmp = DISAS_EXIT;
b5ff1b31
FB
1223}
1224
19a6e31c
PM
1225static inline void gen_hlt(DisasContext *s, int imm)
1226{
1227 /* HLT. This has two purposes.
1228 * Architecturally, it is an external halting debug instruction.
1229 * Since QEMU doesn't implement external debug, we treat this as
1230 * it is required for halting debug disabled: it will UNDEF.
1231 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1232 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1233 * must trigger semihosting even for ARMv7 and earlier, where
1234 * HLT was an undefined encoding.
1235 * In system mode, we don't allow userspace access to
1236 * semihosting, to provide some semblance of security
1237 * (and for consistency with our 32-bit semihosting).
1238 */
1239 if (semihosting_enabled() &&
1240#ifndef CONFIG_USER_ONLY
1241 s->current_el != 0 &&
1242#endif
1243 (imm == (s->thumb ? 0x3c : 0xf000))) {
1244 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1245 return;
1246 }
1247
1248 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1249 default_exception_el(s));
1250}
1251
b0109805 1252static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1253 TCGv_i32 var)
2c0262af 1254{
1e8d4eec 1255 int val, rm, shift, shiftop;
39d5492a 1256 TCGv_i32 offset;
2c0262af
FB
1257
1258 if (!(insn & (1 << 25))) {
1259 /* immediate */
1260 val = insn & 0xfff;
1261 if (!(insn & (1 << 23)))
1262 val = -val;
537730b9 1263 if (val != 0)
b0109805 1264 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1265 } else {
1266 /* shift/register */
1267 rm = (insn) & 0xf;
1268 shift = (insn >> 7) & 0x1f;
1e8d4eec 1269 shiftop = (insn >> 5) & 3;
b26eefb6 1270 offset = load_reg(s, rm);
9a119ff6 1271 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1272 if (!(insn & (1 << 23)))
b0109805 1273 tcg_gen_sub_i32(var, var, offset);
2c0262af 1274 else
b0109805 1275 tcg_gen_add_i32(var, var, offset);
7d1b0095 1276 tcg_temp_free_i32(offset);
2c0262af
FB
1277 }
1278}
1279
191f9a93 1280static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1281 int extra, TCGv_i32 var)
2c0262af
FB
1282{
1283 int val, rm;
39d5492a 1284 TCGv_i32 offset;
3b46e624 1285
2c0262af
FB
1286 if (insn & (1 << 22)) {
1287 /* immediate */
1288 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1289 if (!(insn & (1 << 23)))
1290 val = -val;
18acad92 1291 val += extra;
537730b9 1292 if (val != 0)
b0109805 1293 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1294 } else {
1295 /* register */
191f9a93 1296 if (extra)
b0109805 1297 tcg_gen_addi_i32(var, var, extra);
2c0262af 1298 rm = (insn) & 0xf;
b26eefb6 1299 offset = load_reg(s, rm);
2c0262af 1300 if (!(insn & (1 << 23)))
b0109805 1301 tcg_gen_sub_i32(var, var, offset);
2c0262af 1302 else
b0109805 1303 tcg_gen_add_i32(var, var, offset);
7d1b0095 1304 tcg_temp_free_i32(offset);
2c0262af
FB
1305 }
1306}
1307
5aaebd13
PM
1308static TCGv_ptr get_fpstatus_ptr(int neon)
1309{
1310 TCGv_ptr statusptr = tcg_temp_new_ptr();
1311 int offset;
1312 if (neon) {
0ecb72a5 1313 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1314 } else {
0ecb72a5 1315 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1316 }
1317 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1318 return statusptr;
1319}
1320
4373f3ce
PB
1321#define VFP_OP2(name) \
1322static inline void gen_vfp_##name(int dp) \
1323{ \
ae1857ec
PM
1324 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1325 if (dp) { \
1326 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1327 } else { \
1328 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1329 } \
1330 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1331}
1332
4373f3ce
PB
1333VFP_OP2(add)
1334VFP_OP2(sub)
1335VFP_OP2(mul)
1336VFP_OP2(div)
1337
1338#undef VFP_OP2
1339
605a6aed
PM
1340static inline void gen_vfp_F1_mul(int dp)
1341{
1342 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1343 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1344 if (dp) {
ae1857ec 1345 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1346 } else {
ae1857ec 1347 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1348 }
ae1857ec 1349 tcg_temp_free_ptr(fpst);
605a6aed
PM
1350}
1351
1352static inline void gen_vfp_F1_neg(int dp)
1353{
1354 /* Like gen_vfp_neg() but put result in F1 */
1355 if (dp) {
1356 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1357 } else {
1358 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1359 }
1360}
1361
4373f3ce
PB
1362static inline void gen_vfp_abs(int dp)
1363{
1364 if (dp)
1365 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1366 else
1367 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1368}
1369
1370static inline void gen_vfp_neg(int dp)
1371{
1372 if (dp)
1373 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1374 else
1375 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1376}
1377
1378static inline void gen_vfp_sqrt(int dp)
1379{
1380 if (dp)
1381 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1382 else
1383 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1384}
1385
1386static inline void gen_vfp_cmp(int dp)
1387{
1388 if (dp)
1389 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1390 else
1391 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1392}
1393
1394static inline void gen_vfp_cmpe(int dp)
1395{
1396 if (dp)
1397 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1398 else
1399 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1400}
1401
1402static inline void gen_vfp_F1_ld0(int dp)
1403{
1404 if (dp)
5b340b51 1405 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1406 else
5b340b51 1407 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1408}
1409
5500b06c
PM
1410#define VFP_GEN_ITOF(name) \
1411static inline void gen_vfp_##name(int dp, int neon) \
1412{ \
5aaebd13 1413 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1414 if (dp) { \
1415 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1416 } else { \
1417 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1418 } \
b7fa9214 1419 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1420}
1421
5500b06c
PM
1422VFP_GEN_ITOF(uito)
1423VFP_GEN_ITOF(sito)
1424#undef VFP_GEN_ITOF
4373f3ce 1425
5500b06c
PM
1426#define VFP_GEN_FTOI(name) \
1427static inline void gen_vfp_##name(int dp, int neon) \
1428{ \
5aaebd13 1429 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1430 if (dp) { \
1431 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1432 } else { \
1433 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1434 } \
b7fa9214 1435 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1436}
1437
5500b06c
PM
1438VFP_GEN_FTOI(toui)
1439VFP_GEN_FTOI(touiz)
1440VFP_GEN_FTOI(tosi)
1441VFP_GEN_FTOI(tosiz)
1442#undef VFP_GEN_FTOI
4373f3ce 1443
16d5b3ca 1444#define VFP_GEN_FIX(name, round) \
5500b06c 1445static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1446{ \
39d5492a 1447 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1448 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1449 if (dp) { \
16d5b3ca
WN
1450 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1451 statusptr); \
5500b06c 1452 } else { \
16d5b3ca
WN
1453 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1454 statusptr); \
5500b06c 1455 } \
b75263d6 1456 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1457 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1458}
16d5b3ca
WN
1459VFP_GEN_FIX(tosh, _round_to_zero)
1460VFP_GEN_FIX(tosl, _round_to_zero)
1461VFP_GEN_FIX(touh, _round_to_zero)
1462VFP_GEN_FIX(toul, _round_to_zero)
1463VFP_GEN_FIX(shto, )
1464VFP_GEN_FIX(slto, )
1465VFP_GEN_FIX(uhto, )
1466VFP_GEN_FIX(ulto, )
4373f3ce 1467#undef VFP_GEN_FIX
9ee6e8bb 1468
39d5492a 1469static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1470{
08307563 1471 if (dp) {
12dcc321 1472 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1473 } else {
12dcc321 1474 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1475 }
b5ff1b31
FB
1476}
1477
39d5492a 1478static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1479{
08307563 1480 if (dp) {
12dcc321 1481 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1482 } else {
12dcc321 1483 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1484 }
b5ff1b31
FB
1485}
1486
8e96005d
FB
1487static inline long
1488vfp_reg_offset (int dp, int reg)
1489{
1490 if (dp)
1491 return offsetof(CPUARMState, vfp.regs[reg]);
1492 else if (reg & 1) {
1493 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1494 + offsetof(CPU_DoubleU, l.upper);
1495 } else {
1496 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1497 + offsetof(CPU_DoubleU, l.lower);
1498 }
1499}
9ee6e8bb
PB
1500
1501/* Return the offset of a 32-bit piece of a NEON register.
1502 zero is the least significant end of the register. */
1503static inline long
1504neon_reg_offset (int reg, int n)
1505{
1506 int sreg;
1507 sreg = reg * 2 + n;
1508 return vfp_reg_offset(0, sreg);
1509}
1510
39d5492a 1511static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1512{
39d5492a 1513 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1514 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1515 return tmp;
1516}
1517
39d5492a 1518static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1519{
1520 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1521 tcg_temp_free_i32(var);
8f8e3aa4
PB
1522}
1523
a7812ae4 1524static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1525{
1526 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1527}
1528
a7812ae4 1529static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1530{
1531 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1532}
1533
/* The F0/F1 scratch values are plain integer TCG temps; alias the
 * "float" load/store names to the integer ops of the same width.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
b7bcbe95
FB
1539static inline void gen_mov_F0_vreg(int dp, int reg)
1540{
1541 if (dp)
4373f3ce 1542 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1543 else
4373f3ce 1544 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1545}
1546
1547static inline void gen_mov_F1_vreg(int dp, int reg)
1548{
1549 if (dp)
4373f3ce 1550 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1551 else
4373f3ce 1552 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1553}
1554
1555static inline void gen_mov_vreg_F0(int dp, int reg)
1556{
1557 if (dp)
4373f3ce 1558 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1559 else
4373f3ce 1560 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1561}
1562
18c9b560
AZ
1563#define ARM_CP_RW_BIT (1 << 20)
1564
a7812ae4 1565static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1566{
0ecb72a5 1567 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1568}
1569
a7812ae4 1570static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1571{
0ecb72a5 1572 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1573}
1574
39d5492a 1575static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1576{
39d5492a 1577 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1578 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1579 return var;
e677137d
PB
1580}
1581
39d5492a 1582static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1583{
0ecb72a5 1584 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1585 tcg_temp_free_i32(var);
e677137d
PB
1586}
1587
1588static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1589{
1590 iwmmxt_store_reg(cpu_M0, rn);
1591}
1592
1593static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1594{
1595 iwmmxt_load_reg(cpu_M0, rn);
1596}
1597
1598static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1599{
1600 iwmmxt_load_reg(cpu_V1, rn);
1601 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1602}
1603
1604static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1605{
1606 iwmmxt_load_reg(cpu_V1, rn);
1607 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1608}
1609
1610static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1611{
1612 iwmmxt_load_reg(cpu_V1, rn);
1613 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1614}
1615
/* Generators for iwMMXt binary/unary ops on M0; the _ENV variants pass
 * cpu_env so the helper can update saturation/flag state.
 */
#define IWMMXT_OP(name)                                                  \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)                 \
{                                                                        \
    iwmmxt_load_reg(cpu_V1, rn);                                         \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);                    \
}

#define IWMMXT_OP_ENV(name)                                              \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)                 \
{                                                                        \
    iwmmxt_load_reg(cpu_V1, rn);                                         \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);           \
}

#define IWMMXT_OP_ENV_SIZE(name)                                         \
IWMMXT_OP_ENV(name##b)                                                   \
IWMMXT_OP_ENV(name##w)                                                   \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name)                                             \
static inline void gen_op_iwmmxt_##name##_M0(void)                       \
{                                                                        \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0);                   \
}
1640
1641IWMMXT_OP(maddsq)
1642IWMMXT_OP(madduq)
1643IWMMXT_OP(sadb)
1644IWMMXT_OP(sadw)
1645IWMMXT_OP(mulslw)
1646IWMMXT_OP(mulshw)
1647IWMMXT_OP(mululw)
1648IWMMXT_OP(muluhw)
1649IWMMXT_OP(macsw)
1650IWMMXT_OP(macuw)
1651
477955bd
PM
1652IWMMXT_OP_ENV_SIZE(unpackl)
1653IWMMXT_OP_ENV_SIZE(unpackh)
1654
1655IWMMXT_OP_ENV1(unpacklub)
1656IWMMXT_OP_ENV1(unpackluw)
1657IWMMXT_OP_ENV1(unpacklul)
1658IWMMXT_OP_ENV1(unpackhub)
1659IWMMXT_OP_ENV1(unpackhuw)
1660IWMMXT_OP_ENV1(unpackhul)
1661IWMMXT_OP_ENV1(unpacklsb)
1662IWMMXT_OP_ENV1(unpacklsw)
1663IWMMXT_OP_ENV1(unpacklsl)
1664IWMMXT_OP_ENV1(unpackhsb)
1665IWMMXT_OP_ENV1(unpackhsw)
1666IWMMXT_OP_ENV1(unpackhsl)
1667
1668IWMMXT_OP_ENV_SIZE(cmpeq)
1669IWMMXT_OP_ENV_SIZE(cmpgtu)
1670IWMMXT_OP_ENV_SIZE(cmpgts)
1671
1672IWMMXT_OP_ENV_SIZE(mins)
1673IWMMXT_OP_ENV_SIZE(minu)
1674IWMMXT_OP_ENV_SIZE(maxs)
1675IWMMXT_OP_ENV_SIZE(maxu)
1676
1677IWMMXT_OP_ENV_SIZE(subn)
1678IWMMXT_OP_ENV_SIZE(addn)
1679IWMMXT_OP_ENV_SIZE(subu)
1680IWMMXT_OP_ENV_SIZE(addu)
1681IWMMXT_OP_ENV_SIZE(subs)
1682IWMMXT_OP_ENV_SIZE(adds)
1683
1684IWMMXT_OP_ENV(avgb0)
1685IWMMXT_OP_ENV(avgb1)
1686IWMMXT_OP_ENV(avgw0)
1687IWMMXT_OP_ENV(avgw1)
e677137d 1688
477955bd
PM
1689IWMMXT_OP_ENV(packuw)
1690IWMMXT_OP_ENV(packul)
1691IWMMXT_OP_ENV(packuq)
1692IWMMXT_OP_ENV(packsw)
1693IWMMXT_OP_ENV(packsl)
1694IWMMXT_OP_ENV(packsq)
e677137d 1695
e677137d
PB
1696static void gen_op_iwmmxt_set_mup(void)
1697{
39d5492a 1698 TCGv_i32 tmp;
e677137d
PB
1699 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1700 tcg_gen_ori_i32(tmp, tmp, 2);
1701 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1702}
1703
1704static void gen_op_iwmmxt_set_cup(void)
1705{
39d5492a 1706 TCGv_i32 tmp;
e677137d
PB
1707 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1708 tcg_gen_ori_i32(tmp, tmp, 1);
1709 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1710}
1711
1712static void gen_op_iwmmxt_setpsr_nz(void)
1713{
39d5492a 1714 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1715 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1716 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1717}
1718
1719static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1720{
1721 iwmmxt_load_reg(cpu_V1, rn);
86831435 1722 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1723 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1724}
1725
39d5492a
PM
1726static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1727 TCGv_i32 dest)
18c9b560
AZ
1728{
1729 int rd;
1730 uint32_t offset;
39d5492a 1731 TCGv_i32 tmp;
18c9b560
AZ
1732
1733 rd = (insn >> 16) & 0xf;
da6b5335 1734 tmp = load_reg(s, rd);
18c9b560
AZ
1735
1736 offset = (insn & 0xff) << ((insn >> 7) & 2);
1737 if (insn & (1 << 24)) {
1738 /* Pre indexed */
1739 if (insn & (1 << 23))
da6b5335 1740 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1741 else
da6b5335
FN
1742 tcg_gen_addi_i32(tmp, tmp, -offset);
1743 tcg_gen_mov_i32(dest, tmp);
18c9b560 1744 if (insn & (1 << 21))
da6b5335
FN
1745 store_reg(s, rd, tmp);
1746 else
7d1b0095 1747 tcg_temp_free_i32(tmp);
18c9b560
AZ
1748 } else if (insn & (1 << 21)) {
1749 /* Post indexed */
da6b5335 1750 tcg_gen_mov_i32(dest, tmp);
18c9b560 1751 if (insn & (1 << 23))
da6b5335 1752 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1753 else
da6b5335
FN
1754 tcg_gen_addi_i32(tmp, tmp, -offset);
1755 store_reg(s, rd, tmp);
18c9b560
AZ
1756 } else if (!(insn & (1 << 23)))
1757 return 1;
1758 return 0;
1759}
1760
39d5492a 1761static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1762{
1763 int rd = (insn >> 0) & 0xf;
39d5492a 1764 TCGv_i32 tmp;
18c9b560 1765
da6b5335
FN
1766 if (insn & (1 << 8)) {
1767 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1768 return 1;
da6b5335
FN
1769 } else {
1770 tmp = iwmmxt_load_creg(rd);
1771 }
1772 } else {
7d1b0095 1773 tmp = tcg_temp_new_i32();
da6b5335 1774 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1775 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1776 }
1777 tcg_gen_andi_i32(tmp, tmp, mask);
1778 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1779 tcg_temp_free_i32(tmp);
18c9b560
AZ
1780 return 0;
1781}
1782
a1c7273b 1783/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1784 (ie. an undefined instruction). */
7dcc1f89 1785static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1786{
1787 int rd, wrd;
1788 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1789 TCGv_i32 addr;
1790 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1791
1792 if ((insn & 0x0e000e00) == 0x0c000000) {
1793 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1794 wrd = insn & 0xf;
1795 rdlo = (insn >> 12) & 0xf;
1796 rdhi = (insn >> 16) & 0xf;
1797 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1798 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1799 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1800 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1801 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1802 } else { /* TMCRR */
da6b5335
FN
1803 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1804 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1805 gen_op_iwmmxt_set_mup();
1806 }
1807 return 0;
1808 }
1809
1810 wrd = (insn >> 12) & 0xf;
7d1b0095 1811 addr = tcg_temp_new_i32();
da6b5335 1812 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1813 tcg_temp_free_i32(addr);
18c9b560 1814 return 1;
da6b5335 1815 }
18c9b560
AZ
1816 if (insn & ARM_CP_RW_BIT) {
1817 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1818 tmp = tcg_temp_new_i32();
12dcc321 1819 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1820 iwmmxt_store_creg(wrd, tmp);
18c9b560 1821 } else {
e677137d
PB
1822 i = 1;
1823 if (insn & (1 << 8)) {
1824 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1825 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1826 i = 0;
1827 } else { /* WLDRW wRd */
29531141 1828 tmp = tcg_temp_new_i32();
12dcc321 1829 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1830 }
1831 } else {
29531141 1832 tmp = tcg_temp_new_i32();
e677137d 1833 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1834 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1835 } else { /* WLDRB */
12dcc321 1836 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1837 }
1838 }
1839 if (i) {
1840 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1841 tcg_temp_free_i32(tmp);
e677137d 1842 }
18c9b560
AZ
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 }
1845 } else {
1846 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1847 tmp = iwmmxt_load_creg(wrd);
12dcc321 1848 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1849 } else {
1850 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1851 tmp = tcg_temp_new_i32();
e677137d
PB
1852 if (insn & (1 << 8)) {
1853 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1854 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1855 } else { /* WSTRW wRd */
ecc7b3aa 1856 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1857 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1858 }
1859 } else {
1860 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1861 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1862 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1863 } else { /* WSTRB */
ecc7b3aa 1864 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1865 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1866 }
1867 }
18c9b560 1868 }
29531141 1869 tcg_temp_free_i32(tmp);
18c9b560 1870 }
7d1b0095 1871 tcg_temp_free_i32(addr);
18c9b560
AZ
1872 return 0;
1873 }
1874
1875 if ((insn & 0x0f000000) != 0x0e000000)
1876 return 1;
1877
1878 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1879 case 0x000: /* WOR */
1880 wrd = (insn >> 12) & 0xf;
1881 rd0 = (insn >> 0) & 0xf;
1882 rd1 = (insn >> 16) & 0xf;
1883 gen_op_iwmmxt_movq_M0_wRn(rd0);
1884 gen_op_iwmmxt_orq_M0_wRn(rd1);
1885 gen_op_iwmmxt_setpsr_nz();
1886 gen_op_iwmmxt_movq_wRn_M0(wrd);
1887 gen_op_iwmmxt_set_mup();
1888 gen_op_iwmmxt_set_cup();
1889 break;
1890 case 0x011: /* TMCR */
1891 if (insn & 0xf)
1892 return 1;
1893 rd = (insn >> 12) & 0xf;
1894 wrd = (insn >> 16) & 0xf;
1895 switch (wrd) {
1896 case ARM_IWMMXT_wCID:
1897 case ARM_IWMMXT_wCASF:
1898 break;
1899 case ARM_IWMMXT_wCon:
1900 gen_op_iwmmxt_set_cup();
1901 /* Fall through. */
1902 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1903 tmp = iwmmxt_load_creg(wrd);
1904 tmp2 = load_reg(s, rd);
f669df27 1905 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1906 tcg_temp_free_i32(tmp2);
da6b5335 1907 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1908 break;
1909 case ARM_IWMMXT_wCGR0:
1910 case ARM_IWMMXT_wCGR1:
1911 case ARM_IWMMXT_wCGR2:
1912 case ARM_IWMMXT_wCGR3:
1913 gen_op_iwmmxt_set_cup();
da6b5335
FN
1914 tmp = load_reg(s, rd);
1915 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1916 break;
1917 default:
1918 return 1;
1919 }
1920 break;
1921 case 0x100: /* WXOR */
1922 wrd = (insn >> 12) & 0xf;
1923 rd0 = (insn >> 0) & 0xf;
1924 rd1 = (insn >> 16) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0);
1926 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1927 gen_op_iwmmxt_setpsr_nz();
1928 gen_op_iwmmxt_movq_wRn_M0(wrd);
1929 gen_op_iwmmxt_set_mup();
1930 gen_op_iwmmxt_set_cup();
1931 break;
1932 case 0x111: /* TMRC */
1933 if (insn & 0xf)
1934 return 1;
1935 rd = (insn >> 12) & 0xf;
1936 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1937 tmp = iwmmxt_load_creg(wrd);
1938 store_reg(s, rd, tmp);
18c9b560
AZ
1939 break;
1940 case 0x300: /* WANDN */
1941 wrd = (insn >> 12) & 0xf;
1942 rd0 = (insn >> 0) & 0xf;
1943 rd1 = (insn >> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1945 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1946 gen_op_iwmmxt_andq_M0_wRn(rd1);
1947 gen_op_iwmmxt_setpsr_nz();
1948 gen_op_iwmmxt_movq_wRn_M0(wrd);
1949 gen_op_iwmmxt_set_mup();
1950 gen_op_iwmmxt_set_cup();
1951 break;
1952 case 0x200: /* WAND */
1953 wrd = (insn >> 12) & 0xf;
1954 rd0 = (insn >> 0) & 0xf;
1955 rd1 = (insn >> 16) & 0xf;
1956 gen_op_iwmmxt_movq_M0_wRn(rd0);
1957 gen_op_iwmmxt_andq_M0_wRn(rd1);
1958 gen_op_iwmmxt_setpsr_nz();
1959 gen_op_iwmmxt_movq_wRn_M0(wrd);
1960 gen_op_iwmmxt_set_mup();
1961 gen_op_iwmmxt_set_cup();
1962 break;
1963 case 0x810: case 0xa10: /* WMADD */
1964 wrd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 0) & 0xf;
1966 rd1 = (insn >> 16) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
1968 if (insn & (1 << 21))
1969 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1970 else
1971 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1974 break;
1975 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 rd1 = (insn >> 0) & 0xf;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
1980 switch ((insn >> 22) & 3) {
1981 case 0:
1982 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1983 break;
1984 case 1:
1985 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1986 break;
1987 case 2:
1988 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1989 break;
1990 case 3:
1991 return 1;
1992 }
1993 gen_op_iwmmxt_movq_wRn_M0(wrd);
1994 gen_op_iwmmxt_set_mup();
1995 gen_op_iwmmxt_set_cup();
1996 break;
1997 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 rd1 = (insn >> 0) & 0xf;
2001 gen_op_iwmmxt_movq_M0_wRn(rd0);
2002 switch ((insn >> 22) & 3) {
2003 case 0:
2004 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2005 break;
2006 case 1:
2007 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2008 break;
2009 case 2:
2010 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2011 break;
2012 case 3:
2013 return 1;
2014 }
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2018 break;
2019 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2020 wrd = (insn >> 12) & 0xf;
2021 rd0 = (insn >> 16) & 0xf;
2022 rd1 = (insn >> 0) & 0xf;
2023 gen_op_iwmmxt_movq_M0_wRn(rd0);
2024 if (insn & (1 << 22))
2025 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2026 else
2027 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2028 if (!(insn & (1 << 20)))
2029 gen_op_iwmmxt_addl_M0_wRn(wrd);
2030 gen_op_iwmmxt_movq_wRn_M0(wrd);
2031 gen_op_iwmmxt_set_mup();
2032 break;
2033 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 rd1 = (insn >> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2038 if (insn & (1 << 21)) {
2039 if (insn & (1 << 20))
2040 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2041 else
2042 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2043 } else {
2044 if (insn & (1 << 20))
2045 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2046 else
2047 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2048 }
18c9b560
AZ
2049 gen_op_iwmmxt_movq_wRn_M0(wrd);
2050 gen_op_iwmmxt_set_mup();
2051 break;
2052 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2053 wrd = (insn >> 12) & 0xf;
2054 rd0 = (insn >> 16) & 0xf;
2055 rd1 = (insn >> 0) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
2057 if (insn & (1 << 21))
2058 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2059 else
2060 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2061 if (!(insn & (1 << 20))) {
e677137d
PB
2062 iwmmxt_load_reg(cpu_V1, wrd);
2063 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2064 }
2065 gen_op_iwmmxt_movq_wRn_M0(wrd);
2066 gen_op_iwmmxt_set_mup();
2067 break;
2068 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2069 wrd = (insn >> 12) & 0xf;
2070 rd0 = (insn >> 16) & 0xf;
2071 rd1 = (insn >> 0) & 0xf;
2072 gen_op_iwmmxt_movq_M0_wRn(rd0);
2073 switch ((insn >> 22) & 3) {
2074 case 0:
2075 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2076 break;
2077 case 1:
2078 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2079 break;
2080 case 2:
2081 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2082 break;
2083 case 3:
2084 return 1;
2085 }
2086 gen_op_iwmmxt_movq_wRn_M0(wrd);
2087 gen_op_iwmmxt_set_mup();
2088 gen_op_iwmmxt_set_cup();
2089 break;
2090 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2091 wrd = (insn >> 12) & 0xf;
2092 rd0 = (insn >> 16) & 0xf;
2093 rd1 = (insn >> 0) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2095 if (insn & (1 << 22)) {
2096 if (insn & (1 << 20))
2097 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2098 else
2099 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2100 } else {
2101 if (insn & (1 << 20))
2102 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2103 else
2104 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2105 }
18c9b560
AZ
2106 gen_op_iwmmxt_movq_wRn_M0(wrd);
2107 gen_op_iwmmxt_set_mup();
2108 gen_op_iwmmxt_set_cup();
2109 break;
2110 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 rd1 = (insn >> 0) & 0xf;
2114 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2115 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2116 tcg_gen_andi_i32(tmp, tmp, 7);
2117 iwmmxt_load_reg(cpu_V1, rd1);
2118 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2119 tcg_temp_free_i32(tmp);
18c9b560
AZ
2120 gen_op_iwmmxt_movq_wRn_M0(wrd);
2121 gen_op_iwmmxt_set_mup();
2122 break;
2123 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2124 if (((insn >> 6) & 3) == 3)
2125 return 1;
18c9b560
AZ
2126 rd = (insn >> 12) & 0xf;
2127 wrd = (insn >> 16) & 0xf;
da6b5335 2128 tmp = load_reg(s, rd);
18c9b560
AZ
2129 gen_op_iwmmxt_movq_M0_wRn(wrd);
2130 switch ((insn >> 6) & 3) {
2131 case 0:
da6b5335
FN
2132 tmp2 = tcg_const_i32(0xff);
2133 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2134 break;
2135 case 1:
da6b5335
FN
2136 tmp2 = tcg_const_i32(0xffff);
2137 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2138 break;
2139 case 2:
da6b5335
FN
2140 tmp2 = tcg_const_i32(0xffffffff);
2141 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2142 break;
da6b5335 2143 default:
39d5492a
PM
2144 TCGV_UNUSED_I32(tmp2);
2145 TCGV_UNUSED_I32(tmp3);
18c9b560 2146 }
da6b5335 2147 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2148 tcg_temp_free_i32(tmp3);
2149 tcg_temp_free_i32(tmp2);
7d1b0095 2150 tcg_temp_free_i32(tmp);
18c9b560
AZ
2151 gen_op_iwmmxt_movq_wRn_M0(wrd);
2152 gen_op_iwmmxt_set_mup();
2153 break;
2154 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2155 rd = (insn >> 12) & 0xf;
2156 wrd = (insn >> 16) & 0xf;
da6b5335 2157 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2158 return 1;
2159 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2160 tmp = tcg_temp_new_i32();
18c9b560
AZ
2161 switch ((insn >> 22) & 3) {
2162 case 0:
da6b5335 2163 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2164 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2165 if (insn & 8) {
2166 tcg_gen_ext8s_i32(tmp, tmp);
2167 } else {
2168 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2169 }
2170 break;
2171 case 1:
da6b5335 2172 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2173 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2174 if (insn & 8) {
2175 tcg_gen_ext16s_i32(tmp, tmp);
2176 } else {
2177 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2178 }
2179 break;
2180 case 2:
da6b5335 2181 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2182 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2183 break;
18c9b560 2184 }
da6b5335 2185 store_reg(s, rd, tmp);
18c9b560
AZ
2186 break;
2187 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2188 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2189 return 1;
da6b5335 2190 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2191 switch ((insn >> 22) & 3) {
2192 case 0:
da6b5335 2193 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2194 break;
2195 case 1:
da6b5335 2196 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2197 break;
2198 case 2:
da6b5335 2199 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2200 break;
18c9b560 2201 }
da6b5335
FN
2202 tcg_gen_shli_i32(tmp, tmp, 28);
2203 gen_set_nzcv(tmp);
7d1b0095 2204 tcg_temp_free_i32(tmp);
18c9b560
AZ
2205 break;
2206 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2207 if (((insn >> 6) & 3) == 3)
2208 return 1;
18c9b560
AZ
2209 rd = (insn >> 12) & 0xf;
2210 wrd = (insn >> 16) & 0xf;
da6b5335 2211 tmp = load_reg(s, rd);
18c9b560
AZ
2212 switch ((insn >> 6) & 3) {
2213 case 0:
da6b5335 2214 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2215 break;
2216 case 1:
da6b5335 2217 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2218 break;
2219 case 2:
da6b5335 2220 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2221 break;
18c9b560 2222 }
7d1b0095 2223 tcg_temp_free_i32(tmp);
18c9b560
AZ
2224 gen_op_iwmmxt_movq_wRn_M0(wrd);
2225 gen_op_iwmmxt_set_mup();
2226 break;
2227 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2228 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2229 return 1;
da6b5335 2230 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2231 tmp2 = tcg_temp_new_i32();
da6b5335 2232 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2233 switch ((insn >> 22) & 3) {
2234 case 0:
2235 for (i = 0; i < 7; i ++) {
da6b5335
FN
2236 tcg_gen_shli_i32(tmp2, tmp2, 4);
2237 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2238 }
2239 break;
2240 case 1:
2241 for (i = 0; i < 3; i ++) {
da6b5335
FN
2242 tcg_gen_shli_i32(tmp2, tmp2, 8);
2243 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2244 }
2245 break;
2246 case 2:
da6b5335
FN
2247 tcg_gen_shli_i32(tmp2, tmp2, 16);
2248 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2249 break;
18c9b560 2250 }
da6b5335 2251 gen_set_nzcv(tmp);
7d1b0095
PM
2252 tcg_temp_free_i32(tmp2);
2253 tcg_temp_free_i32(tmp);
18c9b560
AZ
2254 break;
2255 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 22) & 3) {
2260 case 0:
e677137d 2261 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2262 break;
2263 case 1:
e677137d 2264 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2265 break;
2266 case 2:
e677137d 2267 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2268 break;
2269 case 3:
2270 return 1;
2271 }
2272 gen_op_iwmmxt_movq_wRn_M0(wrd);
2273 gen_op_iwmmxt_set_mup();
2274 break;
2275 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2276 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2277 return 1;
da6b5335 2278 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2279 tmp2 = tcg_temp_new_i32();
da6b5335 2280 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2281 switch ((insn >> 22) & 3) {
2282 case 0:
2283 for (i = 0; i < 7; i ++) {
da6b5335
FN
2284 tcg_gen_shli_i32(tmp2, tmp2, 4);
2285 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2286 }
2287 break;
2288 case 1:
2289 for (i = 0; i < 3; i ++) {
da6b5335
FN
2290 tcg_gen_shli_i32(tmp2, tmp2, 8);
2291 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2292 }
2293 break;
2294 case 2:
da6b5335
FN
2295 tcg_gen_shli_i32(tmp2, tmp2, 16);
2296 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2297 break;
18c9b560 2298 }
da6b5335 2299 gen_set_nzcv(tmp);
7d1b0095
PM
2300 tcg_temp_free_i32(tmp2);
2301 tcg_temp_free_i32(tmp);
18c9b560
AZ
2302 break;
2303 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2304 rd = (insn >> 12) & 0xf;
2305 rd0 = (insn >> 16) & 0xf;
da6b5335 2306 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2307 return 1;
2308 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2309 tmp = tcg_temp_new_i32();
18c9b560
AZ
2310 switch ((insn >> 22) & 3) {
2311 case 0:
da6b5335 2312 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2313 break;
2314 case 1:
da6b5335 2315 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2316 break;
2317 case 2:
da6b5335 2318 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2319 break;
18c9b560 2320 }
da6b5335 2321 store_reg(s, rd, tmp);
18c9b560
AZ
2322 break;
2323 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2324 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2325 wrd = (insn >> 12) & 0xf;
2326 rd0 = (insn >> 16) & 0xf;
2327 rd1 = (insn >> 0) & 0xf;
2328 gen_op_iwmmxt_movq_M0_wRn(rd0);
2329 switch ((insn >> 22) & 3) {
2330 case 0:
2331 if (insn & (1 << 21))
2332 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2333 else
2334 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2335 break;
2336 case 1:
2337 if (insn & (1 << 21))
2338 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2339 else
2340 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2341 break;
2342 case 2:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2345 else
2346 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2347 break;
2348 case 3:
2349 return 1;
2350 }
2351 gen_op_iwmmxt_movq_wRn_M0(wrd);
2352 gen_op_iwmmxt_set_mup();
2353 gen_op_iwmmxt_set_cup();
2354 break;
2355 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2356 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2357 wrd = (insn >> 12) & 0xf;
2358 rd0 = (insn >> 16) & 0xf;
2359 gen_op_iwmmxt_movq_M0_wRn(rd0);
2360 switch ((insn >> 22) & 3) {
2361 case 0:
2362 if (insn & (1 << 21))
2363 gen_op_iwmmxt_unpacklsb_M0();
2364 else
2365 gen_op_iwmmxt_unpacklub_M0();
2366 break;
2367 case 1:
2368 if (insn & (1 << 21))
2369 gen_op_iwmmxt_unpacklsw_M0();
2370 else
2371 gen_op_iwmmxt_unpackluw_M0();
2372 break;
2373 case 2:
2374 if (insn & (1 << 21))
2375 gen_op_iwmmxt_unpacklsl_M0();
2376 else
2377 gen_op_iwmmxt_unpacklul_M0();
2378 break;
2379 case 3:
2380 return 1;
2381 }
2382 gen_op_iwmmxt_movq_wRn_M0(wrd);
2383 gen_op_iwmmxt_set_mup();
2384 gen_op_iwmmxt_set_cup();
2385 break;
2386 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2387 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2388 wrd = (insn >> 12) & 0xf;
2389 rd0 = (insn >> 16) & 0xf;
2390 gen_op_iwmmxt_movq_M0_wRn(rd0);
2391 switch ((insn >> 22) & 3) {
2392 case 0:
2393 if (insn & (1 << 21))
2394 gen_op_iwmmxt_unpackhsb_M0();
2395 else
2396 gen_op_iwmmxt_unpackhub_M0();
2397 break;
2398 case 1:
2399 if (insn & (1 << 21))
2400 gen_op_iwmmxt_unpackhsw_M0();
2401 else
2402 gen_op_iwmmxt_unpackhuw_M0();
2403 break;
2404 case 2:
2405 if (insn & (1 << 21))
2406 gen_op_iwmmxt_unpackhsl_M0();
2407 else
2408 gen_op_iwmmxt_unpackhul_M0();
2409 break;
2410 case 3:
2411 return 1;
2412 }
2413 gen_op_iwmmxt_movq_wRn_M0(wrd);
2414 gen_op_iwmmxt_set_mup();
2415 gen_op_iwmmxt_set_cup();
2416 break;
2417 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2418 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2419 if (((insn >> 22) & 3) == 0)
2420 return 1;
18c9b560
AZ
2421 wrd = (insn >> 12) & 0xf;
2422 rd0 = (insn >> 16) & 0xf;
2423 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2424 tmp = tcg_temp_new_i32();
da6b5335 2425 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2426 tcg_temp_free_i32(tmp);
18c9b560 2427 return 1;
da6b5335 2428 }
18c9b560 2429 switch ((insn >> 22) & 3) {
18c9b560 2430 case 1:
477955bd 2431 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2432 break;
2433 case 2:
477955bd 2434 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2435 break;
2436 case 3:
477955bd 2437 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2438 break;
2439 }
7d1b0095 2440 tcg_temp_free_i32(tmp);
18c9b560
AZ
2441 gen_op_iwmmxt_movq_wRn_M0(wrd);
2442 gen_op_iwmmxt_set_mup();
2443 gen_op_iwmmxt_set_cup();
2444 break;
2445 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2446 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2447 if (((insn >> 22) & 3) == 0)
2448 return 1;
18c9b560
AZ
2449 wrd = (insn >> 12) & 0xf;
2450 rd0 = (insn >> 16) & 0xf;
2451 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2452 tmp = tcg_temp_new_i32();
da6b5335 2453 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2454 tcg_temp_free_i32(tmp);
18c9b560 2455 return 1;
da6b5335 2456 }
18c9b560 2457 switch ((insn >> 22) & 3) {
18c9b560 2458 case 1:
477955bd 2459 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2460 break;
2461 case 2:
477955bd 2462 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2463 break;
2464 case 3:
477955bd 2465 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2466 break;
2467 }
7d1b0095 2468 tcg_temp_free_i32(tmp);
18c9b560
AZ
2469 gen_op_iwmmxt_movq_wRn_M0(wrd);
2470 gen_op_iwmmxt_set_mup();
2471 gen_op_iwmmxt_set_cup();
2472 break;
2473 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2474 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2475 if (((insn >> 22) & 3) == 0)
2476 return 1;
18c9b560
AZ
2477 wrd = (insn >> 12) & 0xf;
2478 rd0 = (insn >> 16) & 0xf;
2479 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2480 tmp = tcg_temp_new_i32();
da6b5335 2481 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2482 tcg_temp_free_i32(tmp);
18c9b560 2483 return 1;
da6b5335 2484 }
18c9b560 2485 switch ((insn >> 22) & 3) {
18c9b560 2486 case 1:
477955bd 2487 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2488 break;
2489 case 2:
477955bd 2490 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2491 break;
2492 case 3:
477955bd 2493 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2494 break;
2495 }
7d1b0095 2496 tcg_temp_free_i32(tmp);
18c9b560
AZ
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 gen_op_iwmmxt_set_cup();
2500 break;
2501 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2502 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2503 if (((insn >> 22) & 3) == 0)
2504 return 1;
18c9b560
AZ
2505 wrd = (insn >> 12) & 0xf;
2506 rd0 = (insn >> 16) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2508 tmp = tcg_temp_new_i32();
18c9b560 2509 switch ((insn >> 22) & 3) {
18c9b560 2510 case 1:
da6b5335 2511 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2512 tcg_temp_free_i32(tmp);
18c9b560 2513 return 1;
da6b5335 2514 }
477955bd 2515 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2516 break;
2517 case 2:
da6b5335 2518 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2519 tcg_temp_free_i32(tmp);
18c9b560 2520 return 1;
da6b5335 2521 }
477955bd 2522 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2523 break;
2524 case 3:
da6b5335 2525 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2526 tcg_temp_free_i32(tmp);
18c9b560 2527 return 1;
da6b5335 2528 }
477955bd 2529 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2530 break;
2531 }
7d1b0095 2532 tcg_temp_free_i32(tmp);
18c9b560
AZ
2533 gen_op_iwmmxt_movq_wRn_M0(wrd);
2534 gen_op_iwmmxt_set_mup();
2535 gen_op_iwmmxt_set_cup();
2536 break;
2537 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2538 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2539 wrd = (insn >> 12) & 0xf;
2540 rd0 = (insn >> 16) & 0xf;
2541 rd1 = (insn >> 0) & 0xf;
2542 gen_op_iwmmxt_movq_M0_wRn(rd0);
2543 switch ((insn >> 22) & 3) {
2544 case 0:
2545 if (insn & (1 << 21))
2546 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2547 else
2548 gen_op_iwmmxt_minub_M0_wRn(rd1);
2549 break;
2550 case 1:
2551 if (insn & (1 << 21))
2552 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2553 else
2554 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2555 break;
2556 case 2:
2557 if (insn & (1 << 21))
2558 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2559 else
2560 gen_op_iwmmxt_minul_M0_wRn(rd1);
2561 break;
2562 case 3:
2563 return 1;
2564 }
2565 gen_op_iwmmxt_movq_wRn_M0(wrd);
2566 gen_op_iwmmxt_set_mup();
2567 break;
2568 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2569 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2570 wrd = (insn >> 12) & 0xf;
2571 rd0 = (insn >> 16) & 0xf;
2572 rd1 = (insn >> 0) & 0xf;
2573 gen_op_iwmmxt_movq_M0_wRn(rd0);
2574 switch ((insn >> 22) & 3) {
2575 case 0:
2576 if (insn & (1 << 21))
2577 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2578 else
2579 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2580 break;
2581 case 1:
2582 if (insn & (1 << 21))
2583 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2584 else
2585 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2586 break;
2587 case 2:
2588 if (insn & (1 << 21))
2589 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2590 else
2591 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2592 break;
2593 case 3:
2594 return 1;
2595 }
2596 gen_op_iwmmxt_movq_wRn_M0(wrd);
2597 gen_op_iwmmxt_set_mup();
2598 break;
2599 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2600 case 0x402: case 0x502: case 0x602: case 0x702:
2601 wrd = (insn >> 12) & 0xf;
2602 rd0 = (insn >> 16) & 0xf;
2603 rd1 = (insn >> 0) & 0xf;
2604 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2605 tmp = tcg_const_i32((insn >> 20) & 3);
2606 iwmmxt_load_reg(cpu_V1, rd1);
2607 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2608 tcg_temp_free_i32(tmp);
18c9b560
AZ
2609 gen_op_iwmmxt_movq_wRn_M0(wrd);
2610 gen_op_iwmmxt_set_mup();
2611 break;
2612 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2613 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2614 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2615 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2616 wrd = (insn >> 12) & 0xf;
2617 rd0 = (insn >> 16) & 0xf;
2618 rd1 = (insn >> 0) & 0xf;
2619 gen_op_iwmmxt_movq_M0_wRn(rd0);
2620 switch ((insn >> 20) & 0xf) {
2621 case 0x0:
2622 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2623 break;
2624 case 0x1:
2625 gen_op_iwmmxt_subub_M0_wRn(rd1);
2626 break;
2627 case 0x3:
2628 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2629 break;
2630 case 0x4:
2631 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2632 break;
2633 case 0x5:
2634 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2635 break;
2636 case 0x7:
2637 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2638 break;
2639 case 0x8:
2640 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2641 break;
2642 case 0x9:
2643 gen_op_iwmmxt_subul_M0_wRn(rd1);
2644 break;
2645 case 0xb:
2646 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2647 break;
2648 default:
2649 return 1;
2650 }
2651 gen_op_iwmmxt_movq_wRn_M0(wrd);
2652 gen_op_iwmmxt_set_mup();
2653 gen_op_iwmmxt_set_cup();
2654 break;
2655 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2656 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2657 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2658 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2659 wrd = (insn >> 12) & 0xf;
2660 rd0 = (insn >> 16) & 0xf;
2661 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2662 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2663 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2664 tcg_temp_free_i32(tmp);
18c9b560
AZ
2665 gen_op_iwmmxt_movq_wRn_M0(wrd);
2666 gen_op_iwmmxt_set_mup();
2667 gen_op_iwmmxt_set_cup();
2668 break;
2669 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2670 case 0x418: case 0x518: case 0x618: case 0x718:
2671 case 0x818: case 0x918: case 0xa18: case 0xb18:
2672 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2673 wrd = (insn >> 12) & 0xf;
2674 rd0 = (insn >> 16) & 0xf;
2675 rd1 = (insn >> 0) & 0xf;
2676 gen_op_iwmmxt_movq_M0_wRn(rd0);
2677 switch ((insn >> 20) & 0xf) {
2678 case 0x0:
2679 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2680 break;
2681 case 0x1:
2682 gen_op_iwmmxt_addub_M0_wRn(rd1);
2683 break;
2684 case 0x3:
2685 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2686 break;
2687 case 0x4:
2688 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2689 break;
2690 case 0x5:
2691 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2692 break;
2693 case 0x7:
2694 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2695 break;
2696 case 0x8:
2697 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2698 break;
2699 case 0x9:
2700 gen_op_iwmmxt_addul_M0_wRn(rd1);
2701 break;
2702 case 0xb:
2703 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2704 break;
2705 default:
2706 return 1;
2707 }
2708 gen_op_iwmmxt_movq_wRn_M0(wrd);
2709 gen_op_iwmmxt_set_mup();
2710 gen_op_iwmmxt_set_cup();
2711 break;
2712 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2713 case 0x408: case 0x508: case 0x608: case 0x708:
2714 case 0x808: case 0x908: case 0xa08: case 0xb08:
2715 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2716 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2717 return 1;
18c9b560
AZ
2718 wrd = (insn >> 12) & 0xf;
2719 rd0 = (insn >> 16) & 0xf;
2720 rd1 = (insn >> 0) & 0xf;
2721 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2722 switch ((insn >> 22) & 3) {
18c9b560
AZ
2723 case 1:
2724 if (insn & (1 << 21))
2725 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2726 else
2727 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2728 break;
2729 case 2:
2730 if (insn & (1 << 21))
2731 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2732 else
2733 gen_op_iwmmxt_packul_M0_wRn(rd1);
2734 break;
2735 case 3:
2736 if (insn & (1 << 21))
2737 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2738 else
2739 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2740 break;
2741 }
2742 gen_op_iwmmxt_movq_wRn_M0(wrd);
2743 gen_op_iwmmxt_set_mup();
2744 gen_op_iwmmxt_set_cup();
2745 break;
2746 case 0x201: case 0x203: case 0x205: case 0x207:
2747 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2748 case 0x211: case 0x213: case 0x215: case 0x217:
2749 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2750 wrd = (insn >> 5) & 0xf;
2751 rd0 = (insn >> 12) & 0xf;
2752 rd1 = (insn >> 0) & 0xf;
2753 if (rd0 == 0xf || rd1 == 0xf)
2754 return 1;
2755 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2756 tmp = load_reg(s, rd0);
2757 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2758 switch ((insn >> 16) & 0xf) {
2759 case 0x0: /* TMIA */
da6b5335 2760 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2761 break;
2762 case 0x8: /* TMIAPH */
da6b5335 2763 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2764 break;
2765 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2766 if (insn & (1 << 16))
da6b5335 2767 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2768 if (insn & (1 << 17))
da6b5335
FN
2769 tcg_gen_shri_i32(tmp2, tmp2, 16);
2770 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2771 break;
2772 default:
7d1b0095
PM
2773 tcg_temp_free_i32(tmp2);
2774 tcg_temp_free_i32(tmp);
18c9b560
AZ
2775 return 1;
2776 }
7d1b0095
PM
2777 tcg_temp_free_i32(tmp2);
2778 tcg_temp_free_i32(tmp);
18c9b560
AZ
2779 gen_op_iwmmxt_movq_wRn_M0(wrd);
2780 gen_op_iwmmxt_set_mup();
2781 break;
2782 default:
2783 return 1;
2784 }
2785
2786 return 0;
2787}
2788
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction). */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only accumulator 0 is implemented on XScale cores. */
        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        /* The MIA family shares its multiply-accumulate helpers with the
           iwMMXt TMIA instructions; the accumulator lives in cpu_M0. */
        switch ((insn >> 16) & 0xf) {
        case 0x0: /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8: /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: /* MIABB */
        case 0xd: /* MIABT */
        case 0xe: /* MIATB */
        case 0xf: /* MIATT */
            /* Bits 16/17 select the top (T) or bottom (B) halfword of
               each source operand; shift the chosen half into place. */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            /* NOTE(review): tmp/tmp2 are not freed on this path; the other
               decode errors above return before allocating.  Presumably
               benign for TCG temps at end-of-TB — confirm against the
               surrounding translator's temp-handling conventions. */
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        /* Only accumulator 0 is implemented. */
        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            /* Read the 64-bit accumulator into rdlo/rdhi; the high word
               is masked because the accumulator is only 40 bits wide. */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            /* Write rdlo/rdhi into the 64-bit accumulator. */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    /* Neither encoding matched: undefined instruction. */
    return 1;
}
2858
/* Shift x right by n bits; a negative count means shift left by -n.
   Lets VFP_SREG use one expression for any field position. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision (S) register number: 4 bits at "bigbit"
   form bits [4:1] and the single bit at "smallbit" is bit 0. */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision (D) register number into "reg".
   With VFP3 (32 D registers) "smallbit" supplies bit 4; without it the
   bit must be zero or the instruction is UNDEF.
   NOTE: expands to a "return 1" in the *calling* function on the UNDEF
   path, so it may only be used inside the disas_* routines. */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Destination (D), first operand (N) and second operand (M) register
   fields at their standard VFP encoding positions. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2878
4373f3ce 2879/* Move between integer and VFP cores. */
39d5492a 2880static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2881{
39d5492a 2882 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2883 tcg_gen_mov_i32(tmp, cpu_F0s);
2884 return tmp;
2885}
2886
39d5492a 2887static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2888{
2889 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2890 tcg_temp_free_i32(tmp);
4373f3ce
PB
2891}
2892
39d5492a 2893static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2894{
39d5492a 2895 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2896 if (shift)
2897 tcg_gen_shri_i32(var, var, shift);
86831435 2898 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2899 tcg_gen_shli_i32(tmp, var, 8);
2900 tcg_gen_or_i32(var, var, tmp);
2901 tcg_gen_shli_i32(tmp, var, 16);
2902 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2903 tcg_temp_free_i32(tmp);
ad69471c
PB
2904}
2905
39d5492a 2906static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2907{
39d5492a 2908 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2909 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2910 tcg_gen_shli_i32(tmp, var, 16);
2911 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2912 tcg_temp_free_i32(tmp);
ad69471c
PB
2913}
2914
39d5492a 2915static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2916{
39d5492a 2917 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2918 tcg_gen_andi_i32(var, var, 0xffff0000);
2919 tcg_gen_shri_i32(tmp, var, 16);
2920 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2921 tcg_temp_free_i32(tmp);
ad69471c
PB
2922}
2923
39d5492a 2924static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2925{
2926 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2927 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2928 switch (size) {
2929 case 0:
12dcc321 2930 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2931 gen_neon_dup_u8(tmp, 0);
2932 break;
2933 case 1:
12dcc321 2934 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2935 gen_neon_dup_low16(tmp);
2936 break;
2937 case 2:
12dcc321 2938 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2939 break;
2940 default: /* Avoid compiler warnings. */
2941 abort();
2942 }
2943 return tmp;
2944}
2945
/* Handle the v8 VSEL{EQ,VS,GE,GT} instruction: select between Sn/Dn
 * and Sm/Dm according to the condition encoded in bits [21:20] of
 * INSN, writing the result to Sd/Dd.  DP is nonzero for double
 * precision.  Always returns 0 (success; the decode has already been
 * validated by the caller).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32 bit flag values so they can feed 64 bit
         * movconds.  ZF is compared against zero, so zero-extension
         * suffices; NF and VF are tested via their sign, so they are
         * sign-extended.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two cascaded movconds: apply the !Z test first, then
             * refine the result with the N == V test.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two cascaded movconds, as in the dp case above. */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
3054
40cfacdd
WN
3055static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3056 uint32_t rm, uint32_t dp)
3057{
3058 uint32_t vmin = extract32(insn, 6, 1);
3059 TCGv_ptr fpst = get_fpstatus_ptr(0);
3060
3061 if (dp) {
3062 TCGv_i64 frn, frm, dest;
3063
3064 frn = tcg_temp_new_i64();
3065 frm = tcg_temp_new_i64();
3066 dest = tcg_temp_new_i64();
3067
3068 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3069 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3070 if (vmin) {
f71a2ae5 3071 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3072 } else {
f71a2ae5 3073 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3074 }
3075 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3076 tcg_temp_free_i64(frn);
3077 tcg_temp_free_i64(frm);
3078 tcg_temp_free_i64(dest);
3079 } else {
3080 TCGv_i32 frn, frm, dest;
3081
3082 frn = tcg_temp_new_i32();
3083 frm = tcg_temp_new_i32();
3084 dest = tcg_temp_new_i32();
3085
3086 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3087 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3088 if (vmin) {
f71a2ae5 3089 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3090 } else {
f71a2ae5 3091 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3092 }
3093 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3094 tcg_temp_free_i32(frn);
3095 tcg_temp_free_i32(frm);
3096 tcg_temp_free_i32(dest);
3097 }
3098
3099 tcg_temp_free_ptr(fpst);
3100 return 0;
3101}
3102
7655f39b
WN
3103static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3104 int rounding)
3105{
3106 TCGv_ptr fpst = get_fpstatus_ptr(0);
3107 TCGv_i32 tcg_rmode;
3108
3109 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3110 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3111
3112 if (dp) {
3113 TCGv_i64 tcg_op;
3114 TCGv_i64 tcg_res;
3115 tcg_op = tcg_temp_new_i64();
3116 tcg_res = tcg_temp_new_i64();
3117 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3118 gen_helper_rintd(tcg_res, tcg_op, fpst);
3119 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3120 tcg_temp_free_i64(tcg_op);
3121 tcg_temp_free_i64(tcg_res);
3122 } else {
3123 TCGv_i32 tcg_op;
3124 TCGv_i32 tcg_res;
3125 tcg_op = tcg_temp_new_i32();
3126 tcg_res = tcg_temp_new_i32();
3127 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3128 gen_helper_rints(tcg_res, tcg_op, fpst);
3129 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3130 tcg_temp_free_i32(tcg_op);
3131 tcg_temp_free_i32(tcg_res);
3132 }
3133
3134 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3135 tcg_temp_free_i32(tcg_rmode);
3136
3137 tcg_temp_free_ptr(fpst);
3138 return 0;
3139}
3140
/* Handle the v8 VCVT{A,N,P,M} instructions: convert Sm/Dm to a 32 bit
 * integer in Sd using ROUNDING (an arm_fprounding value, overriding
 * the FPSCR rounding mode).  Bit 7 of INSN selects a signed
 * conversion; DP is nonzero for a double precision source.  Always
 * returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Zero fixed-point fraction bits: this is a pure int conversion. */
    tcg_shift = tcg_const_i32(0);

    /* Switch rounding mode; tcg_rmode receives the previous mode so
     * it can be restored below.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The helper result is 64 bit; only the low 32 bits are stored. */
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the original rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3198
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by insn bits [17:16] of the VRINT/VCVT v8 encodings.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3209
/* Decode the VFP instructions new in v8: VSEL, VMINNM/VMAXNM,
 * VRINT{A,N,P,M} and VCVT{A,N,P,M}.  These live in the encoding space
 * that is unconditional in ARM (and T=1 in Thumb); the caller
 * dispatches here for that space.  Returns nonzero for an
 * unrecognized (UNDEF) encoding, 0 on success.
 */
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Extract register numbers; D registers use an extra encoding bit. */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3243
a1c7273b 3244/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3245 (ie. an undefined instruction). */
7dcc1f89 3246static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3247{
3248 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3249 int dp, veclen;
39d5492a
PM
3250 TCGv_i32 addr;
3251 TCGv_i32 tmp;
3252 TCGv_i32 tmp2;
b7bcbe95 3253
d614a513 3254 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3255 return 1;
d614a513 3256 }
40f137e1 3257
2c7ffc41
PM
3258 /* FIXME: this access check should not take precedence over UNDEF
3259 * for invalid encodings; we will generate incorrect syndrome information
3260 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3261 */
9dbbc748 3262 if (s->fp_excp_el) {
2c7ffc41 3263 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3264 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3265 return 0;
3266 }
3267
5df8bac1 3268 if (!s->vfp_enabled) {
9ee6e8bb 3269 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3270 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3271 return 1;
3272 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3273 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3274 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3275 return 1;
a50c0f51 3276 }
40f137e1 3277 }
6a57f3eb
WN
3278
3279 if (extract32(insn, 28, 4) == 0xf) {
3280 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3281 * only used in v8 and above.
3282 */
7dcc1f89 3283 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3284 }
3285
b7bcbe95
FB
3286 dp = ((insn & 0xf00) == 0xb00);
3287 switch ((insn >> 24) & 0xf) {
3288 case 0xe:
3289 if (insn & (1 << 4)) {
3290 /* single register transfer */
b7bcbe95
FB
3291 rd = (insn >> 12) & 0xf;
3292 if (dp) {
9ee6e8bb
PB
3293 int size;
3294 int pass;
3295
3296 VFP_DREG_N(rn, insn);
3297 if (insn & 0xf)
b7bcbe95 3298 return 1;
9ee6e8bb 3299 if (insn & 0x00c00060
d614a513 3300 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3301 return 1;
d614a513 3302 }
9ee6e8bb
PB
3303
3304 pass = (insn >> 21) & 1;
3305 if (insn & (1 << 22)) {
3306 size = 0;
3307 offset = ((insn >> 5) & 3) * 8;
3308 } else if (insn & (1 << 5)) {
3309 size = 1;
3310 offset = (insn & (1 << 6)) ? 16 : 0;
3311 } else {
3312 size = 2;
3313 offset = 0;
3314 }
18c9b560 3315 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3316 /* vfp->arm */
ad69471c 3317 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3318 switch (size) {
3319 case 0:
9ee6e8bb 3320 if (offset)
ad69471c 3321 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3322 if (insn & (1 << 23))
ad69471c 3323 gen_uxtb(tmp);
9ee6e8bb 3324 else
ad69471c 3325 gen_sxtb(tmp);
9ee6e8bb
PB
3326 break;
3327 case 1:
9ee6e8bb
PB
3328 if (insn & (1 << 23)) {
3329 if (offset) {
ad69471c 3330 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3331 } else {
ad69471c 3332 gen_uxth(tmp);
9ee6e8bb
PB
3333 }
3334 } else {
3335 if (offset) {
ad69471c 3336 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3337 } else {
ad69471c 3338 gen_sxth(tmp);
9ee6e8bb
PB
3339 }
3340 }
3341 break;
3342 case 2:
9ee6e8bb
PB
3343 break;
3344 }
ad69471c 3345 store_reg(s, rd, tmp);
b7bcbe95
FB
3346 } else {
3347 /* arm->vfp */
ad69471c 3348 tmp = load_reg(s, rd);
9ee6e8bb
PB
3349 if (insn & (1 << 23)) {
3350 /* VDUP */
3351 if (size == 0) {
ad69471c 3352 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3353 } else if (size == 1) {
ad69471c 3354 gen_neon_dup_low16(tmp);
9ee6e8bb 3355 }
cbbccffc 3356 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3357 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3358 tcg_gen_mov_i32(tmp2, tmp);
3359 neon_store_reg(rn, n, tmp2);
3360 }
3361 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3362 } else {
3363 /* VMOV */
3364 switch (size) {
3365 case 0:
ad69471c 3366 tmp2 = neon_load_reg(rn, pass);
d593c48e 3367 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3368 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3369 break;
3370 case 1:
ad69471c 3371 tmp2 = neon_load_reg(rn, pass);
d593c48e 3372 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3373 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3374 break;
3375 case 2:
9ee6e8bb
PB
3376 break;
3377 }
ad69471c 3378 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3379 }
b7bcbe95 3380 }
9ee6e8bb
PB
3381 } else { /* !dp */
3382 if ((insn & 0x6f) != 0x00)
3383 return 1;
3384 rn = VFP_SREG_N(insn);
18c9b560 3385 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3386 /* vfp->arm */
3387 if (insn & (1 << 21)) {
3388 /* system register */
40f137e1 3389 rn >>= 1;
9ee6e8bb 3390
b7bcbe95 3391 switch (rn) {
40f137e1 3392 case ARM_VFP_FPSID:
4373f3ce 3393 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3394 VFP3 restricts all id registers to privileged
3395 accesses. */
3396 if (IS_USER(s)
d614a513 3397 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3398 return 1;
d614a513 3399 }
4373f3ce 3400 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3401 break;
40f137e1 3402 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3403 if (IS_USER(s))
3404 return 1;
4373f3ce 3405 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3406 break;
40f137e1
PB
3407 case ARM_VFP_FPINST:
3408 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3409 /* Not present in VFP3. */
3410 if (IS_USER(s)
d614a513 3411 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3412 return 1;
d614a513 3413 }
4373f3ce 3414 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3415 break;
40f137e1 3416 case ARM_VFP_FPSCR:
601d70b9 3417 if (rd == 15) {
4373f3ce
PB
3418 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3419 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3420 } else {
7d1b0095 3421 tmp = tcg_temp_new_i32();
4373f3ce
PB
3422 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3423 }
b7bcbe95 3424 break;
a50c0f51 3425 case ARM_VFP_MVFR2:
d614a513 3426 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3427 return 1;
3428 }
3429 /* fall through */
9ee6e8bb
PB
3430 case ARM_VFP_MVFR0:
3431 case ARM_VFP_MVFR1:
3432 if (IS_USER(s)
d614a513 3433 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3434 return 1;
d614a513 3435 }
4373f3ce 3436 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3437 break;
b7bcbe95
FB
3438 default:
3439 return 1;
3440 }
3441 } else {
3442 gen_mov_F0_vreg(0, rn);
4373f3ce 3443 tmp = gen_vfp_mrs();
b7bcbe95
FB
3444 }
3445 if (rd == 15) {
b5ff1b31 3446 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3447 gen_set_nzcv(tmp);
7d1b0095 3448 tcg_temp_free_i32(tmp);
4373f3ce
PB
3449 } else {
3450 store_reg(s, rd, tmp);
3451 }
b7bcbe95
FB
3452 } else {
3453 /* arm->vfp */
b7bcbe95 3454 if (insn & (1 << 21)) {
40f137e1 3455 rn >>= 1;
b7bcbe95
FB
3456 /* system register */
3457 switch (rn) {
40f137e1 3458 case ARM_VFP_FPSID:
9ee6e8bb
PB
3459 case ARM_VFP_MVFR0:
3460 case ARM_VFP_MVFR1:
b7bcbe95
FB
3461 /* Writes are ignored. */
3462 break;
40f137e1 3463 case ARM_VFP_FPSCR:
e4c1cfa5 3464 tmp = load_reg(s, rd);
4373f3ce 3465 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3466 tcg_temp_free_i32(tmp);
b5ff1b31 3467 gen_lookup_tb(s);
b7bcbe95 3468 break;
40f137e1 3469 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3470 if (IS_USER(s))
3471 return 1;
71b3c3de
JR
3472 /* TODO: VFP subarchitecture support.
3473 * For now, keep the EN bit only */
e4c1cfa5 3474 tmp = load_reg(s, rd);
71b3c3de 3475 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3476 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3477 gen_lookup_tb(s);
3478 break;
3479 case ARM_VFP_FPINST:
3480 case ARM_VFP_FPINST2:
23adb861
PM
3481 if (IS_USER(s)) {
3482 return 1;
3483 }
e4c1cfa5 3484 tmp = load_reg(s, rd);
4373f3ce 3485 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3486 break;
b7bcbe95
FB
3487 default:
3488 return 1;
3489 }
3490 } else {
e4c1cfa5 3491 tmp = load_reg(s, rd);
4373f3ce 3492 gen_vfp_msr(tmp);
b7bcbe95
FB
3493 gen_mov_vreg_F0(0, rn);
3494 }
3495 }
3496 }
3497 } else {
3498 /* data processing */
3499 /* The opcode is in bits 23, 21, 20 and 6. */
3500 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3501 if (dp) {
3502 if (op == 15) {
3503 /* rn is opcode */
3504 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3505 } else {
3506 /* rn is register number */
9ee6e8bb 3507 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3508 }
3509
239c20c7
WN
3510 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3511 ((rn & 0x1e) == 0x6))) {
3512 /* Integer or single/half precision destination. */
9ee6e8bb 3513 rd = VFP_SREG_D(insn);
b7bcbe95 3514 } else {
9ee6e8bb 3515 VFP_DREG_D(rd, insn);
b7bcbe95 3516 }
04595bf6 3517 if (op == 15 &&
239c20c7
WN
3518 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3519 ((rn & 0x1e) == 0x4))) {
3520 /* VCVT from int or half precision is always from S reg
3521 * regardless of dp bit. VCVT with immediate frac_bits
3522 * has same format as SREG_M.
04595bf6
PM
3523 */
3524 rm = VFP_SREG_M(insn);
b7bcbe95 3525 } else {
9ee6e8bb 3526 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3527 }
3528 } else {
9ee6e8bb 3529 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3530 if (op == 15 && rn == 15) {
3531 /* Double precision destination. */
9ee6e8bb
PB
3532 VFP_DREG_D(rd, insn);
3533 } else {
3534 rd = VFP_SREG_D(insn);
3535 }
04595bf6
PM
3536 /* NB that we implicitly rely on the encoding for the frac_bits
3537 * in VCVT of fixed to float being the same as that of an SREG_M
3538 */
9ee6e8bb 3539 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3540 }
3541
69d1fc22 3542 veclen = s->vec_len;
b7bcbe95
FB
3543 if (op == 15 && rn > 3)
3544 veclen = 0;
3545
3546 /* Shut up compiler warnings. */
3547 delta_m = 0;
3548 delta_d = 0;
3549 bank_mask = 0;
3b46e624 3550
b7bcbe95
FB
3551 if (veclen > 0) {
3552 if (dp)
3553 bank_mask = 0xc;
3554 else
3555 bank_mask = 0x18;
3556
3557 /* Figure out what type of vector operation this is. */
3558 if ((rd & bank_mask) == 0) {
3559 /* scalar */
3560 veclen = 0;
3561 } else {
3562 if (dp)
69d1fc22 3563 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3564 else
69d1fc22 3565 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3566
3567 if ((rm & bank_mask) == 0) {
3568 /* mixed scalar/vector */
3569 delta_m = 0;
3570 } else {
3571 /* vector */
3572 delta_m = delta_d;
3573 }
3574 }
3575 }
3576
3577 /* Load the initial operands. */
3578 if (op == 15) {
3579 switch (rn) {
3580 case 16:
3581 case 17:
3582 /* Integer source */
3583 gen_mov_F0_vreg(0, rm);
3584 break;
3585 case 8:
3586 case 9:
3587 /* Compare */
3588 gen_mov_F0_vreg(dp, rd);
3589 gen_mov_F1_vreg(dp, rm);
3590 break;
3591 case 10:
3592 case 11:
3593 /* Compare with zero */
3594 gen_mov_F0_vreg(dp, rd);
3595 gen_vfp_F1_ld0(dp);
3596 break;
9ee6e8bb
PB
3597 case 20:
3598 case 21:
3599 case 22:
3600 case 23:
644ad806
PB
3601 case 28:
3602 case 29:
3603 case 30:
3604 case 31:
9ee6e8bb
PB
3605 /* Source and destination the same. */
3606 gen_mov_F0_vreg(dp, rd);
3607 break;
6e0c0ed1
PM
3608 case 4:
3609 case 5:
3610 case 6:
3611 case 7:
239c20c7
WN
3612 /* VCVTB, VCVTT: only present with the halfprec extension
3613 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3614 * (we choose to UNDEF)
6e0c0ed1 3615 */
d614a513
PM
3616 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3617 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3618 return 1;
3619 }
239c20c7
WN
3620 if (!extract32(rn, 1, 1)) {
3621 /* Half precision source. */
3622 gen_mov_F0_vreg(0, rm);
3623 break;
3624 }
6e0c0ed1 3625 /* Otherwise fall through */
b7bcbe95
FB
3626 default:
3627 /* One source operand. */
3628 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3629 break;
b7bcbe95
FB
3630 }
3631 } else {
3632 /* Two source operands. */
3633 gen_mov_F0_vreg(dp, rn);
3634 gen_mov_F1_vreg(dp, rm);
3635 }
3636
3637 for (;;) {
3638 /* Perform the calculation. */
3639 switch (op) {
605a6aed
PM
3640 case 0: /* VMLA: fd + (fn * fm) */
3641 /* Note that order of inputs to the add matters for NaNs */
3642 gen_vfp_F1_mul(dp);
3643 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3644 gen_vfp_add(dp);
3645 break;
605a6aed 3646 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3647 gen_vfp_mul(dp);
605a6aed
PM
3648 gen_vfp_F1_neg(dp);
3649 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3650 gen_vfp_add(dp);
3651 break;
605a6aed
PM
3652 case 2: /* VNMLS: -fd + (fn * fm) */
3653 /* Note that it isn't valid to replace (-A + B) with (B - A)
3654 * or similar plausible looking simplifications
3655 * because this will give wrong results for NaNs.
3656 */
3657 gen_vfp_F1_mul(dp);
3658 gen_mov_F0_vreg(dp, rd);
3659 gen_vfp_neg(dp);
3660 gen_vfp_add(dp);
b7bcbe95 3661 break;
605a6aed 3662 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3663 gen_vfp_mul(dp);
605a6aed
PM
3664 gen_vfp_F1_neg(dp);
3665 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3666 gen_vfp_neg(dp);
605a6aed 3667 gen_vfp_add(dp);
b7bcbe95
FB
3668 break;
3669 case 4: /* mul: fn * fm */
3670 gen_vfp_mul(dp);
3671 break;
3672 case 5: /* nmul: -(fn * fm) */
3673 gen_vfp_mul(dp);
3674 gen_vfp_neg(dp);
3675 break;
3676 case 6: /* add: fn + fm */
3677 gen_vfp_add(dp);
3678 break;
3679 case 7: /* sub: fn - fm */
3680 gen_vfp_sub(dp);
3681 break;
3682 case 8: /* div: fn / fm */
3683 gen_vfp_div(dp);
3684 break;
da97f52c
PM
3685 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3686 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3687 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3688 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3689 /* These are fused multiply-add, and must be done as one
3690 * floating point operation with no rounding between the
3691 * multiplication and addition steps.
3692 * NB that doing the negations here as separate steps is
3693 * correct : an input NaN should come out with its sign bit
3694 * flipped if it is a negated-input.
3695 */
d614a513 3696 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3697 return 1;
3698 }
3699 if (dp) {
3700 TCGv_ptr fpst;
3701 TCGv_i64 frd;
3702 if (op & 1) {
3703 /* VFNMS, VFMS */
3704 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3705 }
3706 frd = tcg_temp_new_i64();
3707 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3708 if (op & 2) {
3709 /* VFNMA, VFNMS */
3710 gen_helper_vfp_negd(frd, frd);
3711 }
3712 fpst = get_fpstatus_ptr(0);
3713 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3714 cpu_F1d, frd, fpst);
3715 tcg_temp_free_ptr(fpst);
3716 tcg_temp_free_i64(frd);
3717 } else {
3718 TCGv_ptr fpst;
3719 TCGv_i32 frd;
3720 if (op & 1) {
3721 /* VFNMS, VFMS */
3722 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3723 }
3724 frd = tcg_temp_new_i32();
3725 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3726 if (op & 2) {
3727 gen_helper_vfp_negs(frd, frd);
3728 }
3729 fpst = get_fpstatus_ptr(0);
3730 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3731 cpu_F1s, frd, fpst);
3732 tcg_temp_free_ptr(fpst);
3733 tcg_temp_free_i32(frd);
3734 }
3735 break;
9ee6e8bb 3736 case 14: /* fconst */
d614a513
PM
3737 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3738 return 1;
3739 }
9ee6e8bb
PB
3740
3741 n = (insn << 12) & 0x80000000;
3742 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3743 if (dp) {
3744 if (i & 0x40)
3745 i |= 0x3f80;
3746 else
3747 i |= 0x4000;
3748 n |= i << 16;
4373f3ce 3749 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3750 } else {
3751 if (i & 0x40)
3752 i |= 0x780;
3753 else
3754 i |= 0x800;
3755 n |= i << 19;
5b340b51 3756 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3757 }
9ee6e8bb 3758 break;
b7bcbe95
FB
3759 case 15: /* extension space */
3760 switch (rn) {
3761 case 0: /* cpy */
3762 /* no-op */
3763 break;
3764 case 1: /* abs */
3765 gen_vfp_abs(dp);
3766 break;
3767 case 2: /* neg */
3768 gen_vfp_neg(dp);
3769 break;
3770 case 3: /* sqrt */
3771 gen_vfp_sqrt(dp);
3772 break;
239c20c7 3773 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3774 tmp = gen_vfp_mrs();
3775 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3776 if (dp) {
3777 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3778 cpu_env);
3779 } else {
3780 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3781 cpu_env);
3782 }
7d1b0095 3783 tcg_temp_free_i32(tmp);
60011498 3784 break;
239c20c7 3785 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3786 tmp = gen_vfp_mrs();
3787 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3788 if (dp) {
3789 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3790 cpu_env);
3791 } else {
3792 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3793 cpu_env);
3794 }
7d1b0095 3795 tcg_temp_free_i32(tmp);
60011498 3796 break;
239c20c7 3797 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3798 tmp = tcg_temp_new_i32();
239c20c7
WN
3799 if (dp) {
3800 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3801 cpu_env);
3802 } else {
3803 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3804 cpu_env);
3805 }
60011498
PB
3806 gen_mov_F0_vreg(0, rd);
3807 tmp2 = gen_vfp_mrs();
3808 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3809 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3810 tcg_temp_free_i32(tmp2);
60011498
PB
3811 gen_vfp_msr(tmp);
3812 break;
239c20c7 3813 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3814 tmp = tcg_temp_new_i32();
239c20c7
WN
3815 if (dp) {
3816 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3817 cpu_env);
3818 } else {
3819 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3820 cpu_env);
3821 }
60011498
PB
3822 tcg_gen_shli_i32(tmp, tmp, 16);
3823 gen_mov_F0_vreg(0, rd);
3824 tmp2 = gen_vfp_mrs();
3825 tcg_gen_ext16u_i32(tmp2, tmp2);
3826 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3827 tcg_temp_free_i32(tmp2);
60011498
PB
3828 gen_vfp_msr(tmp);
3829 break;
b7bcbe95
FB
3830 case 8: /* cmp */
3831 gen_vfp_cmp(dp);
3832 break;
3833 case 9: /* cmpe */
3834 gen_vfp_cmpe(dp);
3835 break;
3836 case 10: /* cmpz */
3837 gen_vfp_cmp(dp);
3838 break;
3839 case 11: /* cmpez */
3840 gen_vfp_F1_ld0(dp);
3841 gen_vfp_cmpe(dp);
3842 break;
664c6733
WN
3843 case 12: /* vrintr */
3844 {
3845 TCGv_ptr fpst = get_fpstatus_ptr(0);
3846 if (dp) {
3847 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3848 } else {
3849 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3850 }
3851 tcg_temp_free_ptr(fpst);
3852 break;
3853 }
a290c62a
WN
3854 case 13: /* vrintz */
3855 {
3856 TCGv_ptr fpst = get_fpstatus_ptr(0);
3857 TCGv_i32 tcg_rmode;
3858 tcg_rmode = tcg_const_i32(float_round_to_zero);
3859 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3860 if (dp) {
3861 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3862 } else {
3863 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3864 }
3865 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3866 tcg_temp_free_i32(tcg_rmode);
3867 tcg_temp_free_ptr(fpst);
3868 break;
3869 }
4e82bc01
WN
3870 case 14: /* vrintx */
3871 {
3872 TCGv_ptr fpst = get_fpstatus_ptr(0);
3873 if (dp) {
3874 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3875 } else {
3876 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3877 }
3878 tcg_temp_free_ptr(fpst);
3879 break;
3880 }
b7bcbe95
FB
3881 case 15: /* single<->double conversion */
3882 if (dp)
4373f3ce 3883 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3884 else
4373f3ce 3885 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3886 break;
3887 case 16: /* fuito */
5500b06c 3888 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3889 break;
3890 case 17: /* fsito */
5500b06c 3891 gen_vfp_sito(dp, 0);
b7bcbe95 3892 break;
9ee6e8bb 3893 case 20: /* fshto */
d614a513
PM
3894 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3895 return 1;
3896 }
5500b06c 3897 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3898 break;
3899 case 21: /* fslto */
d614a513
PM
3900 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3901 return 1;
3902 }
5500b06c 3903 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3904 break;
3905 case 22: /* fuhto */
d614a513
PM
3906 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3907 return 1;
3908 }
5500b06c 3909 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3910 break;
3911 case 23: /* fulto */
d614a513
PM
3912 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3913 return 1;
3914 }
5500b06c 3915 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3916 break;
b7bcbe95 3917 case 24: /* ftoui */
5500b06c 3918 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3919 break;
3920 case 25: /* ftouiz */
5500b06c 3921 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3922 break;
3923 case 26: /* ftosi */
5500b06c 3924 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3925 break;
3926 case 27: /* ftosiz */
5500b06c 3927 gen_vfp_tosiz(dp, 0);
b7bcbe95 3928 break;
9ee6e8bb 3929 case 28: /* ftosh */
d614a513
PM
3930 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3931 return 1;
3932 }
5500b06c 3933 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3934 break;
3935 case 29: /* ftosl */
d614a513
PM
3936 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3937 return 1;
3938 }
5500b06c 3939 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3940 break;
3941 case 30: /* ftouh */
d614a513
PM
3942 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3943 return 1;
3944 }
5500b06c 3945 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3946 break;
3947 case 31: /* ftoul */
d614a513
PM
3948 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3949 return 1;
3950 }
5500b06c 3951 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3952 break;
b7bcbe95 3953 default: /* undefined */
b7bcbe95
FB
3954 return 1;
3955 }
3956 break;
3957 default: /* undefined */
b7bcbe95
FB
3958 return 1;
3959 }
3960
3961 /* Write back the result. */
239c20c7
WN
3962 if (op == 15 && (rn >= 8 && rn <= 11)) {
3963 /* Comparison, do nothing. */
3964 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3965 (rn & 0x1e) == 0x6)) {
3966 /* VCVT double to int: always integer result.
3967 * VCVT double to half precision is always a single
3968 * precision result.
3969 */
b7bcbe95 3970 gen_mov_vreg_F0(0, rd);
239c20c7 3971 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3972 /* conversion */
3973 gen_mov_vreg_F0(!dp, rd);
239c20c7 3974 } else {
b7bcbe95 3975 gen_mov_vreg_F0(dp, rd);
239c20c7 3976 }
b7bcbe95
FB
3977
3978 /* break out of the loop if we have finished */
3979 if (veclen == 0)
3980 break;
3981
3982 if (op == 15 && delta_m == 0) {
3983 /* single source one-many */
3984 while (veclen--) {
3985 rd = ((rd + delta_d) & (bank_mask - 1))
3986 | (rd & bank_mask);
3987 gen_mov_vreg_F0(dp, rd);
3988 }
3989 break;
3990 }
3991 /* Setup the next operands. */
3992 veclen--;
3993 rd = ((rd + delta_d) & (bank_mask - 1))
3994 | (rd & bank_mask);
3995
3996 if (op == 15) {
3997 /* One source operand. */
3998 rm = ((rm + delta_m) & (bank_mask - 1))
3999 | (rm & bank_mask);
4000 gen_mov_F0_vreg(dp, rm);
4001 } else {
4002 /* Two source operands. */
4003 rn = ((rn + delta_d) & (bank_mask - 1))
4004 | (rn & bank_mask);
4005 gen_mov_F0_vreg(dp, rn);
4006 if (delta_m) {
4007 rm = ((rm + delta_m) & (bank_mask - 1))
4008 | (rm & bank_mask);
4009 gen_mov_F1_vreg(dp, rm);
4010 }
4011 }
4012 }
4013 }
4014 break;
4015 case 0xc:
4016 case 0xd:
8387da81 4017 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4018 /* two-register transfer */
4019 rn = (insn >> 16) & 0xf;
4020 rd = (insn >> 12) & 0xf;
4021 if (dp) {
9ee6e8bb
PB
4022 VFP_DREG_M(rm, insn);
4023 } else {
4024 rm = VFP_SREG_M(insn);
4025 }
b7bcbe95 4026
18c9b560 4027 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4028 /* vfp->arm */
4029 if (dp) {
4373f3ce
PB
4030 gen_mov_F0_vreg(0, rm * 2);
4031 tmp = gen_vfp_mrs();
4032 store_reg(s, rd, tmp);
4033 gen_mov_F0_vreg(0, rm * 2 + 1);
4034 tmp = gen_vfp_mrs();
4035 store_reg(s, rn, tmp);
b7bcbe95
FB
4036 } else {
4037 gen_mov_F0_vreg(0, rm);
4373f3ce 4038 tmp = gen_vfp_mrs();
8387da81 4039 store_reg(s, rd, tmp);
b7bcbe95 4040 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4041 tmp = gen_vfp_mrs();
8387da81 4042 store_reg(s, rn, tmp);
b7bcbe95
FB
4043 }
4044 } else {
4045 /* arm->vfp */
4046 if (dp) {
4373f3ce
PB
4047 tmp = load_reg(s, rd);
4048 gen_vfp_msr(tmp);
4049 gen_mov_vreg_F0(0, rm * 2);
4050 tmp = load_reg(s, rn);
4051 gen_vfp_msr(tmp);
4052 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4053 } else {
8387da81 4054 tmp = load_reg(s, rd);
4373f3ce 4055 gen_vfp_msr(tmp);
b7bcbe95 4056 gen_mov_vreg_F0(0, rm);
8387da81 4057 tmp = load_reg(s, rn);
4373f3ce 4058 gen_vfp_msr(tmp);
b7bcbe95
FB
4059 gen_mov_vreg_F0(0, rm + 1);
4060 }
4061 }
4062 } else {
4063 /* Load/store */
4064 rn = (insn >> 16) & 0xf;
4065 if (dp)
9ee6e8bb 4066 VFP_DREG_D(rd, insn);
b7bcbe95 4067 else
9ee6e8bb 4068 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4069 if ((insn & 0x01200000) == 0x01000000) {
4070 /* Single load/store */
4071 offset = (insn & 0xff) << 2;
4072 if ((insn & (1 << 23)) == 0)
4073 offset = -offset;
934814f1
PM
4074 if (s->thumb && rn == 15) {
4075 /* This is actually UNPREDICTABLE */
4076 addr = tcg_temp_new_i32();
4077 tcg_gen_movi_i32(addr, s->pc & ~2);
4078 } else {
4079 addr = load_reg(s, rn);
4080 }
312eea9f 4081 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4082 if (insn & (1 << 20)) {
312eea9f 4083 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4084 gen_mov_vreg_F0(dp, rd);
4085 } else {
4086 gen_mov_F0_vreg(dp, rd);
312eea9f 4087 gen_vfp_st(s, dp, addr);
b7bcbe95 4088 }
7d1b0095 4089 tcg_temp_free_i32(addr);
b7bcbe95
FB
4090 } else {
4091 /* load/store multiple */
934814f1 4092 int w = insn & (1 << 21);
b7bcbe95
FB
4093 if (dp)
4094 n = (insn >> 1) & 0x7f;
4095 else
4096 n = insn & 0xff;
4097
934814f1
PM
4098 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4099 /* P == U , W == 1 => UNDEF */
4100 return 1;
4101 }
4102 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4103 /* UNPREDICTABLE cases for bad immediates: we choose to
4104 * UNDEF to avoid generating huge numbers of TCG ops
4105 */
4106 return 1;
4107 }
4108 if (rn == 15 && w) {
4109 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4110 return 1;
4111 }
4112
4113 if (s->thumb && rn == 15) {
4114 /* This is actually UNPREDICTABLE */
4115 addr = tcg_temp_new_i32();
4116 tcg_gen_movi_i32(addr, s->pc & ~2);
4117 } else {
4118 addr = load_reg(s, rn);
4119 }
b7bcbe95 4120 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4121 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4122
4123 if (dp)
4124 offset = 8;
4125 else
4126 offset = 4;
4127 for (i = 0; i < n; i++) {
18c9b560 4128 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4129 /* load */
312eea9f 4130 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4131 gen_mov_vreg_F0(dp, rd + i);
4132 } else {
4133 /* store */
4134 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4135 gen_vfp_st(s, dp, addr);
b7bcbe95 4136 }
312eea9f 4137 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4138 }
934814f1 4139 if (w) {
b7bcbe95
FB
4140 /* writeback */
4141 if (insn & (1 << 24))
4142 offset = -offset * n;
4143 else if (dp && (insn & 1))
4144 offset = 4;
4145 else
4146 offset = 0;
4147
4148 if (offset != 0)
312eea9f
FN
4149 tcg_gen_addi_i32(addr, addr, offset);
4150 store_reg(s, rn, addr);
4151 } else {
7d1b0095 4152 tcg_temp_free_i32(addr);
b7bcbe95
FB
4153 }
4154 }
4155 }
4156 break;
4157 default:
4158 /* Should never happen. */
4159 return 1;
4160 }
4161 return 0;
4162}
4163
90aa39a1 4164static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4165{
90aa39a1 4166#ifndef CONFIG_USER_ONLY
dcba3a8d 4167 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4168 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4169#else
4170 return true;
4171#endif
4172}
6e256c93 4173
8a6b28c7
EC
4174static void gen_goto_ptr(void)
4175{
4176 TCGv addr = tcg_temp_new();
4177 tcg_gen_extu_i32_tl(addr, cpu_R[15]);
4178 tcg_gen_lookup_and_goto_ptr(addr);
4179 tcg_temp_free(addr);
4180}
4181
4cae8f56
AB
4182/* This will end the TB but doesn't guarantee we'll return to
4183 * cpu_loop_exec. Any live exit_requests will be processed as we
4184 * enter the next TB.
4185 */
8a6b28c7 4186static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
4187{
4188 if (use_goto_tb(s, dest)) {
57fec1fe 4189 tcg_gen_goto_tb(n);
eaed129d 4190 gen_set_pc_im(s, dest);
dcba3a8d 4191 tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
6e256c93 4192 } else {
eaed129d 4193 gen_set_pc_im(s, dest);
8a6b28c7 4194 gen_goto_ptr();
6e256c93 4195 }
dcba3a8d 4196 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
4197}
4198
8aaca4c0
FB
4199static inline void gen_jmp (DisasContext *s, uint32_t dest)
4200{
b636649f 4201 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4202 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4203 if (s->thumb)
d9ba4830
PB
4204 dest |= 1;
4205 gen_bx_im(s, dest);
8aaca4c0 4206 } else {
6e256c93 4207 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4208 }
4209}
4210
39d5492a 4211static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4212{
ee097184 4213 if (x)
d9ba4830 4214 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4215 else
d9ba4830 4216 gen_sxth(t0);
ee097184 4217 if (y)
d9ba4830 4218 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4219 else
d9ba4830
PB
4220 gen_sxth(t1);
4221 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4222}
4223
4224/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4225static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4226{
b5ff1b31
FB
4227 uint32_t mask;
4228
4229 mask = 0;
4230 if (flags & (1 << 0))
4231 mask |= 0xff;
4232 if (flags & (1 << 1))
4233 mask |= 0xff00;
4234 if (flags & (1 << 2))
4235 mask |= 0xff0000;
4236 if (flags & (1 << 3))
4237 mask |= 0xff000000;
9ee6e8bb 4238
2ae23e75 4239 /* Mask out undefined bits. */
9ee6e8bb 4240 mask &= ~CPSR_RESERVED;
d614a513 4241 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4242 mask &= ~CPSR_T;
d614a513
PM
4243 }
4244 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4245 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4246 }
4247 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4248 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4249 }
4250 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4251 mask &= ~CPSR_IT;
d614a513 4252 }
4051e12c
PM
4253 /* Mask out execution state and reserved bits. */
4254 if (!spsr) {
4255 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4256 }
b5ff1b31
FB
4257 /* Mask out privileged bits. */
4258 if (IS_USER(s))
9ee6e8bb 4259 mask &= CPSR_USER;
b5ff1b31
FB
4260 return mask;
4261}
4262
2fbac54b 4263/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4264static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4265{
39d5492a 4266 TCGv_i32 tmp;
b5ff1b31
FB
4267 if (spsr) {
4268 /* ??? This is also undefined in system mode. */
4269 if (IS_USER(s))
4270 return 1;
d9ba4830
PB
4271
4272 tmp = load_cpu_field(spsr);
4273 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4274 tcg_gen_andi_i32(t0, t0, mask);
4275 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4276 store_cpu_field(tmp, spsr);
b5ff1b31 4277 } else {
2fbac54b 4278 gen_set_cpsr(t0, mask);
b5ff1b31 4279 }
7d1b0095 4280 tcg_temp_free_i32(t0);
b5ff1b31
FB
4281 gen_lookup_tb(s);
4282 return 0;
4283}
4284
2fbac54b
FN
4285/* Returns nonzero if access to the PSR is not permitted. */
4286static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4287{
39d5492a 4288 TCGv_i32 tmp;
7d1b0095 4289 tmp = tcg_temp_new_i32();
2fbac54b
FN
4290 tcg_gen_movi_i32(tmp, val);
4291 return gen_set_psr(s, mask, spsr, tmp);
4292}
4293
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4438
4439static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4440{
4441 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4442 int tgtmode = 0, regno = 0;
4443
4444 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4445 return;
4446 }
4447
4448 /* Sync state because msr_banked() can raise exceptions */
4449 gen_set_condexec(s);
4450 gen_set_pc_im(s, s->pc - 4);
4451 tcg_reg = load_reg(s, rn);
4452 tcg_tgtmode = tcg_const_i32(tgtmode);
4453 tcg_regno = tcg_const_i32(regno);
4454 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4455 tcg_temp_free_i32(tcg_tgtmode);
4456 tcg_temp_free_i32(tcg_regno);
4457 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4458 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4459}
4460
4461static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4462{
4463 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4464 int tgtmode = 0, regno = 0;
4465
4466 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4467 return;
4468 }
4469
4470 /* Sync state because mrs_banked() can raise exceptions */
4471 gen_set_condexec(s);
4472 gen_set_pc_im(s, s->pc - 4);
4473 tcg_reg = tcg_temp_new_i32();
4474 tcg_tgtmode = tcg_const_i32(tgtmode);
4475 tcg_regno = tcg_const_i32(regno);
4476 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4477 tcg_temp_free_i32(tcg_tgtmode);
4478 tcg_temp_free_i32(tcg_regno);
4479 store_reg(s, rn, tcg_reg);
dcba3a8d 4480 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4481}
4482
fb0e8e79
PM
4483/* Store value to PC as for an exception return (ie don't
4484 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4485 * will do the masking based on the new value of the Thumb bit.
4486 */
4487static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4488{
fb0e8e79
PM
4489 tcg_gen_mov_i32(cpu_R[15], pc);
4490 tcg_temp_free_i32(pc);
b5ff1b31
FB
4491}
4492
/* Generate a v6 exception return.  Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 4506
fb0e8e79
PM
4507/* Generate an old-style exception return. Marks pc as dead. */
4508static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4509{
4510 gen_rfe(s, pc, load_cpu_field(spsr));
4511}
4512
c22edfeb
AB
4513/*
4514 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4515 * only call the helper when running single threaded TCG code to ensure
4516 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4517 * just skip this instruction. Currently the SEV/SEVL instructions
4518 * which are *one* of many ways to wake the CPU from WFE are not
4519 * implemented so we can't sleep like WFI does.
4520 */
9ee6e8bb
PB
4521static void gen_nop_hint(DisasContext *s, int val)
4522{
4523 switch (val) {
c87e5a61 4524 case 1: /* yield */
c22edfeb
AB
4525 if (!parallel_cpus) {
4526 gen_set_pc_im(s, s->pc);
dcba3a8d 4527 s->base.is_jmp = DISAS_YIELD;
c22edfeb 4528 }
c87e5a61 4529 break;
9ee6e8bb 4530 case 3: /* wfi */
eaed129d 4531 gen_set_pc_im(s, s->pc);
dcba3a8d 4532 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
4533 break;
4534 case 2: /* wfe */
c22edfeb
AB
4535 if (!parallel_cpus) {
4536 gen_set_pc_im(s, s->pc);
dcba3a8d 4537 s->base.is_jmp = DISAS_WFE;
c22edfeb 4538 }
72c1d3af 4539 break;
9ee6e8bb 4540 case 4: /* sev */
12b10571
MR
4541 case 5: /* sevl */
4542 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4543 default: /* nop */
4544 break;
4545 }
4546}
99c475ab 4547
ad69471c 4548#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4549
39d5492a 4550static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4551{
4552 switch (size) {
dd8fbd78
FN
4553 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4554 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4555 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4556 default: abort();
9ee6e8bb 4557 }
9ee6e8bb
PB
4558}
4559
39d5492a 4560static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4561{
4562 switch (size) {
dd8fbd78
FN
4563 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4564 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4565 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4566 default: return;
4567 }
4568}
4569
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Emit the Neon integer helper "name" selected by the locals "size"
 * (element size) and "u" (unsigned flag), which must be in scope in the
 * expanding function, along with "tmp" and "tmp2" (the two operands;
 * the result is written to "tmp").  An unhandled size/signedness
 * combination makes the expanding function return 1 (i.e. UNDEF).
 * This _ENV variant passes cpu_env for helpers that need CPU state.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4621
39d5492a 4622static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4623{
39d5492a 4624 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4625 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4626 return tmp;
9ee6e8bb
PB
4627}
4628
39d5492a 4629static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4630{
dd8fbd78 4631 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4632 tcg_temp_free_i32(var);
9ee6e8bb
PB
4633}
4634
39d5492a 4635static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4636{
39d5492a 4637 TCGv_i32 tmp;
9ee6e8bb 4638 if (size == 1) {
0fad6efc
PM
4639 tmp = neon_load_reg(reg & 7, reg >> 4);
4640 if (reg & 8) {
dd8fbd78 4641 gen_neon_dup_high16(tmp);
0fad6efc
PM
4642 } else {
4643 gen_neon_dup_low16(tmp);
dd8fbd78 4644 }
0fad6efc
PM
4645 } else {
4646 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4647 }
dd8fbd78 4648 return tmp;
9ee6e8bb
PB
4649}
4650
02acedf9 4651static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4652{
39d5492a 4653 TCGv_i32 tmp, tmp2;
600b828c 4654 if (!q && size == 2) {
02acedf9
PM
4655 return 1;
4656 }
4657 tmp = tcg_const_i32(rd);
4658 tmp2 = tcg_const_i32(rm);
4659 if (q) {
4660 switch (size) {
4661 case 0:
02da0b2d 4662 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4663 break;
4664 case 1:
02da0b2d 4665 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4666 break;
4667 case 2:
02da0b2d 4668 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4669 break;
4670 default:
4671 abort();
4672 }
4673 } else {
4674 switch (size) {
4675 case 0:
02da0b2d 4676 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4677 break;
4678 case 1:
02da0b2d 4679 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4680 break;
4681 default:
4682 abort();
4683 }
4684 }
4685 tcg_temp_free_i32(tmp);
4686 tcg_temp_free_i32(tmp2);
4687 return 0;
19457615
FN
4688}
4689
d68a6f3a 4690static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4691{
39d5492a 4692 TCGv_i32 tmp, tmp2;
600b828c 4693 if (!q && size == 2) {
d68a6f3a
PM
4694 return 1;
4695 }
4696 tmp = tcg_const_i32(rd);
4697 tmp2 = tcg_const_i32(rm);
4698 if (q) {
4699 switch (size) {
4700 case 0:
02da0b2d 4701 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4702 break;
4703 case 1:
02da0b2d 4704 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4705 break;
4706 case 2:
02da0b2d 4707 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4708 break;
4709 default:
4710 abort();
4711 }
4712 } else {
4713 switch (size) {
4714 case 0:
02da0b2d 4715 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4716 break;
4717 case 1:
02da0b2d 4718 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4719 break;
4720 default:
4721 abort();
4722 }
4723 }
4724 tcg_temp_free_i32(tmp);
4725 tcg_temp_free_i32(tmp2);
4726 return 0;
19457615
FN
4727}
4728
39d5492a 4729static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4730{
39d5492a 4731 TCGv_i32 rd, tmp;
19457615 4732
7d1b0095
PM
4733 rd = tcg_temp_new_i32();
4734 tmp = tcg_temp_new_i32();
19457615
FN
4735
4736 tcg_gen_shli_i32(rd, t0, 8);
4737 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4738 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4739 tcg_gen_or_i32(rd, rd, tmp);
4740
4741 tcg_gen_shri_i32(t1, t1, 8);
4742 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4743 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4744 tcg_gen_or_i32(t1, t1, tmp);
4745 tcg_gen_mov_i32(t0, rd);
4746
7d1b0095
PM
4747 tcg_temp_free_i32(tmp);
4748 tcg_temp_free_i32(rd);
19457615
FN
4749}
4750
39d5492a 4751static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4752{
39d5492a 4753 TCGv_i32 rd, tmp;
19457615 4754
7d1b0095
PM
4755 rd = tcg_temp_new_i32();
4756 tmp = tcg_temp_new_i32();
19457615
FN
4757
4758 tcg_gen_shli_i32(rd, t0, 16);
4759 tcg_gen_andi_i32(tmp, t1, 0xffff);
4760 tcg_gen_or_i32(rd, rd, tmp);
4761 tcg_gen_shri_i32(t1, t1, 16);
4762 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4763 tcg_gen_or_i32(t1, t1, tmp);
4764 tcg_gen_mov_i32(t0, rd);
4765
7d1b0095
PM
4766 tcg_temp_free_i32(tmp);
4767 tcg_temp_free_i32(rd);
19457615
FN
4768}
4769
4770
/* Per-"type" (insn bits [11:8]) description of a Neon "load/store all
 * elements" access: how many D registers are touched, how elements are
 * interleaved in memory, and the register-number spacing.
 * The table is only ever read, so make it const (read-only data).
 */
static const struct {
    int nregs;      /* number of D registers accessed */
    int interleave; /* memory interleave factor */
    int spacing;    /* register number increment between structures */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4788
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Decodes three forms: "load/store all elements" (VLD1-4/VST1-4 multiple
   structures), "load single element to all lanes" (VLD1-4 dup), and
   "load/store single element to one lane", then performs the common
   post-index writeback of the base register.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;       /* base address register */
    rm = insn & 0xf;               /* post-index register, or 13/15 specials */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;    /* "type" field, indexes neon_ls_element_type */
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base with a per-register
             * offset rather than continuing linearly.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: two 32-bit passes per D register */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit elements packed into one word */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit elements packed into one word */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;   /* index_align field */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the existing lane */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise by register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 5117
8f8e3aa4 5118/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5119static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5120{
5121 tcg_gen_and_i32(t, t, c);
f669df27 5122 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5123 tcg_gen_or_i32(dest, t, f);
5124}
5125
39d5492a 5126static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5127{
5128 switch (size) {
5129 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5130 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5131 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5132 default: abort();
5133 }
5134}
5135
39d5492a 5136static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5137{
5138 switch (size) {
02da0b2d
PM
5139 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5140 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5141 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5142 default: abort();
5143 }
5144}
5145
39d5492a 5146static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5147{
5148 switch (size) {
02da0b2d
PM
5149 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5150 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5151 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5152 default: abort();
5153 }
5154}
5155
39d5492a 5156static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5157{
5158 switch (size) {
02da0b2d
PM
5159 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5160 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5161 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5162 default: abort();
5163 }
5164}
5165
39d5492a 5166static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5167 int q, int u)
5168{
5169 if (q) {
5170 if (u) {
5171 switch (size) {
5172 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5173 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5174 default: abort();
5175 }
5176 } else {
5177 switch (size) {
5178 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5179 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5180 default: abort();
5181 }
5182 }
5183 } else {
5184 if (u) {
5185 switch (size) {
b408a9b0
CL
5186 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5187 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5188 default: abort();
5189 }
5190 } else {
5191 switch (size) {
5192 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5193 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5194 default: abort();
5195 }
5196 }
5197 }
5198}
5199
39d5492a 5200static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5201{
5202 if (u) {
5203 switch (size) {
5204 case 0: gen_helper_neon_widen_u8(dest, src); break;
5205 case 1: gen_helper_neon_widen_u16(dest, src); break;
5206 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5207 default: abort();
5208 }
5209 } else {
5210 switch (size) {
5211 case 0: gen_helper_neon_widen_s8(dest, src); break;
5212 case 1: gen_helper_neon_widen_s16(dest, src); break;
5213 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5214 default: abort();
5215 }
5216 }
7d1b0095 5217 tcg_temp_free_i32(src);
ad69471c
PB
5218}
5219
5220static inline void gen_neon_addl(int size)
5221{
5222 switch (size) {
5223 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5224 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5225 case 2: tcg_gen_add_i64(CPU_V001); break;
5226 default: abort();
5227 }
5228}
5229
5230static inline void gen_neon_subl(int size)
5231{
5232 switch (size) {
5233 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5234 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5235 case 2: tcg_gen_sub_i64(CPU_V001); break;
5236 default: abort();
5237 }
5238}
5239
a7812ae4 5240static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5241{
5242 switch (size) {
5243 case 0: gen_helper_neon_negl_u16(var, var); break;
5244 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5245 case 2:
5246 tcg_gen_neg_i64(var, var);
5247 break;
ad69471c
PB
5248 default: abort();
5249 }
5250}
5251
a7812ae4 5252static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5253{
5254 switch (size) {
02da0b2d
PM
5255 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5256 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5257 default: abort();
5258 }
5259}
5260
39d5492a
PM
5261static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5262 int size, int u)
ad69471c 5263{
a7812ae4 5264 TCGv_i64 tmp;
ad69471c
PB
5265
5266 switch ((size << 1) | u) {
5267 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5268 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5269 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5270 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5271 case 4:
5272 tmp = gen_muls_i64_i32(a, b);
5273 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5274 tcg_temp_free_i64(tmp);
ad69471c
PB
5275 break;
5276 case 5:
5277 tmp = gen_mulu_i64_i32(a, b);
5278 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5279 tcg_temp_free_i64(tmp);
ad69471c
PB
5280 break;
5281 default: abort();
5282 }
c6067f04
CL
5283
5284 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5285 Don't forget to clean them now. */
5286 if (size < 2) {
7d1b0095
PM
5287 tcg_temp_free_i32(a);
5288 tcg_temp_free_i32(b);
c6067f04 5289 }
ad69471c
PB
5290}
5291
39d5492a
PM
5292static void gen_neon_narrow_op(int op, int u, int size,
5293 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5294{
5295 if (op) {
5296 if (u) {
5297 gen_neon_unarrow_sats(size, dest, src);
5298 } else {
5299 gen_neon_narrow(size, dest, src);
5300 }
5301 } else {
5302 if (u) {
5303 gen_neon_narrow_satu(size, dest, src);
5304 } else {
5305 gen_neon_narrow_sats(size, dest, src);
5306 }
5307 }
5308}
5309
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* For each 3-reg-same op, bit n of the entry is set if element size n
 * is allowed; a clear bit (or an unallocated op, whose entry is 0)
 * means the encoding UNDEFs.  Some ops overload the size field as an
 * op-type selector and so allow "all sizes" here.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5381
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

/* Return true if this neon 2reg-misc op is float-to-float.
 * The qualifying ops are VABS_F/VNEG_F, the VRINT* group, and
 * everything from VRECPE_F upwards; note that the VRINTP..VCVTMS
 * range is contiguous and is covered as a single span.
 */
static int neon_2rm_is_float_op(int op)
{
    if (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) {
        return 1;
    }
    if (op == NEON_2RM_VRINTM) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) {
        return 1;
    }
    return op >= NEON_2RM_VRECPE_F;
}

/* Return true if this neon 2reg-misc op is ARMv8 and up: the round-to-
 * integral VRINT* ops and the directed-rounding VCVT[ANPM][US] ops.
 * VRINTN..VRINTZ (40..43) and VCVTAU..VCVTMS (48..55) are contiguous
 * encodings, so ranges plus the two stragglers cover the whole set.
 */
static bool neon_2rm_is_v8_op(int op)
{
    return (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
        op == NEON_2RM_VRINTM ||
        op == NEON_2RM_VRINTP ||
        (op >= NEON_2RM_VCVTAU && op <= NEON_2RM_VCVTMS);
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5551
9ee6e8bb
PB
5552/* Translate a NEON data processing instruction. Return nonzero if the
5553 instruction is invalid.
ad69471c
PB
5554 We process data in a mixture of 32-bit and 64-bit chunks.
5555 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5556
7dcc1f89 5557static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5558{
5559 int op;
5560 int q;
5561 int rd, rn, rm;
5562 int size;
5563 int shift;
5564 int pass;
5565 int count;
5566 int pairwise;
5567 int u;
ca9a32e4 5568 uint32_t imm, mask;
39d5492a 5569 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5570 TCGv_i64 tmp64;
9ee6e8bb 5571
2c7ffc41
PM
5572 /* FIXME: this access check should not take precedence over UNDEF
5573 * for invalid encodings; we will generate incorrect syndrome information
5574 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5575 */
9dbbc748 5576 if (s->fp_excp_el) {
2c7ffc41 5577 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5578 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5579 return 0;
5580 }
5581
5df8bac1 5582 if (!s->vfp_enabled)
9ee6e8bb
PB
5583 return 1;
5584 q = (insn & (1 << 6)) != 0;
5585 u = (insn >> 24) & 1;
5586 VFP_DREG_D(rd, insn);
5587 VFP_DREG_N(rn, insn);
5588 VFP_DREG_M(rm, insn);
5589 size = (insn >> 20) & 3;
5590 if ((insn & (1 << 23)) == 0) {
5591 /* Three register same length. */
5592 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5593 /* Catch invalid op and bad size combinations: UNDEF */
5594 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5595 return 1;
5596 }
25f84f79
PM
5597 /* All insns of this form UNDEF for either this condition or the
5598 * superset of cases "Q==1"; we catch the latter later.
5599 */
5600 if (q && ((rd | rn | rm) & 1)) {
5601 return 1;
5602 }
f1ecb913
AB
5603 /*
5604 * The SHA-1/SHA-256 3-register instructions require special treatment
5605 * here, as their size field is overloaded as an op type selector, and
5606 * they all consume their input in a single pass.
5607 */
5608 if (op == NEON_3R_SHA) {
5609 if (!q) {
5610 return 1;
5611 }
5612 if (!u) { /* SHA-1 */
d614a513 5613 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5614 return 1;
5615 }
5616 tmp = tcg_const_i32(rd);
5617 tmp2 = tcg_const_i32(rn);
5618 tmp3 = tcg_const_i32(rm);
5619 tmp4 = tcg_const_i32(size);
5620 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5621 tcg_temp_free_i32(tmp4);
5622 } else { /* SHA-256 */
d614a513 5623 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5624 return 1;
5625 }
5626 tmp = tcg_const_i32(rd);
5627 tmp2 = tcg_const_i32(rn);
5628 tmp3 = tcg_const_i32(rm);
5629 switch (size) {
5630 case 0:
5631 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5632 break;
5633 case 1:
5634 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5635 break;
5636 case 2:
5637 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5638 break;
5639 }
5640 }
5641 tcg_temp_free_i32(tmp);
5642 tcg_temp_free_i32(tmp2);
5643 tcg_temp_free_i32(tmp3);
5644 return 0;
5645 }
62698be3
PM
5646 if (size == 3 && op != NEON_3R_LOGIC) {
5647 /* 64-bit element instructions. */
9ee6e8bb 5648 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5649 neon_load_reg64(cpu_V0, rn + pass);
5650 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5651 switch (op) {
62698be3 5652 case NEON_3R_VQADD:
9ee6e8bb 5653 if (u) {
02da0b2d
PM
5654 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5655 cpu_V0, cpu_V1);
2c0262af 5656 } else {
02da0b2d
PM
5657 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5658 cpu_V0, cpu_V1);
2c0262af 5659 }
9ee6e8bb 5660 break;
62698be3 5661 case NEON_3R_VQSUB:
9ee6e8bb 5662 if (u) {
02da0b2d
PM
5663 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5664 cpu_V0, cpu_V1);
ad69471c 5665 } else {
02da0b2d
PM
5666 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5667 cpu_V0, cpu_V1);
ad69471c
PB
5668 }
5669 break;
62698be3 5670 case NEON_3R_VSHL:
ad69471c
PB
5671 if (u) {
5672 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5673 } else {
5674 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5675 }
5676 break;
62698be3 5677 case NEON_3R_VQSHL:
ad69471c 5678 if (u) {
02da0b2d
PM
5679 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5680 cpu_V1, cpu_V0);
ad69471c 5681 } else {
02da0b2d
PM
5682 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5683 cpu_V1, cpu_V0);
ad69471c
PB
5684 }
5685 break;
62698be3 5686 case NEON_3R_VRSHL:
ad69471c
PB
5687 if (u) {
5688 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5689 } else {
ad69471c
PB
5690 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5691 }
5692 break;
62698be3 5693 case NEON_3R_VQRSHL:
ad69471c 5694 if (u) {
02da0b2d
PM
5695 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5696 cpu_V1, cpu_V0);
ad69471c 5697 } else {
02da0b2d
PM
5698 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5699 cpu_V1, cpu_V0);
1e8d4eec 5700 }
9ee6e8bb 5701 break;
62698be3 5702 case NEON_3R_VADD_VSUB:
9ee6e8bb 5703 if (u) {
ad69471c 5704 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5705 } else {
ad69471c 5706 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5707 }
5708 break;
5709 default:
5710 abort();
2c0262af 5711 }
ad69471c 5712 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5713 }
9ee6e8bb 5714 return 0;
2c0262af 5715 }
25f84f79 5716 pairwise = 0;
9ee6e8bb 5717 switch (op) {
62698be3
PM
5718 case NEON_3R_VSHL:
5719 case NEON_3R_VQSHL:
5720 case NEON_3R_VRSHL:
5721 case NEON_3R_VQRSHL:
9ee6e8bb 5722 {
ad69471c
PB
5723 int rtmp;
5724 /* Shift instruction operands are reversed. */
5725 rtmp = rn;
9ee6e8bb 5726 rn = rm;
ad69471c 5727 rm = rtmp;
9ee6e8bb 5728 }
2c0262af 5729 break;
25f84f79
PM
5730 case NEON_3R_VPADD:
5731 if (u) {
5732 return 1;
5733 }
5734 /* Fall through */
62698be3
PM
5735 case NEON_3R_VPMAX:
5736 case NEON_3R_VPMIN:
9ee6e8bb 5737 pairwise = 1;
2c0262af 5738 break;
25f84f79
PM
5739 case NEON_3R_FLOAT_ARITH:
5740 pairwise = (u && size < 2); /* if VPADD (float) */
5741 break;
5742 case NEON_3R_FLOAT_MINMAX:
5743 pairwise = u; /* if VPMIN/VPMAX (float) */
5744 break;
5745 case NEON_3R_FLOAT_CMP:
5746 if (!u && size) {
5747 /* no encoding for U=0 C=1x */
5748 return 1;
5749 }
5750 break;
5751 case NEON_3R_FLOAT_ACMP:
5752 if (!u) {
5753 return 1;
5754 }
5755 break;
505935fc
WN
5756 case NEON_3R_FLOAT_MISC:
5757 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5758 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5759 return 1;
5760 }
2c0262af 5761 break;
25f84f79
PM
5762 case NEON_3R_VMUL:
5763 if (u && (size != 0)) {
5764 /* UNDEF on invalid size for polynomial subcase */
5765 return 1;
5766 }
2c0262af 5767 break;
da97f52c 5768 case NEON_3R_VFM:
d614a513 5769 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5770 return 1;
5771 }
5772 break;
9ee6e8bb 5773 default:
2c0262af 5774 break;
9ee6e8bb 5775 }
dd8fbd78 5776
25f84f79
PM
5777 if (pairwise && q) {
5778 /* All the pairwise insns UNDEF if Q is set */
5779 return 1;
5780 }
5781
9ee6e8bb
PB
5782 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5783
5784 if (pairwise) {
5785 /* Pairwise. */
a5a14945
JR
5786 if (pass < 1) {
5787 tmp = neon_load_reg(rn, 0);
5788 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5789 } else {
a5a14945
JR
5790 tmp = neon_load_reg(rm, 0);
5791 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5792 }
5793 } else {
5794 /* Elementwise. */
dd8fbd78
FN
5795 tmp = neon_load_reg(rn, pass);
5796 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5797 }
5798 switch (op) {
62698be3 5799 case NEON_3R_VHADD:
9ee6e8bb
PB
5800 GEN_NEON_INTEGER_OP(hadd);
5801 break;
62698be3 5802 case NEON_3R_VQADD:
02da0b2d 5803 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5804 break;
62698be3 5805 case NEON_3R_VRHADD:
9ee6e8bb 5806 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5807 break;
62698be3 5808 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5809 switch ((u << 2) | size) {
5810 case 0: /* VAND */
dd8fbd78 5811 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5812 break;
5813 case 1: /* BIC */
f669df27 5814 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5815 break;
5816 case 2: /* VORR */
dd8fbd78 5817 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5818 break;
5819 case 3: /* VORN */
f669df27 5820 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5821 break;
5822 case 4: /* VEOR */
dd8fbd78 5823 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5824 break;
5825 case 5: /* VBSL */
dd8fbd78
FN
5826 tmp3 = neon_load_reg(rd, pass);
5827 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5828 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5829 break;
5830 case 6: /* VBIT */
dd8fbd78
FN
5831 tmp3 = neon_load_reg(rd, pass);
5832 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5833 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5834 break;
5835 case 7: /* VBIF */
dd8fbd78
FN
5836 tmp3 = neon_load_reg(rd, pass);
5837 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5838 tcg_temp_free_i32(tmp3);
9ee6e8bb 5839 break;
2c0262af
FB
5840 }
5841 break;
62698be3 5842 case NEON_3R_VHSUB:
9ee6e8bb
PB
5843 GEN_NEON_INTEGER_OP(hsub);
5844 break;
62698be3 5845 case NEON_3R_VQSUB:
02da0b2d 5846 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5847 break;
62698be3 5848 case NEON_3R_VCGT:
9ee6e8bb
PB
5849 GEN_NEON_INTEGER_OP(cgt);
5850 break;
62698be3 5851 case NEON_3R_VCGE:
9ee6e8bb
PB
5852 GEN_NEON_INTEGER_OP(cge);
5853 break;
62698be3 5854 case NEON_3R_VSHL:
ad69471c 5855 GEN_NEON_INTEGER_OP(shl);
2c0262af 5856 break;
62698be3 5857 case NEON_3R_VQSHL:
02da0b2d 5858 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5859 break;
62698be3 5860 case NEON_3R_VRSHL:
ad69471c 5861 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5862 break;
62698be3 5863 case NEON_3R_VQRSHL:
02da0b2d 5864 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5865 break;
62698be3 5866 case NEON_3R_VMAX:
9ee6e8bb
PB
5867 GEN_NEON_INTEGER_OP(max);
5868 break;
62698be3 5869 case NEON_3R_VMIN:
9ee6e8bb
PB
5870 GEN_NEON_INTEGER_OP(min);
5871 break;
62698be3 5872 case NEON_3R_VABD:
9ee6e8bb
PB
5873 GEN_NEON_INTEGER_OP(abd);
5874 break;
62698be3 5875 case NEON_3R_VABA:
9ee6e8bb 5876 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5877 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5878 tmp2 = neon_load_reg(rd, pass);
5879 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5880 break;
62698be3 5881 case NEON_3R_VADD_VSUB:
9ee6e8bb 5882 if (!u) { /* VADD */
62698be3 5883 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5884 } else { /* VSUB */
5885 switch (size) {
dd8fbd78
FN
5886 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5887 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5888 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5889 default: abort();
9ee6e8bb
PB
5890 }
5891 }
5892 break;
62698be3 5893 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5894 if (!u) { /* VTST */
5895 switch (size) {
dd8fbd78
FN
5896 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5897 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5898 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5899 default: abort();
9ee6e8bb
PB
5900 }
5901 } else { /* VCEQ */
5902 switch (size) {
dd8fbd78
FN
5903 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5904 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5905 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5906 default: abort();
9ee6e8bb
PB
5907 }
5908 }
5909 break;
62698be3 5910 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5911 switch (size) {
dd8fbd78
FN
5912 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5913 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5914 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5915 default: abort();
9ee6e8bb 5916 }
7d1b0095 5917 tcg_temp_free_i32(tmp2);
dd8fbd78 5918 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5919 if (u) { /* VMLS */
dd8fbd78 5920 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5921 } else { /* VMLA */
dd8fbd78 5922 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5923 }
5924 break;
62698be3 5925 case NEON_3R_VMUL:
9ee6e8bb 5926 if (u) { /* polynomial */
dd8fbd78 5927 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5928 } else { /* Integer */
5929 switch (size) {
dd8fbd78
FN
5930 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5931 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5932 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5933 default: abort();
9ee6e8bb
PB
5934 }
5935 }
5936 break;
62698be3 5937 case NEON_3R_VPMAX:
9ee6e8bb
PB
5938 GEN_NEON_INTEGER_OP(pmax);
5939 break;
62698be3 5940 case NEON_3R_VPMIN:
9ee6e8bb
PB
5941 GEN_NEON_INTEGER_OP(pmin);
5942 break;
62698be3 5943 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5944 if (!u) { /* VQDMULH */
5945 switch (size) {
02da0b2d
PM
5946 case 1:
5947 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5948 break;
5949 case 2:
5950 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5951 break;
62698be3 5952 default: abort();
9ee6e8bb 5953 }
62698be3 5954 } else { /* VQRDMULH */
9ee6e8bb 5955 switch (size) {
02da0b2d
PM
5956 case 1:
5957 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5958 break;
5959 case 2:
5960 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5961 break;
62698be3 5962 default: abort();
9ee6e8bb
PB
5963 }
5964 }
5965 break;
62698be3 5966 case NEON_3R_VPADD:
9ee6e8bb 5967 switch (size) {
dd8fbd78
FN
5968 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5969 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5970 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5971 default: abort();
9ee6e8bb
PB
5972 }
5973 break;
62698be3 5974 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5975 {
5976 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5977 switch ((u << 2) | size) {
5978 case 0: /* VADD */
aa47cfdd
PM
5979 case 4: /* VPADD */
5980 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5981 break;
5982 case 2: /* VSUB */
aa47cfdd 5983 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5984 break;
5985 case 6: /* VABD */
aa47cfdd 5986 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5987 break;
5988 default:
62698be3 5989 abort();
9ee6e8bb 5990 }
aa47cfdd 5991 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5992 break;
aa47cfdd 5993 }
62698be3 5994 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5995 {
5996 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5997 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5998 if (!u) {
7d1b0095 5999 tcg_temp_free_i32(tmp2);
dd8fbd78 6000 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6001 if (size == 0) {
aa47cfdd 6002 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6003 } else {
aa47cfdd 6004 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6005 }
6006 }
aa47cfdd 6007 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6008 break;
aa47cfdd 6009 }
62698be3 6010 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6011 {
6012 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6013 if (!u) {
aa47cfdd 6014 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6015 } else {
aa47cfdd
PM
6016 if (size == 0) {
6017 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6018 } else {
6019 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6020 }
b5ff1b31 6021 }
aa47cfdd 6022 tcg_temp_free_ptr(fpstatus);
2c0262af 6023 break;
aa47cfdd 6024 }
62698be3 6025 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6026 {
6027 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6028 if (size == 0) {
6029 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6030 } else {
6031 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6032 }
6033 tcg_temp_free_ptr(fpstatus);
2c0262af 6034 break;
aa47cfdd 6035 }
62698be3 6036 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6037 {
6038 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6039 if (size == 0) {
f71a2ae5 6040 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6041 } else {
f71a2ae5 6042 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6043 }
6044 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6045 break;
aa47cfdd 6046 }
505935fc
WN
6047 case NEON_3R_FLOAT_MISC:
6048 if (u) {
6049 /* VMAXNM/VMINNM */
6050 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6051 if (size == 0) {
f71a2ae5 6052 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6053 } else {
f71a2ae5 6054 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6055 }
6056 tcg_temp_free_ptr(fpstatus);
6057 } else {
6058 if (size == 0) {
6059 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6060 } else {
6061 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6062 }
6063 }
2c0262af 6064 break;
da97f52c
PM
6065 case NEON_3R_VFM:
6066 {
6067 /* VFMA, VFMS: fused multiply-add */
6068 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6069 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6070 if (size) {
6071 /* VFMS */
6072 gen_helper_vfp_negs(tmp, tmp);
6073 }
6074 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6075 tcg_temp_free_i32(tmp3);
6076 tcg_temp_free_ptr(fpstatus);
6077 break;
6078 }
9ee6e8bb
PB
6079 default:
6080 abort();
2c0262af 6081 }
7d1b0095 6082 tcg_temp_free_i32(tmp2);
dd8fbd78 6083
9ee6e8bb
PB
6084 /* Save the result. For elementwise operations we can put it
6085 straight into the destination register. For pairwise operations
6086 we have to be careful to avoid clobbering the source operands. */
6087 if (pairwise && rd == rm) {
dd8fbd78 6088 neon_store_scratch(pass, tmp);
9ee6e8bb 6089 } else {
dd8fbd78 6090 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6091 }
6092
6093 } /* for pass */
6094 if (pairwise && rd == rm) {
6095 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6096 tmp = neon_load_scratch(pass);
6097 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6098 }
6099 }
ad69471c 6100 /* End of 3 register same size operations. */
9ee6e8bb
PB
6101 } else if (insn & (1 << 4)) {
6102 if ((insn & 0x00380080) != 0) {
6103 /* Two registers and shift. */
6104 op = (insn >> 8) & 0xf;
6105 if (insn & (1 << 7)) {
cc13115b
PM
6106 /* 64-bit shift. */
6107 if (op > 7) {
6108 return 1;
6109 }
9ee6e8bb
PB
6110 size = 3;
6111 } else {
6112 size = 2;
6113 while ((insn & (1 << (size + 19))) == 0)
6114 size--;
6115 }
6116 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6117 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6118 by immediate using the variable shift operations. */
6119 if (op < 8) {
6120 /* Shift by immediate:
6121 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6122 if (q && ((rd | rm) & 1)) {
6123 return 1;
6124 }
6125 if (!u && (op == 4 || op == 6)) {
6126 return 1;
6127 }
9ee6e8bb
PB
6128 /* Right shifts are encoded as N - shift, where N is the
6129 element size in bits. */
6130 if (op <= 4)
6131 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6132 if (size == 3) {
6133 count = q + 1;
6134 } else {
6135 count = q ? 4: 2;
6136 }
6137 switch (size) {
6138 case 0:
6139 imm = (uint8_t) shift;
6140 imm |= imm << 8;
6141 imm |= imm << 16;
6142 break;
6143 case 1:
6144 imm = (uint16_t) shift;
6145 imm |= imm << 16;
6146 break;
6147 case 2:
6148 case 3:
6149 imm = shift;
6150 break;
6151 default:
6152 abort();
6153 }
6154
6155 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6156 if (size == 3) {
6157 neon_load_reg64(cpu_V0, rm + pass);
6158 tcg_gen_movi_i64(cpu_V1, imm);
6159 switch (op) {
6160 case 0: /* VSHR */
6161 case 1: /* VSRA */
6162 if (u)
6163 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6164 else
ad69471c 6165 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6166 break;
ad69471c
PB
6167 case 2: /* VRSHR */
6168 case 3: /* VRSRA */
6169 if (u)
6170 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6171 else
ad69471c 6172 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6173 break;
ad69471c 6174 case 4: /* VSRI */
ad69471c
PB
6175 case 5: /* VSHL, VSLI */
6176 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6177 break;
0322b26e 6178 case 6: /* VQSHLU */
02da0b2d
PM
6179 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6180 cpu_V0, cpu_V1);
ad69471c 6181 break;
0322b26e
PM
6182 case 7: /* VQSHL */
6183 if (u) {
02da0b2d 6184 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6185 cpu_V0, cpu_V1);
6186 } else {
02da0b2d 6187 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6188 cpu_V0, cpu_V1);
6189 }
9ee6e8bb 6190 break;
9ee6e8bb 6191 }
ad69471c
PB
6192 if (op == 1 || op == 3) {
6193 /* Accumulate. */
5371cb81 6194 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6195 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6196 } else if (op == 4 || (op == 5 && u)) {
6197 /* Insert */
923e6509
CL
6198 neon_load_reg64(cpu_V1, rd + pass);
6199 uint64_t mask;
6200 if (shift < -63 || shift > 63) {
6201 mask = 0;
6202 } else {
6203 if (op == 4) {
6204 mask = 0xffffffffffffffffull >> -shift;
6205 } else {
6206 mask = 0xffffffffffffffffull << shift;
6207 }
6208 }
6209 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6210 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6211 }
6212 neon_store_reg64(cpu_V0, rd + pass);
6213 } else { /* size < 3 */
6214 /* Operands in T0 and T1. */
dd8fbd78 6215 tmp = neon_load_reg(rm, pass);
7d1b0095 6216 tmp2 = tcg_temp_new_i32();
dd8fbd78 6217 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6218 switch (op) {
6219 case 0: /* VSHR */
6220 case 1: /* VSRA */
6221 GEN_NEON_INTEGER_OP(shl);
6222 break;
6223 case 2: /* VRSHR */
6224 case 3: /* VRSRA */
6225 GEN_NEON_INTEGER_OP(rshl);
6226 break;
6227 case 4: /* VSRI */
ad69471c
PB
6228 case 5: /* VSHL, VSLI */
6229 switch (size) {
dd8fbd78
FN
6230 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6231 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6232 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6233 default: abort();
ad69471c
PB
6234 }
6235 break;
0322b26e 6236 case 6: /* VQSHLU */
ad69471c 6237 switch (size) {
0322b26e 6238 case 0:
02da0b2d
PM
6239 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6240 tmp, tmp2);
0322b26e
PM
6241 break;
6242 case 1:
02da0b2d
PM
6243 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6244 tmp, tmp2);
0322b26e
PM
6245 break;
6246 case 2:
02da0b2d
PM
6247 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6248 tmp, tmp2);
0322b26e
PM
6249 break;
6250 default:
cc13115b 6251 abort();
ad69471c
PB
6252 }
6253 break;
0322b26e 6254 case 7: /* VQSHL */
02da0b2d 6255 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6256 break;
ad69471c 6257 }
7d1b0095 6258 tcg_temp_free_i32(tmp2);
ad69471c
PB
6259
6260 if (op == 1 || op == 3) {
6261 /* Accumulate. */
dd8fbd78 6262 tmp2 = neon_load_reg(rd, pass);
5371cb81 6263 gen_neon_add(size, tmp, tmp2);
7d1b0095 6264 tcg_temp_free_i32(tmp2);
ad69471c
PB
6265 } else if (op == 4 || (op == 5 && u)) {
6266 /* Insert */
6267 switch (size) {
6268 case 0:
6269 if (op == 4)
ca9a32e4 6270 mask = 0xff >> -shift;
ad69471c 6271 else
ca9a32e4
JR
6272 mask = (uint8_t)(0xff << shift);
6273 mask |= mask << 8;
6274 mask |= mask << 16;
ad69471c
PB
6275 break;
6276 case 1:
6277 if (op == 4)
ca9a32e4 6278 mask = 0xffff >> -shift;
ad69471c 6279 else
ca9a32e4
JR
6280 mask = (uint16_t)(0xffff << shift);
6281 mask |= mask << 16;
ad69471c
PB
6282 break;
6283 case 2:
ca9a32e4
JR
6284 if (shift < -31 || shift > 31) {
6285 mask = 0;
6286 } else {
6287 if (op == 4)
6288 mask = 0xffffffffu >> -shift;
6289 else
6290 mask = 0xffffffffu << shift;
6291 }
ad69471c
PB
6292 break;
6293 default:
6294 abort();
6295 }
dd8fbd78 6296 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6297 tcg_gen_andi_i32(tmp, tmp, mask);
6298 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6299 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6300 tcg_temp_free_i32(tmp2);
ad69471c 6301 }
dd8fbd78 6302 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6303 }
6304 } /* for pass */
6305 } else if (op < 10) {
ad69471c 6306 /* Shift by immediate and narrow:
9ee6e8bb 6307 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6308 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6309 if (rm & 1) {
6310 return 1;
6311 }
9ee6e8bb
PB
6312 shift = shift - (1 << (size + 3));
6313 size++;
92cdfaeb 6314 if (size == 3) {
a7812ae4 6315 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6316 neon_load_reg64(cpu_V0, rm);
6317 neon_load_reg64(cpu_V1, rm + 1);
6318 for (pass = 0; pass < 2; pass++) {
6319 TCGv_i64 in;
6320 if (pass == 0) {
6321 in = cpu_V0;
6322 } else {
6323 in = cpu_V1;
6324 }
ad69471c 6325 if (q) {
0b36f4cd 6326 if (input_unsigned) {
92cdfaeb 6327 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6328 } else {
92cdfaeb 6329 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6330 }
ad69471c 6331 } else {
0b36f4cd 6332 if (input_unsigned) {
92cdfaeb 6333 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6334 } else {
92cdfaeb 6335 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6336 }
ad69471c 6337 }
7d1b0095 6338 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6339 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6340 neon_store_reg(rd, pass, tmp);
6341 } /* for pass */
6342 tcg_temp_free_i64(tmp64);
6343 } else {
6344 if (size == 1) {
6345 imm = (uint16_t)shift;
6346 imm |= imm << 16;
2c0262af 6347 } else {
92cdfaeb
PM
6348 /* size == 2 */
6349 imm = (uint32_t)shift;
6350 }
6351 tmp2 = tcg_const_i32(imm);
6352 tmp4 = neon_load_reg(rm + 1, 0);
6353 tmp5 = neon_load_reg(rm + 1, 1);
6354 for (pass = 0; pass < 2; pass++) {
6355 if (pass == 0) {
6356 tmp = neon_load_reg(rm, 0);
6357 } else {
6358 tmp = tmp4;
6359 }
0b36f4cd
CL
6360 gen_neon_shift_narrow(size, tmp, tmp2, q,
6361 input_unsigned);
92cdfaeb
PM
6362 if (pass == 0) {
6363 tmp3 = neon_load_reg(rm, 1);
6364 } else {
6365 tmp3 = tmp5;
6366 }
0b36f4cd
CL
6367 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6368 input_unsigned);
36aa55dc 6369 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6370 tcg_temp_free_i32(tmp);
6371 tcg_temp_free_i32(tmp3);
6372 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6373 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6374 neon_store_reg(rd, pass, tmp);
6375 } /* for pass */
c6067f04 6376 tcg_temp_free_i32(tmp2);
b75263d6 6377 }
9ee6e8bb 6378 } else if (op == 10) {
cc13115b
PM
6379 /* VSHLL, VMOVL */
6380 if (q || (rd & 1)) {
9ee6e8bb 6381 return 1;
cc13115b 6382 }
ad69471c
PB
6383 tmp = neon_load_reg(rm, 0);
6384 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6385 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6386 if (pass == 1)
6387 tmp = tmp2;
6388
6389 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6390
9ee6e8bb
PB
6391 if (shift != 0) {
6392 /* The shift is less than the width of the source
ad69471c
PB
6393 type, so we can just shift the whole register. */
6394 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6395 /* Widen the result of shift: we need to clear
6396 * the potential overflow bits resulting from
6397 * left bits of the narrow input appearing as
6398 * right bits of left the neighbour narrow
6399 * input. */
ad69471c
PB
6400 if (size < 2 || !u) {
6401 uint64_t imm64;
6402 if (size == 0) {
6403 imm = (0xffu >> (8 - shift));
6404 imm |= imm << 16;
acdf01ef 6405 } else if (size == 1) {
ad69471c 6406 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6407 } else {
6408 /* size == 2 */
6409 imm = 0xffffffff >> (32 - shift);
6410 }
6411 if (size < 2) {
6412 imm64 = imm | (((uint64_t)imm) << 32);
6413 } else {
6414 imm64 = imm;
9ee6e8bb 6415 }
acdf01ef 6416 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6417 }
6418 }
ad69471c 6419 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6420 }
f73534a5 6421 } else if (op >= 14) {
9ee6e8bb 6422 /* VCVT fixed-point. */
cc13115b
PM
6423 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6424 return 1;
6425 }
f73534a5
PM
6426 /* We have already masked out the must-be-1 top bit of imm6,
6427 * hence this 32-shift where the ARM ARM has 64-imm6.
6428 */
6429 shift = 32 - shift;
9ee6e8bb 6430 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6431 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6432 if (!(op & 1)) {
9ee6e8bb 6433 if (u)
5500b06c 6434 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6435 else
5500b06c 6436 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6437 } else {
6438 if (u)
5500b06c 6439 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6440 else
5500b06c 6441 gen_vfp_tosl(0, shift, 1);
2c0262af 6442 }
4373f3ce 6443 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6444 }
6445 } else {
9ee6e8bb
PB
6446 return 1;
6447 }
6448 } else { /* (insn & 0x00380080) == 0 */
6449 int invert;
7d80fee5
PM
6450 if (q && (rd & 1)) {
6451 return 1;
6452 }
9ee6e8bb
PB
6453
6454 op = (insn >> 8) & 0xf;
6455 /* One register and immediate. */
6456 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6457 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6458 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6459 * We choose to not special-case this and will behave as if a
6460 * valid constant encoding of 0 had been given.
6461 */
9ee6e8bb
PB
6462 switch (op) {
6463 case 0: case 1:
6464 /* no-op */
6465 break;
6466 case 2: case 3:
6467 imm <<= 8;
6468 break;
6469 case 4: case 5:
6470 imm <<= 16;
6471 break;
6472 case 6: case 7:
6473 imm <<= 24;
6474 break;
6475 case 8: case 9:
6476 imm |= imm << 16;
6477 break;
6478 case 10: case 11:
6479 imm = (imm << 8) | (imm << 24);
6480 break;
6481 case 12:
8e31209e 6482 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6483 break;
6484 case 13:
6485 imm = (imm << 16) | 0xffff;
6486 break;
6487 case 14:
6488 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6489 if (invert)
6490 imm = ~imm;
6491 break;
6492 case 15:
7d80fee5
PM
6493 if (invert) {
6494 return 1;
6495 }
9ee6e8bb
PB
6496 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6497 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6498 break;
6499 }
6500 if (invert)
6501 imm = ~imm;
6502
9ee6e8bb
PB
6503 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6504 if (op & 1 && op < 12) {
ad69471c 6505 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6506 if (invert) {
6507 /* The immediate value has already been inverted, so
6508 BIC becomes AND. */
ad69471c 6509 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6510 } else {
ad69471c 6511 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6512 }
9ee6e8bb 6513 } else {
ad69471c 6514 /* VMOV, VMVN. */
7d1b0095 6515 tmp = tcg_temp_new_i32();
9ee6e8bb 6516 if (op == 14 && invert) {
a5a14945 6517 int n;
ad69471c
PB
6518 uint32_t val;
6519 val = 0;
9ee6e8bb
PB
6520 for (n = 0; n < 4; n++) {
6521 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6522 val |= 0xff << (n * 8);
9ee6e8bb 6523 }
ad69471c
PB
6524 tcg_gen_movi_i32(tmp, val);
6525 } else {
6526 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6527 }
9ee6e8bb 6528 }
ad69471c 6529 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6530 }
6531 }
e4b3861d 6532 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6533 if (size != 3) {
6534 op = (insn >> 8) & 0xf;
6535 if ((insn & (1 << 6)) == 0) {
6536 /* Three registers of different lengths. */
6537 int src1_wide;
6538 int src2_wide;
6539 int prewiden;
526d0096
PM
6540 /* undefreq: bit 0 : UNDEF if size == 0
6541 * bit 1 : UNDEF if size == 1
6542 * bit 2 : UNDEF if size == 2
6543 * bit 3 : UNDEF if U == 1
6544 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6545 */
6546 int undefreq;
6547 /* prewiden, src1_wide, src2_wide, undefreq */
6548 static const int neon_3reg_wide[16][4] = {
6549 {1, 0, 0, 0}, /* VADDL */
6550 {1, 1, 0, 0}, /* VADDW */
6551 {1, 0, 0, 0}, /* VSUBL */
6552 {1, 1, 0, 0}, /* VSUBW */
6553 {0, 1, 1, 0}, /* VADDHN */
6554 {0, 0, 0, 0}, /* VABAL */
6555 {0, 1, 1, 0}, /* VSUBHN */
6556 {0, 0, 0, 0}, /* VABDL */
6557 {0, 0, 0, 0}, /* VMLAL */
526d0096 6558 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6559 {0, 0, 0, 0}, /* VMLSL */
526d0096 6560 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6561 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6562 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6563 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6564 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6565 };
6566
6567 prewiden = neon_3reg_wide[op][0];
6568 src1_wide = neon_3reg_wide[op][1];
6569 src2_wide = neon_3reg_wide[op][2];
695272dc 6570 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6571
526d0096
PM
6572 if ((undefreq & (1 << size)) ||
6573 ((undefreq & 8) && u)) {
695272dc
PM
6574 return 1;
6575 }
6576 if ((src1_wide && (rn & 1)) ||
6577 (src2_wide && (rm & 1)) ||
6578 (!src2_wide && (rd & 1))) {
ad69471c 6579 return 1;
695272dc 6580 }
ad69471c 6581
4e624eda
PM
6582 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6583 * outside the loop below as it only performs a single pass.
6584 */
6585 if (op == 14 && size == 2) {
6586 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6587
d614a513 6588 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6589 return 1;
6590 }
6591 tcg_rn = tcg_temp_new_i64();
6592 tcg_rm = tcg_temp_new_i64();
6593 tcg_rd = tcg_temp_new_i64();
6594 neon_load_reg64(tcg_rn, rn);
6595 neon_load_reg64(tcg_rm, rm);
6596 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6597 neon_store_reg64(tcg_rd, rd);
6598 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6599 neon_store_reg64(tcg_rd, rd + 1);
6600 tcg_temp_free_i64(tcg_rn);
6601 tcg_temp_free_i64(tcg_rm);
6602 tcg_temp_free_i64(tcg_rd);
6603 return 0;
6604 }
6605
9ee6e8bb
PB
6606 /* Avoid overlapping operands. Wide source operands are
6607 always aligned so will never overlap with wide
6608 destinations in problematic ways. */
8f8e3aa4 6609 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6610 tmp = neon_load_reg(rm, 1);
6611 neon_store_scratch(2, tmp);
8f8e3aa4 6612 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6613 tmp = neon_load_reg(rn, 1);
6614 neon_store_scratch(2, tmp);
9ee6e8bb 6615 }
39d5492a 6616 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6617 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6618 if (src1_wide) {
6619 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6620 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6621 } else {
ad69471c 6622 if (pass == 1 && rd == rn) {
dd8fbd78 6623 tmp = neon_load_scratch(2);
9ee6e8bb 6624 } else {
ad69471c
PB
6625 tmp = neon_load_reg(rn, pass);
6626 }
6627 if (prewiden) {
6628 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6629 }
6630 }
ad69471c
PB
6631 if (src2_wide) {
6632 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6633 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6634 } else {
ad69471c 6635 if (pass == 1 && rd == rm) {
dd8fbd78 6636 tmp2 = neon_load_scratch(2);
9ee6e8bb 6637 } else {
ad69471c
PB
6638 tmp2 = neon_load_reg(rm, pass);
6639 }
6640 if (prewiden) {
6641 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6642 }
9ee6e8bb
PB
6643 }
6644 switch (op) {
6645 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6646 gen_neon_addl(size);
9ee6e8bb 6647 break;
79b0e534 6648 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6649 gen_neon_subl(size);
9ee6e8bb
PB
6650 break;
6651 case 5: case 7: /* VABAL, VABDL */
6652 switch ((size << 1) | u) {
ad69471c
PB
6653 case 0:
6654 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6655 break;
6656 case 1:
6657 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6658 break;
6659 case 2:
6660 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6661 break;
6662 case 3:
6663 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6664 break;
6665 case 4:
6666 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6667 break;
6668 case 5:
6669 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6670 break;
9ee6e8bb
PB
6671 default: abort();
6672 }
7d1b0095
PM
6673 tcg_temp_free_i32(tmp2);
6674 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6675 break;
6676 case 8: case 9: case 10: case 11: case 12: case 13:
6677 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6678 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6679 break;
6680 case 14: /* Polynomial VMULL */
e5ca24cb 6681 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6682 tcg_temp_free_i32(tmp2);
6683 tcg_temp_free_i32(tmp);
e5ca24cb 6684 break;
695272dc
PM
6685 default: /* 15 is RESERVED: caught earlier */
6686 abort();
9ee6e8bb 6687 }
ebcd88ce
PM
6688 if (op == 13) {
6689 /* VQDMULL */
6690 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6691 neon_store_reg64(cpu_V0, rd + pass);
6692 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6693 /* Accumulate. */
ebcd88ce 6694 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6695 switch (op) {
4dc064e6
PM
6696 case 10: /* VMLSL */
6697 gen_neon_negl(cpu_V0, size);
6698 /* Fall through */
6699 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6700 gen_neon_addl(size);
9ee6e8bb
PB
6701 break;
6702 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6703 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6704 if (op == 11) {
6705 gen_neon_negl(cpu_V0, size);
6706 }
ad69471c
PB
6707 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6708 break;
9ee6e8bb
PB
6709 default:
6710 abort();
6711 }
ad69471c 6712 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6713 } else if (op == 4 || op == 6) {
6714 /* Narrowing operation. */
7d1b0095 6715 tmp = tcg_temp_new_i32();
79b0e534 6716 if (!u) {
9ee6e8bb 6717 switch (size) {
ad69471c
PB
6718 case 0:
6719 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6720 break;
6721 case 1:
6722 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6723 break;
6724 case 2:
6725 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6726 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6727 break;
9ee6e8bb
PB
6728 default: abort();
6729 }
6730 } else {
6731 switch (size) {
ad69471c
PB
6732 case 0:
6733 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6734 break;
6735 case 1:
6736 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6737 break;
6738 case 2:
6739 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6740 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6741 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6742 break;
9ee6e8bb
PB
6743 default: abort();
6744 }
6745 }
ad69471c
PB
6746 if (pass == 0) {
6747 tmp3 = tmp;
6748 } else {
6749 neon_store_reg(rd, 0, tmp3);
6750 neon_store_reg(rd, 1, tmp);
6751 }
9ee6e8bb
PB
6752 } else {
6753 /* Write back the result. */
ad69471c 6754 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6755 }
6756 }
6757 } else {
3e3326df
PM
6758 /* Two registers and a scalar. NB that for ops of this form
6759 * the ARM ARM labels bit 24 as Q, but it is in our variable
6760 * 'u', not 'q'.
6761 */
6762 if (size == 0) {
6763 return 1;
6764 }
9ee6e8bb 6765 switch (op) {
9ee6e8bb 6766 case 1: /* Float VMLA scalar */
9ee6e8bb 6767 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6768 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6769 if (size == 1) {
6770 return 1;
6771 }
6772 /* fall through */
6773 case 0: /* Integer VMLA scalar */
6774 case 4: /* Integer VMLS scalar */
6775 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6776 case 12: /* VQDMULH scalar */
6777 case 13: /* VQRDMULH scalar */
3e3326df
PM
6778 if (u && ((rd | rn) & 1)) {
6779 return 1;
6780 }
dd8fbd78
FN
6781 tmp = neon_get_scalar(size, rm);
6782 neon_store_scratch(0, tmp);
9ee6e8bb 6783 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6784 tmp = neon_load_scratch(0);
6785 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6786 if (op == 12) {
6787 if (size == 1) {
02da0b2d 6788 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6789 } else {
02da0b2d 6790 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6791 }
6792 } else if (op == 13) {
6793 if (size == 1) {
02da0b2d 6794 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6795 } else {
02da0b2d 6796 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6797 }
6798 } else if (op & 1) {
aa47cfdd
PM
6799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6800 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6801 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6802 } else {
6803 switch (size) {
dd8fbd78
FN
6804 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6805 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6806 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6807 default: abort();
9ee6e8bb
PB
6808 }
6809 }
7d1b0095 6810 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6811 if (op < 8) {
6812 /* Accumulate. */
dd8fbd78 6813 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6814 switch (op) {
6815 case 0:
dd8fbd78 6816 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6817 break;
6818 case 1:
aa47cfdd
PM
6819 {
6820 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6821 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6822 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6823 break;
aa47cfdd 6824 }
9ee6e8bb 6825 case 4:
dd8fbd78 6826 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6827 break;
6828 case 5:
aa47cfdd
PM
6829 {
6830 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6831 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6832 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6833 break;
aa47cfdd 6834 }
9ee6e8bb
PB
6835 default:
6836 abort();
6837 }
7d1b0095 6838 tcg_temp_free_i32(tmp2);
9ee6e8bb 6839 }
dd8fbd78 6840 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6841 }
6842 break;
9ee6e8bb 6843 case 3: /* VQDMLAL scalar */
9ee6e8bb 6844 case 7: /* VQDMLSL scalar */
9ee6e8bb 6845 case 11: /* VQDMULL scalar */
3e3326df 6846 if (u == 1) {
ad69471c 6847 return 1;
3e3326df
PM
6848 }
6849 /* fall through */
6850 case 2: /* VMLAL sclar */
6851 case 6: /* VMLSL scalar */
6852 case 10: /* VMULL scalar */
6853 if (rd & 1) {
6854 return 1;
6855 }
dd8fbd78 6856 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6857 /* We need a copy of tmp2 because gen_neon_mull
6858 * deletes it during pass 0. */
7d1b0095 6859 tmp4 = tcg_temp_new_i32();
c6067f04 6860 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6861 tmp3 = neon_load_reg(rn, 1);
ad69471c 6862
9ee6e8bb 6863 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6864 if (pass == 0) {
6865 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6866 } else {
dd8fbd78 6867 tmp = tmp3;
c6067f04 6868 tmp2 = tmp4;
9ee6e8bb 6869 }
ad69471c 6870 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6871 if (op != 11) {
6872 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6873 }
9ee6e8bb 6874 switch (op) {
4dc064e6
PM
6875 case 6:
6876 gen_neon_negl(cpu_V0, size);
6877 /* Fall through */
6878 case 2:
ad69471c 6879 gen_neon_addl(size);
9ee6e8bb
PB
6880 break;
6881 case 3: case 7:
ad69471c 6882 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6883 if (op == 7) {
6884 gen_neon_negl(cpu_V0, size);
6885 }
ad69471c 6886 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6887 break;
6888 case 10:
6889 /* no-op */
6890 break;
6891 case 11:
ad69471c 6892 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6893 break;
6894 default:
6895 abort();
6896 }
ad69471c 6897 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6898 }
dd8fbd78 6899
dd8fbd78 6900
9ee6e8bb
PB
6901 break;
6902 default: /* 14 and 15 are RESERVED */
6903 return 1;
6904 }
6905 }
6906 } else { /* size == 3 */
6907 if (!u) {
6908 /* Extract. */
9ee6e8bb 6909 imm = (insn >> 8) & 0xf;
ad69471c
PB
6910
6911 if (imm > 7 && !q)
6912 return 1;
6913
52579ea1
PM
6914 if (q && ((rd | rn | rm) & 1)) {
6915 return 1;
6916 }
6917
ad69471c
PB
6918 if (imm == 0) {
6919 neon_load_reg64(cpu_V0, rn);
6920 if (q) {
6921 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6922 }
ad69471c
PB
6923 } else if (imm == 8) {
6924 neon_load_reg64(cpu_V0, rn + 1);
6925 if (q) {
6926 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6927 }
ad69471c 6928 } else if (q) {
a7812ae4 6929 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6930 if (imm < 8) {
6931 neon_load_reg64(cpu_V0, rn);
a7812ae4 6932 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6933 } else {
6934 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6935 neon_load_reg64(tmp64, rm);
ad69471c
PB
6936 }
6937 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6938 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6939 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6940 if (imm < 8) {
6941 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6942 } else {
ad69471c
PB
6943 neon_load_reg64(cpu_V1, rm + 1);
6944 imm -= 8;
9ee6e8bb 6945 }
ad69471c 6946 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6947 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6948 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6949 tcg_temp_free_i64(tmp64);
ad69471c 6950 } else {
a7812ae4 6951 /* BUGFIX */
ad69471c 6952 neon_load_reg64(cpu_V0, rn);
a7812ae4 6953 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6954 neon_load_reg64(cpu_V1, rm);
a7812ae4 6955 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6956 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6957 }
6958 neon_store_reg64(cpu_V0, rd);
6959 if (q) {
6960 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6961 }
6962 } else if ((insn & (1 << 11)) == 0) {
6963 /* Two register misc. */
6964 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6965 size = (insn >> 18) & 3;
600b828c
PM
6966 /* UNDEF for unknown op values and bad op-size combinations */
6967 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6968 return 1;
6969 }
fe8fcf3d
PM
6970 if (neon_2rm_is_v8_op(op) &&
6971 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6972 return 1;
6973 }
fc2a9b37
PM
6974 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6975 q && ((rm | rd) & 1)) {
6976 return 1;
6977 }
9ee6e8bb 6978 switch (op) {
600b828c 6979 case NEON_2RM_VREV64:
9ee6e8bb 6980 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6981 tmp = neon_load_reg(rm, pass * 2);
6982 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6983 switch (size) {
dd8fbd78
FN
6984 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6985 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6986 case 2: /* no-op */ break;
6987 default: abort();
6988 }
dd8fbd78 6989 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6990 if (size == 2) {
dd8fbd78 6991 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6992 } else {
9ee6e8bb 6993 switch (size) {
dd8fbd78
FN
6994 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6995 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6996 default: abort();
6997 }
dd8fbd78 6998 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6999 }
7000 }
7001 break;
600b828c
PM
7002 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7003 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7004 for (pass = 0; pass < q + 1; pass++) {
7005 tmp = neon_load_reg(rm, pass * 2);
7006 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7007 tmp = neon_load_reg(rm, pass * 2 + 1);
7008 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7009 switch (size) {
7010 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7011 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7012 case 2: tcg_gen_add_i64(CPU_V001); break;
7013 default: abort();
7014 }
600b828c 7015 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7016 /* Accumulate. */
ad69471c
PB
7017 neon_load_reg64(cpu_V1, rd + pass);
7018 gen_neon_addl(size);
9ee6e8bb 7019 }
ad69471c 7020 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7021 }
7022 break;
600b828c 7023 case NEON_2RM_VTRN:
9ee6e8bb 7024 if (size == 2) {
a5a14945 7025 int n;
9ee6e8bb 7026 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7027 tmp = neon_load_reg(rm, n);
7028 tmp2 = neon_load_reg(rd, n + 1);
7029 neon_store_reg(rm, n, tmp2);
7030 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7031 }
7032 } else {
7033 goto elementwise;
7034 }
7035 break;
600b828c 7036 case NEON_2RM_VUZP:
02acedf9 7037 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7038 return 1;
9ee6e8bb
PB
7039 }
7040 break;
600b828c 7041 case NEON_2RM_VZIP:
d68a6f3a 7042 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7043 return 1;
9ee6e8bb
PB
7044 }
7045 break;
600b828c
PM
7046 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7047 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7048 if (rm & 1) {
7049 return 1;
7050 }
39d5492a 7051 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 7052 for (pass = 0; pass < 2; pass++) {
ad69471c 7053 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7054 tmp = tcg_temp_new_i32();
600b828c
PM
7055 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7056 tmp, cpu_V0);
ad69471c
PB
7057 if (pass == 0) {
7058 tmp2 = tmp;
7059 } else {
7060 neon_store_reg(rd, 0, tmp2);
7061 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7062 }
9ee6e8bb
PB
7063 }
7064 break;
600b828c 7065 case NEON_2RM_VSHLL:
fc2a9b37 7066 if (q || (rd & 1)) {
9ee6e8bb 7067 return 1;
600b828c 7068 }
ad69471c
PB
7069 tmp = neon_load_reg(rm, 0);
7070 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7071 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7072 if (pass == 1)
7073 tmp = tmp2;
7074 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7075 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7076 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7077 }
7078 break;
600b828c 7079 case NEON_2RM_VCVT_F16_F32:
d614a513 7080 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7081 q || (rm & 1)) {
7082 return 1;
7083 }
7d1b0095
PM
7084 tmp = tcg_temp_new_i32();
7085 tmp2 = tcg_temp_new_i32();
60011498 7086 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7087 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7088 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7089 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7090 tcg_gen_shli_i32(tmp2, tmp2, 16);
7091 tcg_gen_or_i32(tmp2, tmp2, tmp);
7092 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7093 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7094 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7095 neon_store_reg(rd, 0, tmp2);
7d1b0095 7096 tmp2 = tcg_temp_new_i32();
2d981da7 7097 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7098 tcg_gen_shli_i32(tmp2, tmp2, 16);
7099 tcg_gen_or_i32(tmp2, tmp2, tmp);
7100 neon_store_reg(rd, 1, tmp2);
7d1b0095 7101 tcg_temp_free_i32(tmp);
60011498 7102 break;
600b828c 7103 case NEON_2RM_VCVT_F32_F16:
d614a513 7104 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7105 q || (rd & 1)) {
7106 return 1;
7107 }
7d1b0095 7108 tmp3 = tcg_temp_new_i32();
60011498
PB
7109 tmp = neon_load_reg(rm, 0);
7110 tmp2 = neon_load_reg(rm, 1);
7111 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7112 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7113 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7114 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7115 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7116 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7117 tcg_temp_free_i32(tmp);
60011498 7118 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7119 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7120 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7121 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7122 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7123 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7124 tcg_temp_free_i32(tmp2);
7125 tcg_temp_free_i32(tmp3);
60011498 7126 break;
9d935509 7127 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7128 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7129 || ((rm | rd) & 1)) {
7130 return 1;
7131 }
7132 tmp = tcg_const_i32(rd);
7133 tmp2 = tcg_const_i32(rm);
7134
7135 /* Bit 6 is the lowest opcode bit; it distinguishes between
7136 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7137 */
7138 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7139
7140 if (op == NEON_2RM_AESE) {
7141 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7142 } else {
7143 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7144 }
7145 tcg_temp_free_i32(tmp);
7146 tcg_temp_free_i32(tmp2);
7147 tcg_temp_free_i32(tmp3);
7148 break;
f1ecb913 7149 case NEON_2RM_SHA1H:
d614a513 7150 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7151 || ((rm | rd) & 1)) {
7152 return 1;
7153 }
7154 tmp = tcg_const_i32(rd);
7155 tmp2 = tcg_const_i32(rm);
7156
7157 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7158
7159 tcg_temp_free_i32(tmp);
7160 tcg_temp_free_i32(tmp2);
7161 break;
7162 case NEON_2RM_SHA1SU1:
7163 if ((rm | rd) & 1) {
7164 return 1;
7165 }
7166 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7167 if (q) {
d614a513 7168 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7169 return 1;
7170 }
d614a513 7171 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7172 return 1;
7173 }
7174 tmp = tcg_const_i32(rd);
7175 tmp2 = tcg_const_i32(rm);
7176 if (q) {
7177 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7178 } else {
7179 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7180 }
7181 tcg_temp_free_i32(tmp);
7182 tcg_temp_free_i32(tmp2);
7183 break;
9ee6e8bb
PB
7184 default:
7185 elementwise:
7186 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7187 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7188 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7189 neon_reg_offset(rm, pass));
39d5492a 7190 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7191 } else {
dd8fbd78 7192 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7193 }
7194 switch (op) {
600b828c 7195 case NEON_2RM_VREV32:
9ee6e8bb 7196 switch (size) {
dd8fbd78
FN
7197 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7198 case 1: gen_swap_half(tmp); break;
600b828c 7199 default: abort();
9ee6e8bb
PB
7200 }
7201 break;
600b828c 7202 case NEON_2RM_VREV16:
dd8fbd78 7203 gen_rev16(tmp);
9ee6e8bb 7204 break;
600b828c 7205 case NEON_2RM_VCLS:
9ee6e8bb 7206 switch (size) {
dd8fbd78
FN
7207 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7208 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7209 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7210 default: abort();
9ee6e8bb
PB
7211 }
7212 break;
600b828c 7213 case NEON_2RM_VCLZ:
9ee6e8bb 7214 switch (size) {
dd8fbd78
FN
7215 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7216 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7217 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7218 default: abort();
9ee6e8bb
PB
7219 }
7220 break;
600b828c 7221 case NEON_2RM_VCNT:
dd8fbd78 7222 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7223 break;
600b828c 7224 case NEON_2RM_VMVN:
dd8fbd78 7225 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7226 break;
600b828c 7227 case NEON_2RM_VQABS:
9ee6e8bb 7228 switch (size) {
02da0b2d
PM
7229 case 0:
7230 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7231 break;
7232 case 1:
7233 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7234 break;
7235 case 2:
7236 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7237 break;
600b828c 7238 default: abort();
9ee6e8bb
PB
7239 }
7240 break;
600b828c 7241 case NEON_2RM_VQNEG:
9ee6e8bb 7242 switch (size) {
02da0b2d
PM
7243 case 0:
7244 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7245 break;
7246 case 1:
7247 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7248 break;
7249 case 2:
7250 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7251 break;
600b828c 7252 default: abort();
9ee6e8bb
PB
7253 }
7254 break;
600b828c 7255 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7256 tmp2 = tcg_const_i32(0);
9ee6e8bb 7257 switch(size) {
dd8fbd78
FN
7258 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7259 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7260 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7261 default: abort();
9ee6e8bb 7262 }
39d5492a 7263 tcg_temp_free_i32(tmp2);
600b828c 7264 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7265 tcg_gen_not_i32(tmp, tmp);
600b828c 7266 }
9ee6e8bb 7267 break;
600b828c 7268 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7269 tmp2 = tcg_const_i32(0);
9ee6e8bb 7270 switch(size) {
dd8fbd78
FN
7271 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7272 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7273 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7274 default: abort();
9ee6e8bb 7275 }
39d5492a 7276 tcg_temp_free_i32(tmp2);
600b828c 7277 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7278 tcg_gen_not_i32(tmp, tmp);
600b828c 7279 }
9ee6e8bb 7280 break;
600b828c 7281 case NEON_2RM_VCEQ0:
dd8fbd78 7282 tmp2 = tcg_const_i32(0);
9ee6e8bb 7283 switch(size) {
dd8fbd78
FN
7284 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7285 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7286 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7287 default: abort();
9ee6e8bb 7288 }
39d5492a 7289 tcg_temp_free_i32(tmp2);
9ee6e8bb 7290 break;
600b828c 7291 case NEON_2RM_VABS:
9ee6e8bb 7292 switch(size) {
dd8fbd78
FN
7293 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7294 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7295 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7296 default: abort();
9ee6e8bb
PB
7297 }
7298 break;
600b828c 7299 case NEON_2RM_VNEG:
dd8fbd78
FN
7300 tmp2 = tcg_const_i32(0);
7301 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7302 tcg_temp_free_i32(tmp2);
9ee6e8bb 7303 break;
600b828c 7304 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7305 {
7306 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7307 tmp2 = tcg_const_i32(0);
aa47cfdd 7308 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7309 tcg_temp_free_i32(tmp2);
aa47cfdd 7310 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7311 break;
aa47cfdd 7312 }
600b828c 7313 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7314 {
7315 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7316 tmp2 = tcg_const_i32(0);
aa47cfdd 7317 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7318 tcg_temp_free_i32(tmp2);
aa47cfdd 7319 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7320 break;
aa47cfdd 7321 }
600b828c 7322 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7323 {
7324 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7325 tmp2 = tcg_const_i32(0);
aa47cfdd 7326 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7327 tcg_temp_free_i32(tmp2);
aa47cfdd 7328 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7329 break;
aa47cfdd 7330 }
600b828c 7331 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7332 {
7333 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7334 tmp2 = tcg_const_i32(0);
aa47cfdd 7335 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7336 tcg_temp_free_i32(tmp2);
aa47cfdd 7337 tcg_temp_free_ptr(fpstatus);
0e326109 7338 break;
aa47cfdd 7339 }
600b828c 7340 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7341 {
7342 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7343 tmp2 = tcg_const_i32(0);
aa47cfdd 7344 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7345 tcg_temp_free_i32(tmp2);
aa47cfdd 7346 tcg_temp_free_ptr(fpstatus);
0e326109 7347 break;
aa47cfdd 7348 }
600b828c 7349 case NEON_2RM_VABS_F:
4373f3ce 7350 gen_vfp_abs(0);
9ee6e8bb 7351 break;
600b828c 7352 case NEON_2RM_VNEG_F:
4373f3ce 7353 gen_vfp_neg(0);
9ee6e8bb 7354 break;
600b828c 7355 case NEON_2RM_VSWP:
dd8fbd78
FN
7356 tmp2 = neon_load_reg(rd, pass);
7357 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7358 break;
600b828c 7359 case NEON_2RM_VTRN:
dd8fbd78 7360 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7361 switch (size) {
dd8fbd78
FN
7362 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7363 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7364 default: abort();
9ee6e8bb 7365 }
dd8fbd78 7366 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7367 break;
34f7b0a2
WN
7368 case NEON_2RM_VRINTN:
7369 case NEON_2RM_VRINTA:
7370 case NEON_2RM_VRINTM:
7371 case NEON_2RM_VRINTP:
7372 case NEON_2RM_VRINTZ:
7373 {
7374 TCGv_i32 tcg_rmode;
7375 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7376 int rmode;
7377
7378 if (op == NEON_2RM_VRINTZ) {
7379 rmode = FPROUNDING_ZERO;
7380 } else {
7381 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7382 }
7383
7384 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7385 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7386 cpu_env);
7387 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7388 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7389 cpu_env);
7390 tcg_temp_free_ptr(fpstatus);
7391 tcg_temp_free_i32(tcg_rmode);
7392 break;
7393 }
2ce70625
WN
7394 case NEON_2RM_VRINTX:
7395 {
7396 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7397 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7398 tcg_temp_free_ptr(fpstatus);
7399 break;
7400 }
901ad525
WN
7401 case NEON_2RM_VCVTAU:
7402 case NEON_2RM_VCVTAS:
7403 case NEON_2RM_VCVTNU:
7404 case NEON_2RM_VCVTNS:
7405 case NEON_2RM_VCVTPU:
7406 case NEON_2RM_VCVTPS:
7407 case NEON_2RM_VCVTMU:
7408 case NEON_2RM_VCVTMS:
7409 {
7410 bool is_signed = !extract32(insn, 7, 1);
7411 TCGv_ptr fpst = get_fpstatus_ptr(1);
7412 TCGv_i32 tcg_rmode, tcg_shift;
7413 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7414
7415 tcg_shift = tcg_const_i32(0);
7416 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7417 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7418 cpu_env);
7419
7420 if (is_signed) {
7421 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7422 tcg_shift, fpst);
7423 } else {
7424 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7425 tcg_shift, fpst);
7426 }
7427
7428 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7429 cpu_env);
7430 tcg_temp_free_i32(tcg_rmode);
7431 tcg_temp_free_i32(tcg_shift);
7432 tcg_temp_free_ptr(fpst);
7433 break;
7434 }
600b828c 7435 case NEON_2RM_VRECPE:
b6d4443a
AB
7436 {
7437 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7438 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7439 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7440 break;
b6d4443a 7441 }
600b828c 7442 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7443 {
7444 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7445 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7446 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7447 break;
c2fb418e 7448 }
600b828c 7449 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7450 {
7451 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7452 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7453 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7454 break;
b6d4443a 7455 }
600b828c 7456 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7457 {
7458 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7459 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7460 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7461 break;
c2fb418e 7462 }
600b828c 7463 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7464 gen_vfp_sito(0, 1);
9ee6e8bb 7465 break;
600b828c 7466 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7467 gen_vfp_uito(0, 1);
9ee6e8bb 7468 break;
600b828c 7469 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7470 gen_vfp_tosiz(0, 1);
9ee6e8bb 7471 break;
600b828c 7472 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7473 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7474 break;
7475 default:
600b828c
PM
7476 /* Reserved op values were caught by the
7477 * neon_2rm_sizes[] check earlier.
7478 */
7479 abort();
9ee6e8bb 7480 }
600b828c 7481 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7482 tcg_gen_st_f32(cpu_F0s, cpu_env,
7483 neon_reg_offset(rd, pass));
9ee6e8bb 7484 } else {
dd8fbd78 7485 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7486 }
7487 }
7488 break;
7489 }
7490 } else if ((insn & (1 << 10)) == 0) {
7491 /* VTBL, VTBX. */
56907d77
PM
7492 int n = ((insn >> 8) & 3) + 1;
7493 if ((rn + n) > 32) {
7494 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7495 * helper function running off the end of the register file.
7496 */
7497 return 1;
7498 }
7499 n <<= 3;
9ee6e8bb 7500 if (insn & (1 << 6)) {
8f8e3aa4 7501 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7502 } else {
7d1b0095 7503 tmp = tcg_temp_new_i32();
8f8e3aa4 7504 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7505 }
8f8e3aa4 7506 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7507 tmp4 = tcg_const_i32(rn);
7508 tmp5 = tcg_const_i32(n);
9ef39277 7509 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7510 tcg_temp_free_i32(tmp);
9ee6e8bb 7511 if (insn & (1 << 6)) {
8f8e3aa4 7512 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7513 } else {
7d1b0095 7514 tmp = tcg_temp_new_i32();
8f8e3aa4 7515 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7516 }
8f8e3aa4 7517 tmp3 = neon_load_reg(rm, 1);
9ef39277 7518 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7519 tcg_temp_free_i32(tmp5);
7520 tcg_temp_free_i32(tmp4);
8f8e3aa4 7521 neon_store_reg(rd, 0, tmp2);
3018f259 7522 neon_store_reg(rd, 1, tmp3);
7d1b0095 7523 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7524 } else if ((insn & 0x380) == 0) {
7525 /* VDUP */
133da6aa
JR
7526 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7527 return 1;
7528 }
9ee6e8bb 7529 if (insn & (1 << 19)) {
dd8fbd78 7530 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7531 } else {
dd8fbd78 7532 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7533 }
7534 if (insn & (1 << 16)) {
dd8fbd78 7535 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7536 } else if (insn & (1 << 17)) {
7537 if ((insn >> 18) & 1)
dd8fbd78 7538 gen_neon_dup_high16(tmp);
9ee6e8bb 7539 else
dd8fbd78 7540 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7541 }
7542 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7543 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7544 tcg_gen_mov_i32(tmp2, tmp);
7545 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7546 }
7d1b0095 7547 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7548 } else {
7549 return 1;
7550 }
7551 }
7552 }
7553 return 0;
7554}
7555
7dcc1f89 7556static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7557{
4b6a83fb
PM
7558 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7559 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7560
7561 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7562
7563 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7564 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7565 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7566 return 1;
7567 }
d614a513 7568 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7569 return disas_iwmmxt_insn(s, insn);
d614a513 7570 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7571 return disas_dsp_insn(s, insn);
c0f4af17
PM
7572 }
7573 return 1;
4b6a83fb
PM
7574 }
7575
7576 /* Otherwise treat as a generic register access */
7577 is64 = (insn & (1 << 25)) == 0;
7578 if (!is64 && ((insn & (1 << 4)) == 0)) {
7579 /* cdp */
7580 return 1;
7581 }
7582
7583 crm = insn & 0xf;
7584 if (is64) {
7585 crn = 0;
7586 opc1 = (insn >> 4) & 0xf;
7587 opc2 = 0;
7588 rt2 = (insn >> 16) & 0xf;
7589 } else {
7590 crn = (insn >> 16) & 0xf;
7591 opc1 = (insn >> 21) & 7;
7592 opc2 = (insn >> 5) & 7;
7593 rt2 = 0;
7594 }
7595 isread = (insn >> 20) & 1;
7596 rt = (insn >> 12) & 0xf;
7597
60322b39 7598 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7599 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7600 if (ri) {
7601 /* Check access permissions */
dcbff19b 7602 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7603 return 1;
7604 }
7605
c0f4af17 7606 if (ri->accessfn ||
d614a513 7607 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7608 /* Emit code to perform further access permissions checks at
7609 * runtime; this may result in an exception.
c0f4af17
PM
7610 * Note that on XScale all cp0..c13 registers do an access check
7611 * call in order to handle c15_cpar.
f59df3f2
PM
7612 */
7613 TCGv_ptr tmpptr;
3f208fd7 7614 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7615 uint32_t syndrome;
7616
7617 /* Note that since we are an implementation which takes an
7618 * exception on a trapped conditional instruction only if the
7619 * instruction passes its condition code check, we can take
7620 * advantage of the clause in the ARM ARM that allows us to set
7621 * the COND field in the instruction to 0xE in all cases.
7622 * We could fish the actual condition out of the insn (ARM)
7623 * or the condexec bits (Thumb) but it isn't necessary.
7624 */
7625 switch (cpnum) {
7626 case 14:
7627 if (is64) {
7628 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7629 isread, false);
8bcbf37c
PM
7630 } else {
7631 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7632 rt, isread, false);
8bcbf37c
PM
7633 }
7634 break;
7635 case 15:
7636 if (is64) {
7637 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7638 isread, false);
8bcbf37c
PM
7639 } else {
7640 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7641 rt, isread, false);
8bcbf37c
PM
7642 }
7643 break;
7644 default:
7645 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7646 * so this can only happen if this is an ARMv7 or earlier CPU,
7647 * in which case the syndrome information won't actually be
7648 * guest visible.
7649 */
d614a513 7650 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7651 syndrome = syn_uncategorized();
7652 break;
7653 }
7654
43bfa4a1 7655 gen_set_condexec(s);
3977ee5d 7656 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7657 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7658 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7659 tcg_isread = tcg_const_i32(isread);
7660 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7661 tcg_isread);
f59df3f2 7662 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7663 tcg_temp_free_i32(tcg_syn);
3f208fd7 7664 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7665 }
7666
4b6a83fb
PM
7667 /* Handle special cases first */
7668 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7669 case ARM_CP_NOP:
7670 return 0;
7671 case ARM_CP_WFI:
7672 if (isread) {
7673 return 1;
7674 }
eaed129d 7675 gen_set_pc_im(s, s->pc);
dcba3a8d 7676 s->base.is_jmp = DISAS_WFI;
2bee5105 7677 return 0;
4b6a83fb
PM
7678 default:
7679 break;
7680 }
7681
dcba3a8d 7682 if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7683 gen_io_start();
7684 }
7685
4b6a83fb
PM
7686 if (isread) {
7687 /* Read */
7688 if (is64) {
7689 TCGv_i64 tmp64;
7690 TCGv_i32 tmp;
7691 if (ri->type & ARM_CP_CONST) {
7692 tmp64 = tcg_const_i64(ri->resetvalue);
7693 } else if (ri->readfn) {
7694 TCGv_ptr tmpptr;
4b6a83fb
PM
7695 tmp64 = tcg_temp_new_i64();
7696 tmpptr = tcg_const_ptr(ri);
7697 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7698 tcg_temp_free_ptr(tmpptr);
7699 } else {
7700 tmp64 = tcg_temp_new_i64();
7701 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7702 }
7703 tmp = tcg_temp_new_i32();
ecc7b3aa 7704 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7705 store_reg(s, rt, tmp);
7706 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7707 tmp = tcg_temp_new_i32();
ecc7b3aa 7708 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7709 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7710 store_reg(s, rt2, tmp);
7711 } else {
39d5492a 7712 TCGv_i32 tmp;
4b6a83fb
PM
7713 if (ri->type & ARM_CP_CONST) {
7714 tmp = tcg_const_i32(ri->resetvalue);
7715 } else if (ri->readfn) {
7716 TCGv_ptr tmpptr;
4b6a83fb
PM
7717 tmp = tcg_temp_new_i32();
7718 tmpptr = tcg_const_ptr(ri);
7719 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7720 tcg_temp_free_ptr(tmpptr);
7721 } else {
7722 tmp = load_cpu_offset(ri->fieldoffset);
7723 }
7724 if (rt == 15) {
7725 /* Destination register of r15 for 32 bit loads sets
7726 * the condition codes from the high 4 bits of the value
7727 */
7728 gen_set_nzcv(tmp);
7729 tcg_temp_free_i32(tmp);
7730 } else {
7731 store_reg(s, rt, tmp);
7732 }
7733 }
7734 } else {
7735 /* Write */
7736 if (ri->type & ARM_CP_CONST) {
7737 /* If not forbidden by access permissions, treat as WI */
7738 return 0;
7739 }
7740
7741 if (is64) {
39d5492a 7742 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7743 TCGv_i64 tmp64 = tcg_temp_new_i64();
7744 tmplo = load_reg(s, rt);
7745 tmphi = load_reg(s, rt2);
7746 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7747 tcg_temp_free_i32(tmplo);
7748 tcg_temp_free_i32(tmphi);
7749 if (ri->writefn) {
7750 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7751 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7752 tcg_temp_free_ptr(tmpptr);
7753 } else {
7754 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7755 }
7756 tcg_temp_free_i64(tmp64);
7757 } else {
7758 if (ri->writefn) {
39d5492a 7759 TCGv_i32 tmp;
4b6a83fb 7760 TCGv_ptr tmpptr;
4b6a83fb
PM
7761 tmp = load_reg(s, rt);
7762 tmpptr = tcg_const_ptr(ri);
7763 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7764 tcg_temp_free_ptr(tmpptr);
7765 tcg_temp_free_i32(tmp);
7766 } else {
39d5492a 7767 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7768 store_cpu_offset(tmp, ri->fieldoffset);
7769 }
7770 }
2452731c
PM
7771 }
7772
dcba3a8d 7773 if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7774 /* I/O operations must end the TB here (whether read or write) */
7775 gen_io_end();
7776 gen_lookup_tb(s);
7777 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7778 /* We default to ending the TB on a coprocessor register write,
7779 * but allow this to be suppressed by the register definition
7780 * (usually only necessary to work around guest bugs).
7781 */
2452731c 7782 gen_lookup_tb(s);
4b6a83fb 7783 }
2452731c 7784
4b6a83fb
PM
7785 return 0;
7786 }
7787
626187d8
PM
7788 /* Unknown register; this might be a guest error or a QEMU
7789 * unimplemented feature.
7790 */
7791 if (is64) {
7792 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7793 "64 bit system register cp:%d opc1: %d crm:%d "
7794 "(%s)\n",
7795 isread ? "read" : "write", cpnum, opc1, crm,
7796 s->ns ? "non-secure" : "secure");
626187d8
PM
7797 } else {
7798 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7799 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7800 "(%s)\n",
7801 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7802 s->ns ? "non-secure" : "secure");
626187d8
PM
7803 }
7804
4a9a539f 7805 return 1;
9ee6e8bb
PB
7806}
7807
5e3f878a
PB
7808
7809/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7810static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7811{
39d5492a 7812 TCGv_i32 tmp;
7d1b0095 7813 tmp = tcg_temp_new_i32();
ecc7b3aa 7814 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7815 store_reg(s, rlow, tmp);
7d1b0095 7816 tmp = tcg_temp_new_i32();
5e3f878a 7817 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7818 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7819 store_reg(s, rhigh, tmp);
7820}
7821
7822/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7823static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7824{
a7812ae4 7825 TCGv_i64 tmp;
39d5492a 7826 TCGv_i32 tmp2;
5e3f878a 7827
36aa55dc 7828 /* Load value and extend to 64 bits. */
a7812ae4 7829 tmp = tcg_temp_new_i64();
5e3f878a
PB
7830 tmp2 = load_reg(s, rlow);
7831 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7832 tcg_temp_free_i32(tmp2);
5e3f878a 7833 tcg_gen_add_i64(val, val, tmp);
b75263d6 7834 tcg_temp_free_i64(tmp);
5e3f878a
PB
7835}
7836
7837/* load and add a 64-bit value from a register pair. */
a7812ae4 7838static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7839{
a7812ae4 7840 TCGv_i64 tmp;
39d5492a
PM
7841 TCGv_i32 tmpl;
7842 TCGv_i32 tmph;
5e3f878a
PB
7843
7844 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7845 tmpl = load_reg(s, rlow);
7846 tmph = load_reg(s, rhigh);
a7812ae4 7847 tmp = tcg_temp_new_i64();
36aa55dc 7848 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7849 tcg_temp_free_i32(tmpl);
7850 tcg_temp_free_i32(tmph);
5e3f878a 7851 tcg_gen_add_i64(val, val, tmp);
b75263d6 7852 tcg_temp_free_i64(tmp);
5e3f878a
PB
7853}
7854
c9f10124 7855/* Set N and Z flags from hi|lo. */
39d5492a 7856static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7857{
c9f10124
RH
7858 tcg_gen_mov_i32(cpu_NF, hi);
7859 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7860}
7861
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */
/* Emit code for a load-exclusive: load from *addr into rt (and rt2 for
 * the doubleword case, size == 3), recording the address and loaded
 * value in cpu_exclusive_addr/cpu_exclusive_val for the matching
 * store-exclusive.  size is log2 of the access size in bytes (0..3).
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Exclusive accesses are always aligned, in the guest's endianness */
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* Record that this insn is a load-exclusive (consumed elsewhere
     * in the translator).
     */
    s->is_ldex = true;

    if (size == 3) {
        /* Doubleword: a single aligned 64-bit load, then split the
         * halves into rt (low) and rt2 (high).
         */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Remember the monitored address for gen_store_exclusive() */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7894
/* CLREX: clear the local exclusive monitor.  Setting the recorded
 * address to -1 guarantees no later store-exclusive can match it,
 * because real monitored addresses are zero-extended 32-bit values.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7899
426f5abc 7900static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7901 TCGv_i32 addr, int size)
426f5abc 7902{
354161b3
EC
7903 TCGv_i32 t0, t1, t2;
7904 TCGv_i64 extaddr;
7905 TCGv taddr;
42a268c2
RH
7906 TCGLabel *done_label;
7907 TCGLabel *fail_label;
354161b3 7908 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
7909
7910 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7911 [addr] = {Rt};
7912 {Rd} = 0;
7913 } else {
7914 {Rd} = 1;
7915 } */
7916 fail_label = gen_new_label();
7917 done_label = gen_new_label();
03d05e2d
PM
7918 extaddr = tcg_temp_new_i64();
7919 tcg_gen_extu_i32_i64(extaddr, addr);
7920 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7921 tcg_temp_free_i64(extaddr);
7922
354161b3
EC
7923 taddr = gen_aa32_addr(s, addr, opc);
7924 t0 = tcg_temp_new_i32();
7925 t1 = load_reg(s, rt);
426f5abc 7926 if (size == 3) {
354161b3
EC
7927 TCGv_i64 o64 = tcg_temp_new_i64();
7928 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 7929
354161b3
EC
7930 t2 = load_reg(s, rt2);
7931 tcg_gen_concat_i32_i64(n64, t1, t2);
7932 tcg_temp_free_i32(t2);
7933 gen_aa32_frob64(s, n64);
03d05e2d 7934
354161b3
EC
7935 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7936 get_mem_index(s), opc);
7937 tcg_temp_free_i64(n64);
7938
7939 gen_aa32_frob64(s, o64);
7940 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7941 tcg_gen_extrl_i64_i32(t0, o64);
7942
7943 tcg_temp_free_i64(o64);
7944 } else {
7945 t2 = tcg_temp_new_i32();
7946 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7947 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7948 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7949 tcg_temp_free_i32(t2);
426f5abc 7950 }
354161b3
EC
7951 tcg_temp_free_i32(t1);
7952 tcg_temp_free(taddr);
7953 tcg_gen_mov_i32(cpu_R[rd], t0);
7954 tcg_temp_free_i32(t0);
426f5abc 7955 tcg_gen_br(done_label);
354161b3 7956
426f5abc
PB
7957 gen_set_label(fail_label);
7958 tcg_gen_movi_i32(cpu_R[rd], 1);
7959 gen_set_label(done_label);
03d05e2d 7960 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 7961}
426f5abc 7962
81465888
PM
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: store LR and
 * SPSR to the banked stack of @mode, optionally writing back the
 * banked SP.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Target EL3 for the Secure EL1 monitor-mode trap */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from the banked SP to the lower of the two words stored */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at the next word up */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust from addr (which now points at the SPSR slot) back to
         * the final SP value for the chosen addressing mode.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    /* CPU state (PC/condexec) was synced above; end the TB */
    s->base.is_jmp = DISAS_UPDATE;
}
8089
f4df2210 8090static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8091{
f4df2210 8092 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8093 TCGv_i32 tmp;
8094 TCGv_i32 tmp2;
8095 TCGv_i32 tmp3;
8096 TCGv_i32 addr;
a7812ae4 8097 TCGv_i64 tmp64;
9ee6e8bb 8098
e13886e3
PM
8099 /* M variants do not implement ARM mode; this must raise the INVSTATE
8100 * UsageFault exception.
8101 */
b53d8923 8102 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8103 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8104 default_exception_el(s));
8105 return;
b53d8923 8106 }
9ee6e8bb
PB
8107 cond = insn >> 28;
8108 if (cond == 0xf){
be5e7a76
DES
8109 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8110 * choose to UNDEF. In ARMv5 and above the space is used
8111 * for miscellaneous unconditional instructions.
8112 */
8113 ARCH(5);
8114
9ee6e8bb
PB
8115 /* Unconditional instructions. */
8116 if (((insn >> 25) & 7) == 1) {
8117 /* NEON Data processing. */
d614a513 8118 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8119 goto illegal_op;
d614a513 8120 }
9ee6e8bb 8121
7dcc1f89 8122 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8123 goto illegal_op;
7dcc1f89 8124 }
9ee6e8bb
PB
8125 return;
8126 }
8127 if ((insn & 0x0f100000) == 0x04000000) {
8128 /* NEON load/store. */
d614a513 8129 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8130 goto illegal_op;
d614a513 8131 }
9ee6e8bb 8132
7dcc1f89 8133 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8134 goto illegal_op;
7dcc1f89 8135 }
9ee6e8bb
PB
8136 return;
8137 }
6a57f3eb
WN
8138 if ((insn & 0x0f000e10) == 0x0e000a00) {
8139 /* VFP. */
7dcc1f89 8140 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8141 goto illegal_op;
8142 }
8143 return;
8144 }
3d185e5d
PM
8145 if (((insn & 0x0f30f000) == 0x0510f000) ||
8146 ((insn & 0x0f30f010) == 0x0710f000)) {
8147 if ((insn & (1 << 22)) == 0) {
8148 /* PLDW; v7MP */
d614a513 8149 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8150 goto illegal_op;
8151 }
8152 }
8153 /* Otherwise PLD; v5TE+ */
be5e7a76 8154 ARCH(5TE);
3d185e5d
PM
8155 return;
8156 }
8157 if (((insn & 0x0f70f000) == 0x0450f000) ||
8158 ((insn & 0x0f70f010) == 0x0650f000)) {
8159 ARCH(7);
8160 return; /* PLI; V7 */
8161 }
8162 if (((insn & 0x0f700000) == 0x04100000) ||
8163 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8164 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8165 goto illegal_op;
8166 }
8167 return; /* v7MP: Unallocated memory hint: must NOP */
8168 }
8169
8170 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8171 ARCH(6);
8172 /* setend */
9886ecdf
PB
8173 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8174 gen_helper_setend(cpu_env);
dcba3a8d 8175 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8176 }
8177 return;
8178 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8179 switch ((insn >> 4) & 0xf) {
8180 case 1: /* clrex */
8181 ARCH(6K);
426f5abc 8182 gen_clrex(s);
9ee6e8bb
PB
8183 return;
8184 case 4: /* dsb */
8185 case 5: /* dmb */
9ee6e8bb 8186 ARCH(7);
61e4c432 8187 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8188 return;
6df99dec
SS
8189 case 6: /* isb */
8190 /* We need to break the TB after this insn to execute
8191 * self-modifying code correctly and also to take
8192 * any pending interrupts immediately.
8193 */
0b609cc1 8194 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8195 return;
9ee6e8bb
PB
8196 default:
8197 goto illegal_op;
8198 }
8199 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8200 /* srs */
81465888
PM
8201 ARCH(6);
8202 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8203 return;
ea825eee 8204 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8205 /* rfe */
c67b6b71 8206 int32_t offset;
9ee6e8bb
PB
8207 if (IS_USER(s))
8208 goto illegal_op;
8209 ARCH(6);
8210 rn = (insn >> 16) & 0xf;
b0109805 8211 addr = load_reg(s, rn);
9ee6e8bb
PB
8212 i = (insn >> 23) & 3;
8213 switch (i) {
b0109805 8214 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8215 case 1: offset = 0; break; /* IA */
8216 case 2: offset = -8; break; /* DB */
b0109805 8217 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8218 default: abort();
8219 }
8220 if (offset)
b0109805
PB
8221 tcg_gen_addi_i32(addr, addr, offset);
8222 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8223 tmp = tcg_temp_new_i32();
12dcc321 8224 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8225 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8226 tmp2 = tcg_temp_new_i32();
12dcc321 8227 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8228 if (insn & (1 << 21)) {
8229 /* Base writeback. */
8230 switch (i) {
b0109805 8231 case 0: offset = -8; break;
c67b6b71
FN
8232 case 1: offset = 4; break;
8233 case 2: offset = -4; break;
b0109805 8234 case 3: offset = 0; break;
9ee6e8bb
PB
8235 default: abort();
8236 }
8237 if (offset)
b0109805
PB
8238 tcg_gen_addi_i32(addr, addr, offset);
8239 store_reg(s, rn, addr);
8240 } else {
7d1b0095 8241 tcg_temp_free_i32(addr);
9ee6e8bb 8242 }
b0109805 8243 gen_rfe(s, tmp, tmp2);
c67b6b71 8244 return;
9ee6e8bb
PB
8245 } else if ((insn & 0x0e000000) == 0x0a000000) {
8246 /* branch link and change to thumb (blx <offset>) */
8247 int32_t offset;
8248
8249 val = (uint32_t)s->pc;
7d1b0095 8250 tmp = tcg_temp_new_i32();
d9ba4830
PB
8251 tcg_gen_movi_i32(tmp, val);
8252 store_reg(s, 14, tmp);
9ee6e8bb
PB
8253 /* Sign-extend the 24-bit offset */
8254 offset = (((int32_t)insn) << 8) >> 8;
8255 /* offset * 4 + bit24 * 2 + (thumb bit) */
8256 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8257 /* pipeline offset */
8258 val += 4;
be5e7a76 8259 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8260 gen_bx_im(s, val);
9ee6e8bb
PB
8261 return;
8262 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8263 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8264 /* iWMMXt register transfer. */
c0f4af17 8265 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8266 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8267 return;
c0f4af17
PM
8268 }
8269 }
9ee6e8bb
PB
8270 }
8271 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8272 /* Coprocessor double register transfer. */
be5e7a76 8273 ARCH(5TE);
9ee6e8bb
PB
8274 } else if ((insn & 0x0f000010) == 0x0e000010) {
8275 /* Additional coprocessor register transfer. */
7997d92f 8276 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8277 uint32_t mask;
8278 uint32_t val;
8279 /* cps (privileged) */
8280 if (IS_USER(s))
8281 return;
8282 mask = val = 0;
8283 if (insn & (1 << 19)) {
8284 if (insn & (1 << 8))
8285 mask |= CPSR_A;
8286 if (insn & (1 << 7))
8287 mask |= CPSR_I;
8288 if (insn & (1 << 6))
8289 mask |= CPSR_F;
8290 if (insn & (1 << 18))
8291 val |= mask;
8292 }
7997d92f 8293 if (insn & (1 << 17)) {
9ee6e8bb
PB
8294 mask |= CPSR_M;
8295 val |= (insn & 0x1f);
8296 }
8297 if (mask) {
2fbac54b 8298 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8299 }
8300 return;
8301 }
8302 goto illegal_op;
8303 }
8304 if (cond != 0xe) {
8305 /* if not always execute, we generate a conditional jump to
8306 next instruction */
8307 s->condlabel = gen_new_label();
39fb730a 8308 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8309 s->condjmp = 1;
8310 }
8311 if ((insn & 0x0f900000) == 0x03000000) {
8312 if ((insn & (1 << 21)) == 0) {
8313 ARCH(6T2);
8314 rd = (insn >> 12) & 0xf;
8315 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8316 if ((insn & (1 << 22)) == 0) {
8317 /* MOVW */
7d1b0095 8318 tmp = tcg_temp_new_i32();
5e3f878a 8319 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8320 } else {
8321 /* MOVT */
5e3f878a 8322 tmp = load_reg(s, rd);
86831435 8323 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8324 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8325 }
5e3f878a 8326 store_reg(s, rd, tmp);
9ee6e8bb
PB
8327 } else {
8328 if (((insn >> 12) & 0xf) != 0xf)
8329 goto illegal_op;
8330 if (((insn >> 16) & 0xf) == 0) {
8331 gen_nop_hint(s, insn & 0xff);
8332 } else {
8333 /* CPSR = immediate */
8334 val = insn & 0xff;
8335 shift = ((insn >> 8) & 0xf) * 2;
8336 if (shift)
8337 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8338 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8339 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8340 i, val)) {
9ee6e8bb 8341 goto illegal_op;
7dcc1f89 8342 }
9ee6e8bb
PB
8343 }
8344 }
8345 } else if ((insn & 0x0f900000) == 0x01000000
8346 && (insn & 0x00000090) != 0x00000090) {
8347 /* miscellaneous instructions */
8348 op1 = (insn >> 21) & 3;
8349 sh = (insn >> 4) & 0xf;
8350 rm = insn & 0xf;
8351 switch (sh) {
8bfd0550
PM
8352 case 0x0: /* MSR, MRS */
8353 if (insn & (1 << 9)) {
8354 /* MSR (banked) and MRS (banked) */
8355 int sysm = extract32(insn, 16, 4) |
8356 (extract32(insn, 8, 1) << 4);
8357 int r = extract32(insn, 22, 1);
8358
8359 if (op1 & 1) {
8360 /* MSR (banked) */
8361 gen_msr_banked(s, r, sysm, rm);
8362 } else {
8363 /* MRS (banked) */
8364 int rd = extract32(insn, 12, 4);
8365
8366 gen_mrs_banked(s, r, sysm, rd);
8367 }
8368 break;
8369 }
8370
8371 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8372 if (op1 & 1) {
8373 /* PSR = reg */
2fbac54b 8374 tmp = load_reg(s, rm);
9ee6e8bb 8375 i = ((op1 & 2) != 0);
7dcc1f89 8376 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8377 goto illegal_op;
8378 } else {
8379 /* reg = PSR */
8380 rd = (insn >> 12) & 0xf;
8381 if (op1 & 2) {
8382 if (IS_USER(s))
8383 goto illegal_op;
d9ba4830 8384 tmp = load_cpu_field(spsr);
9ee6e8bb 8385 } else {
7d1b0095 8386 tmp = tcg_temp_new_i32();
9ef39277 8387 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8388 }
d9ba4830 8389 store_reg(s, rd, tmp);
9ee6e8bb
PB
8390 }
8391 break;
8392 case 0x1:
8393 if (op1 == 1) {
8394 /* branch/exchange thumb (bx). */
be5e7a76 8395 ARCH(4T);
d9ba4830
PB
8396 tmp = load_reg(s, rm);
8397 gen_bx(s, tmp);
9ee6e8bb
PB
8398 } else if (op1 == 3) {
8399 /* clz */
be5e7a76 8400 ARCH(5);
9ee6e8bb 8401 rd = (insn >> 12) & 0xf;
1497c961 8402 tmp = load_reg(s, rm);
7539a012 8403 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8404 store_reg(s, rd, tmp);
9ee6e8bb
PB
8405 } else {
8406 goto illegal_op;
8407 }
8408 break;
8409 case 0x2:
8410 if (op1 == 1) {
8411 ARCH(5J); /* bxj */
8412 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8413 tmp = load_reg(s, rm);
8414 gen_bx(s, tmp);
9ee6e8bb
PB
8415 } else {
8416 goto illegal_op;
8417 }
8418 break;
8419 case 0x3:
8420 if (op1 != 1)
8421 goto illegal_op;
8422
be5e7a76 8423 ARCH(5);
9ee6e8bb 8424 /* branch link/exchange thumb (blx) */
d9ba4830 8425 tmp = load_reg(s, rm);
7d1b0095 8426 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8427 tcg_gen_movi_i32(tmp2, s->pc);
8428 store_reg(s, 14, tmp2);
8429 gen_bx(s, tmp);
9ee6e8bb 8430 break;
eb0ecd5a
WN
8431 case 0x4:
8432 {
8433 /* crc32/crc32c */
8434 uint32_t c = extract32(insn, 8, 4);
8435
8436 /* Check this CPU supports ARMv8 CRC instructions.
8437 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8438 * Bits 8, 10 and 11 should be zero.
8439 */
d614a513 8440 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8441 (c & 0xd) != 0) {
8442 goto illegal_op;
8443 }
8444
8445 rn = extract32(insn, 16, 4);
8446 rd = extract32(insn, 12, 4);
8447
8448 tmp = load_reg(s, rn);
8449 tmp2 = load_reg(s, rm);
aa633469
PM
8450 if (op1 == 0) {
8451 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8452 } else if (op1 == 1) {
8453 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8454 }
eb0ecd5a
WN
8455 tmp3 = tcg_const_i32(1 << op1);
8456 if (c & 0x2) {
8457 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8458 } else {
8459 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8460 }
8461 tcg_temp_free_i32(tmp2);
8462 tcg_temp_free_i32(tmp3);
8463 store_reg(s, rd, tmp);
8464 break;
8465 }
9ee6e8bb 8466 case 0x5: /* saturating add/subtract */
be5e7a76 8467 ARCH(5TE);
9ee6e8bb
PB
8468 rd = (insn >> 12) & 0xf;
8469 rn = (insn >> 16) & 0xf;
b40d0353 8470 tmp = load_reg(s, rm);
5e3f878a 8471 tmp2 = load_reg(s, rn);
9ee6e8bb 8472 if (op1 & 2)
9ef39277 8473 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8474 if (op1 & 1)
9ef39277 8475 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8476 else
9ef39277 8477 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8478 tcg_temp_free_i32(tmp2);
5e3f878a 8479 store_reg(s, rd, tmp);
9ee6e8bb 8480 break;
49e14940 8481 case 7:
d4a2dc67
PM
8482 {
8483 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8484 switch (op1) {
19a6e31c
PM
8485 case 0:
8486 /* HLT */
8487 gen_hlt(s, imm16);
8488 break;
37e6456e
PM
8489 case 1:
8490 /* bkpt */
8491 ARCH(5);
8492 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8493 syn_aa32_bkpt(imm16, false),
8494 default_exception_el(s));
37e6456e
PM
8495 break;
8496 case 2:
8497 /* Hypervisor call (v7) */
8498 ARCH(7);
8499 if (IS_USER(s)) {
8500 goto illegal_op;
8501 }
8502 gen_hvc(s, imm16);
8503 break;
8504 case 3:
8505 /* Secure monitor call (v6+) */
8506 ARCH(6K);
8507 if (IS_USER(s)) {
8508 goto illegal_op;
8509 }
8510 gen_smc(s);
8511 break;
8512 default:
19a6e31c 8513 g_assert_not_reached();
49e14940 8514 }
9ee6e8bb 8515 break;
d4a2dc67 8516 }
9ee6e8bb
PB
8517 case 0x8: /* signed multiply */
8518 case 0xa:
8519 case 0xc:
8520 case 0xe:
be5e7a76 8521 ARCH(5TE);
9ee6e8bb
PB
8522 rs = (insn >> 8) & 0xf;
8523 rn = (insn >> 12) & 0xf;
8524 rd = (insn >> 16) & 0xf;
8525 if (op1 == 1) {
8526 /* (32 * 16) >> 16 */
5e3f878a
PB
8527 tmp = load_reg(s, rm);
8528 tmp2 = load_reg(s, rs);
9ee6e8bb 8529 if (sh & 4)
5e3f878a 8530 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8531 else
5e3f878a 8532 gen_sxth(tmp2);
a7812ae4
PB
8533 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8534 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8535 tmp = tcg_temp_new_i32();
ecc7b3aa 8536 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8537 tcg_temp_free_i64(tmp64);
9ee6e8bb 8538 if ((sh & 2) == 0) {
5e3f878a 8539 tmp2 = load_reg(s, rn);
9ef39277 8540 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8541 tcg_temp_free_i32(tmp2);
9ee6e8bb 8542 }
5e3f878a 8543 store_reg(s, rd, tmp);
9ee6e8bb
PB
8544 } else {
8545 /* 16 * 16 */
5e3f878a
PB
8546 tmp = load_reg(s, rm);
8547 tmp2 = load_reg(s, rs);
8548 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8549 tcg_temp_free_i32(tmp2);
9ee6e8bb 8550 if (op1 == 2) {
a7812ae4
PB
8551 tmp64 = tcg_temp_new_i64();
8552 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8553 tcg_temp_free_i32(tmp);
a7812ae4
PB
8554 gen_addq(s, tmp64, rn, rd);
8555 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8556 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8557 } else {
8558 if (op1 == 0) {
5e3f878a 8559 tmp2 = load_reg(s, rn);
9ef39277 8560 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8561 tcg_temp_free_i32(tmp2);
9ee6e8bb 8562 }
5e3f878a 8563 store_reg(s, rd, tmp);
9ee6e8bb
PB
8564 }
8565 }
8566 break;
8567 default:
8568 goto illegal_op;
8569 }
8570 } else if (((insn & 0x0e000000) == 0 &&
8571 (insn & 0x00000090) != 0x90) ||
8572 ((insn & 0x0e000000) == (1 << 25))) {
8573 int set_cc, logic_cc, shiftop;
8574
8575 op1 = (insn >> 21) & 0xf;
8576 set_cc = (insn >> 20) & 1;
8577 logic_cc = table_logic_cc[op1] & set_cc;
8578
8579 /* data processing instruction */
8580 if (insn & (1 << 25)) {
8581 /* immediate operand */
8582 val = insn & 0xff;
8583 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8584 if (shift) {
9ee6e8bb 8585 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8586 }
7d1b0095 8587 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8588 tcg_gen_movi_i32(tmp2, val);
8589 if (logic_cc && shift) {
8590 gen_set_CF_bit31(tmp2);
8591 }
9ee6e8bb
PB
8592 } else {
8593 /* register */
8594 rm = (insn) & 0xf;
e9bb4aa9 8595 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8596 shiftop = (insn >> 5) & 3;
8597 if (!(insn & (1 << 4))) {
8598 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8599 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8600 } else {
8601 rs = (insn >> 8) & 0xf;
8984bd2e 8602 tmp = load_reg(s, rs);
e9bb4aa9 8603 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8604 }
8605 }
8606 if (op1 != 0x0f && op1 != 0x0d) {
8607 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8608 tmp = load_reg(s, rn);
8609 } else {
39d5492a 8610 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8611 }
8612 rd = (insn >> 12) & 0xf;
8613 switch(op1) {
8614 case 0x00:
e9bb4aa9
JR
8615 tcg_gen_and_i32(tmp, tmp, tmp2);
8616 if (logic_cc) {
8617 gen_logic_CC(tmp);
8618 }
7dcc1f89 8619 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8620 break;
8621 case 0x01:
e9bb4aa9
JR
8622 tcg_gen_xor_i32(tmp, tmp, tmp2);
8623 if (logic_cc) {
8624 gen_logic_CC(tmp);
8625 }
7dcc1f89 8626 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8627 break;
8628 case 0x02:
8629 if (set_cc && rd == 15) {
8630 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8631 if (IS_USER(s)) {
9ee6e8bb 8632 goto illegal_op;
e9bb4aa9 8633 }
72485ec4 8634 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8635 gen_exception_return(s, tmp);
9ee6e8bb 8636 } else {
e9bb4aa9 8637 if (set_cc) {
72485ec4 8638 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8639 } else {
8640 tcg_gen_sub_i32(tmp, tmp, tmp2);
8641 }
7dcc1f89 8642 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8643 }
8644 break;
8645 case 0x03:
e9bb4aa9 8646 if (set_cc) {
72485ec4 8647 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8648 } else {
8649 tcg_gen_sub_i32(tmp, tmp2, tmp);
8650 }
7dcc1f89 8651 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8652 break;
8653 case 0x04:
e9bb4aa9 8654 if (set_cc) {
72485ec4 8655 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8656 } else {
8657 tcg_gen_add_i32(tmp, tmp, tmp2);
8658 }
7dcc1f89 8659 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8660 break;
8661 case 0x05:
e9bb4aa9 8662 if (set_cc) {
49b4c31e 8663 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8664 } else {
8665 gen_add_carry(tmp, tmp, tmp2);
8666 }
7dcc1f89 8667 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8668 break;
8669 case 0x06:
e9bb4aa9 8670 if (set_cc) {
2de68a49 8671 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8672 } else {
8673 gen_sub_carry(tmp, tmp, tmp2);
8674 }
7dcc1f89 8675 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8676 break;
8677 case 0x07:
e9bb4aa9 8678 if (set_cc) {
2de68a49 8679 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8680 } else {
8681 gen_sub_carry(tmp, tmp2, tmp);
8682 }
7dcc1f89 8683 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8684 break;
8685 case 0x08:
8686 if (set_cc) {
e9bb4aa9
JR
8687 tcg_gen_and_i32(tmp, tmp, tmp2);
8688 gen_logic_CC(tmp);
9ee6e8bb 8689 }
7d1b0095 8690 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8691 break;
8692 case 0x09:
8693 if (set_cc) {
e9bb4aa9
JR
8694 tcg_gen_xor_i32(tmp, tmp, tmp2);
8695 gen_logic_CC(tmp);
9ee6e8bb 8696 }
7d1b0095 8697 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8698 break;
8699 case 0x0a:
8700 if (set_cc) {
72485ec4 8701 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8702 }
7d1b0095 8703 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8704 break;
8705 case 0x0b:
8706 if (set_cc) {
72485ec4 8707 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8708 }
7d1b0095 8709 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8710 break;
8711 case 0x0c:
e9bb4aa9
JR
8712 tcg_gen_or_i32(tmp, tmp, tmp2);
8713 if (logic_cc) {
8714 gen_logic_CC(tmp);
8715 }
7dcc1f89 8716 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8717 break;
8718 case 0x0d:
8719 if (logic_cc && rd == 15) {
8720 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8721 if (IS_USER(s)) {
9ee6e8bb 8722 goto illegal_op;
e9bb4aa9
JR
8723 }
8724 gen_exception_return(s, tmp2);
9ee6e8bb 8725 } else {
e9bb4aa9
JR
8726 if (logic_cc) {
8727 gen_logic_CC(tmp2);
8728 }
7dcc1f89 8729 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8730 }
8731 break;
8732 case 0x0e:
f669df27 8733 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8734 if (logic_cc) {
8735 gen_logic_CC(tmp);
8736 }
7dcc1f89 8737 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8738 break;
8739 default:
8740 case 0x0f:
e9bb4aa9
JR
8741 tcg_gen_not_i32(tmp2, tmp2);
8742 if (logic_cc) {
8743 gen_logic_CC(tmp2);
8744 }
7dcc1f89 8745 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8746 break;
8747 }
e9bb4aa9 8748 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8749 tcg_temp_free_i32(tmp2);
e9bb4aa9 8750 }
9ee6e8bb
PB
8751 } else {
8752 /* other instructions */
8753 op1 = (insn >> 24) & 0xf;
8754 switch(op1) {
8755 case 0x0:
8756 case 0x1:
8757 /* multiplies, extra load/stores */
8758 sh = (insn >> 5) & 3;
8759 if (sh == 0) {
8760 if (op1 == 0x0) {
8761 rd = (insn >> 16) & 0xf;
8762 rn = (insn >> 12) & 0xf;
8763 rs = (insn >> 8) & 0xf;
8764 rm = (insn) & 0xf;
8765 op1 = (insn >> 20) & 0xf;
8766 switch (op1) {
8767 case 0: case 1: case 2: case 3: case 6:
8768 /* 32 bit mul */
5e3f878a
PB
8769 tmp = load_reg(s, rs);
8770 tmp2 = load_reg(s, rm);
8771 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8772 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8773 if (insn & (1 << 22)) {
8774 /* Subtract (mls) */
8775 ARCH(6T2);
5e3f878a
PB
8776 tmp2 = load_reg(s, rn);
8777 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8778 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8779 } else if (insn & (1 << 21)) {
8780 /* Add */
5e3f878a
PB
8781 tmp2 = load_reg(s, rn);
8782 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8783 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8784 }
8785 if (insn & (1 << 20))
5e3f878a
PB
8786 gen_logic_CC(tmp);
8787 store_reg(s, rd, tmp);
9ee6e8bb 8788 break;
8aac08b1
AJ
8789 case 4:
8790 /* 64 bit mul double accumulate (UMAAL) */
8791 ARCH(6);
8792 tmp = load_reg(s, rs);
8793 tmp2 = load_reg(s, rm);
8794 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8795 gen_addq_lo(s, tmp64, rn);
8796 gen_addq_lo(s, tmp64, rd);
8797 gen_storeq_reg(s, rn, rd, tmp64);
8798 tcg_temp_free_i64(tmp64);
8799 break;
8800 case 8: case 9: case 10: case 11:
8801 case 12: case 13: case 14: case 15:
8802 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8803 tmp = load_reg(s, rs);
8804 tmp2 = load_reg(s, rm);
8aac08b1 8805 if (insn & (1 << 22)) {
c9f10124 8806 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8807 } else {
c9f10124 8808 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8809 }
8810 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8811 TCGv_i32 al = load_reg(s, rn);
8812 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8813 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8814 tcg_temp_free_i32(al);
8815 tcg_temp_free_i32(ah);
9ee6e8bb 8816 }
8aac08b1 8817 if (insn & (1 << 20)) {
c9f10124 8818 gen_logicq_cc(tmp, tmp2);
8aac08b1 8819 }
c9f10124
RH
8820 store_reg(s, rn, tmp);
8821 store_reg(s, rd, tmp2);
9ee6e8bb 8822 break;
8aac08b1
AJ
8823 default:
8824 goto illegal_op;
9ee6e8bb
PB
8825 }
8826 } else {
8827 rn = (insn >> 16) & 0xf;
8828 rd = (insn >> 12) & 0xf;
8829 if (insn & (1 << 23)) {
8830 /* load/store exclusive */
2359bf80 8831 int op2 = (insn >> 8) & 3;
86753403 8832 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8833
8834 switch (op2) {
8835 case 0: /* lda/stl */
8836 if (op1 == 1) {
8837 goto illegal_op;
8838 }
8839 ARCH(8);
8840 break;
8841 case 1: /* reserved */
8842 goto illegal_op;
8843 case 2: /* ldaex/stlex */
8844 ARCH(8);
8845 break;
8846 case 3: /* ldrex/strex */
8847 if (op1) {
8848 ARCH(6K);
8849 } else {
8850 ARCH(6);
8851 }
8852 break;
8853 }
8854
3174f8e9 8855 addr = tcg_temp_local_new_i32();
98a46317 8856 load_reg_var(s, addr, rn);
2359bf80
MR
8857
8858 /* Since the emulation does not have barriers,
8859 the acquire/release semantics need no special
8860 handling */
8861 if (op2 == 0) {
8862 if (insn & (1 << 20)) {
8863 tmp = tcg_temp_new_i32();
8864 switch (op1) {
8865 case 0: /* lda */
9bb6558a
PM
8866 gen_aa32_ld32u_iss(s, tmp, addr,
8867 get_mem_index(s),
8868 rd | ISSIsAcqRel);
2359bf80
MR
8869 break;
8870 case 2: /* ldab */
9bb6558a
PM
8871 gen_aa32_ld8u_iss(s, tmp, addr,
8872 get_mem_index(s),
8873 rd | ISSIsAcqRel);
2359bf80
MR
8874 break;
8875 case 3: /* ldah */
9bb6558a
PM
8876 gen_aa32_ld16u_iss(s, tmp, addr,
8877 get_mem_index(s),
8878 rd | ISSIsAcqRel);
2359bf80
MR
8879 break;
8880 default:
8881 abort();
8882 }
8883 store_reg(s, rd, tmp);
8884 } else {
8885 rm = insn & 0xf;
8886 tmp = load_reg(s, rm);
8887 switch (op1) {
8888 case 0: /* stl */
9bb6558a
PM
8889 gen_aa32_st32_iss(s, tmp, addr,
8890 get_mem_index(s),
8891 rm | ISSIsAcqRel);
2359bf80
MR
8892 break;
8893 case 2: /* stlb */
9bb6558a
PM
8894 gen_aa32_st8_iss(s, tmp, addr,
8895 get_mem_index(s),
8896 rm | ISSIsAcqRel);
2359bf80
MR
8897 break;
8898 case 3: /* stlh */
9bb6558a
PM
8899 gen_aa32_st16_iss(s, tmp, addr,
8900 get_mem_index(s),
8901 rm | ISSIsAcqRel);
2359bf80
MR
8902 break;
8903 default:
8904 abort();
8905 }
8906 tcg_temp_free_i32(tmp);
8907 }
8908 } else if (insn & (1 << 20)) {
86753403
PB
8909 switch (op1) {
8910 case 0: /* ldrex */
426f5abc 8911 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8912 break;
8913 case 1: /* ldrexd */
426f5abc 8914 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8915 break;
8916 case 2: /* ldrexb */
426f5abc 8917 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8918 break;
8919 case 3: /* ldrexh */
426f5abc 8920 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8921 break;
8922 default:
8923 abort();
8924 }
9ee6e8bb
PB
8925 } else {
8926 rm = insn & 0xf;
86753403
PB
8927 switch (op1) {
8928 case 0: /* strex */
426f5abc 8929 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8930 break;
8931 case 1: /* strexd */
502e64fe 8932 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8933 break;
8934 case 2: /* strexb */
426f5abc 8935 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8936 break;
8937 case 3: /* strexh */
426f5abc 8938 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8939 break;
8940 default:
8941 abort();
8942 }
9ee6e8bb 8943 }
39d5492a 8944 tcg_temp_free_i32(addr);
9ee6e8bb 8945 } else {
cf12bce0
EC
8946 TCGv taddr;
8947 TCGMemOp opc = s->be_data;
8948
9ee6e8bb
PB
8949 /* SWP instruction */
8950 rm = (insn) & 0xf;
8951
9ee6e8bb 8952 if (insn & (1 << 22)) {
cf12bce0 8953 opc |= MO_UB;
9ee6e8bb 8954 } else {
cf12bce0 8955 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8956 }
cf12bce0
EC
8957
8958 addr = load_reg(s, rn);
8959 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8960 tcg_temp_free_i32(addr);
cf12bce0
EC
8961
8962 tmp = load_reg(s, rm);
8963 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8964 get_mem_index(s), opc);
8965 tcg_temp_free(taddr);
8966 store_reg(s, rd, tmp);
9ee6e8bb
PB
8967 }
8968 }
8969 } else {
8970 int address_offset;
3960c336 8971 bool load = insn & (1 << 20);
63f26fcf
PM
8972 bool wbit = insn & (1 << 21);
8973 bool pbit = insn & (1 << 24);
3960c336 8974 bool doubleword = false;
9bb6558a
PM
8975 ISSInfo issinfo;
8976
9ee6e8bb
PB
8977 /* Misc load/store */
8978 rn = (insn >> 16) & 0xf;
8979 rd = (insn >> 12) & 0xf;
3960c336 8980
9bb6558a
PM
8981 /* ISS not valid if writeback */
8982 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8983
3960c336
PM
8984 if (!load && (sh & 2)) {
8985 /* doubleword */
8986 ARCH(5TE);
8987 if (rd & 1) {
8988 /* UNPREDICTABLE; we choose to UNDEF */
8989 goto illegal_op;
8990 }
8991 load = (sh & 1) == 0;
8992 doubleword = true;
8993 }
8994
b0109805 8995 addr = load_reg(s, rn);
63f26fcf 8996 if (pbit) {
b0109805 8997 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 8998 }
9ee6e8bb 8999 address_offset = 0;
3960c336
PM
9000
9001 if (doubleword) {
9002 if (!load) {
9ee6e8bb 9003 /* store */
b0109805 9004 tmp = load_reg(s, rd);
12dcc321 9005 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9006 tcg_temp_free_i32(tmp);
b0109805
PB
9007 tcg_gen_addi_i32(addr, addr, 4);
9008 tmp = load_reg(s, rd + 1);
12dcc321 9009 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9010 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9011 } else {
9012 /* load */
5a839c0d 9013 tmp = tcg_temp_new_i32();
12dcc321 9014 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9015 store_reg(s, rd, tmp);
9016 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9017 tmp = tcg_temp_new_i32();
12dcc321 9018 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9019 rd++;
9ee6e8bb
PB
9020 }
9021 address_offset = -4;
3960c336
PM
9022 } else if (load) {
9023 /* load */
9024 tmp = tcg_temp_new_i32();
9025 switch (sh) {
9026 case 1:
9bb6558a
PM
9027 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9028 issinfo);
3960c336
PM
9029 break;
9030 case 2:
9bb6558a
PM
9031 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9032 issinfo);
3960c336
PM
9033 break;
9034 default:
9035 case 3:
9bb6558a
PM
9036 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9037 issinfo);
3960c336
PM
9038 break;
9039 }
9ee6e8bb
PB
9040 } else {
9041 /* store */
b0109805 9042 tmp = load_reg(s, rd);
9bb6558a 9043 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9044 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9045 }
9046 /* Perform base writeback before the loaded value to
9047 ensure correct behavior with overlapping index registers.
b6af0975 9048 ldrd with base writeback is undefined if the
9ee6e8bb 9049 destination and index registers overlap. */
63f26fcf 9050 if (!pbit) {
b0109805
PB
9051 gen_add_datah_offset(s, insn, address_offset, addr);
9052 store_reg(s, rn, addr);
63f26fcf 9053 } else if (wbit) {
9ee6e8bb 9054 if (address_offset)
b0109805
PB
9055 tcg_gen_addi_i32(addr, addr, address_offset);
9056 store_reg(s, rn, addr);
9057 } else {
7d1b0095 9058 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9059 }
9060 if (load) {
9061 /* Complete the load. */
b0109805 9062 store_reg(s, rd, tmp);
9ee6e8bb
PB
9063 }
9064 }
9065 break;
9066 case 0x4:
9067 case 0x5:
9068 goto do_ldst;
9069 case 0x6:
9070 case 0x7:
9071 if (insn & (1 << 4)) {
9072 ARCH(6);
9073 /* Armv6 Media instructions. */
9074 rm = insn & 0xf;
9075 rn = (insn >> 16) & 0xf;
2c0262af 9076 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9077 rs = (insn >> 8) & 0xf;
9078 switch ((insn >> 23) & 3) {
9079 case 0: /* Parallel add/subtract. */
9080 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9081 tmp = load_reg(s, rn);
9082 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9083 sh = (insn >> 5) & 7;
9084 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9085 goto illegal_op;
6ddbc6e4 9086 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9087 tcg_temp_free_i32(tmp2);
6ddbc6e4 9088 store_reg(s, rd, tmp);
9ee6e8bb
PB
9089 break;
9090 case 1:
9091 if ((insn & 0x00700020) == 0) {
6c95676b 9092 /* Halfword pack. */
3670669c
PB
9093 tmp = load_reg(s, rn);
9094 tmp2 = load_reg(s, rm);
9ee6e8bb 9095 shift = (insn >> 7) & 0x1f;
3670669c
PB
9096 if (insn & (1 << 6)) {
9097 /* pkhtb */
22478e79
AZ
9098 if (shift == 0)
9099 shift = 31;
9100 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9101 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9102 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9103 } else {
9104 /* pkhbt */
22478e79
AZ
9105 if (shift)
9106 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9107 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9108 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9109 }
9110 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9111 tcg_temp_free_i32(tmp2);
3670669c 9112 store_reg(s, rd, tmp);
9ee6e8bb
PB
9113 } else if ((insn & 0x00200020) == 0x00200000) {
9114 /* [us]sat */
6ddbc6e4 9115 tmp = load_reg(s, rm);
9ee6e8bb
PB
9116 shift = (insn >> 7) & 0x1f;
9117 if (insn & (1 << 6)) {
9118 if (shift == 0)
9119 shift = 31;
6ddbc6e4 9120 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9121 } else {
6ddbc6e4 9122 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9123 }
9124 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9125 tmp2 = tcg_const_i32(sh);
9126 if (insn & (1 << 22))
9ef39277 9127 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9128 else
9ef39277 9129 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9130 tcg_temp_free_i32(tmp2);
6ddbc6e4 9131 store_reg(s, rd, tmp);
9ee6e8bb
PB
9132 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9133 /* [us]sat16 */
6ddbc6e4 9134 tmp = load_reg(s, rm);
9ee6e8bb 9135 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9136 tmp2 = tcg_const_i32(sh);
9137 if (insn & (1 << 22))
9ef39277 9138 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9139 else
9ef39277 9140 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9141 tcg_temp_free_i32(tmp2);
6ddbc6e4 9142 store_reg(s, rd, tmp);
9ee6e8bb
PB
9143 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9144 /* Select bytes. */
6ddbc6e4
PB
9145 tmp = load_reg(s, rn);
9146 tmp2 = load_reg(s, rm);
7d1b0095 9147 tmp3 = tcg_temp_new_i32();
0ecb72a5 9148 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9149 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9150 tcg_temp_free_i32(tmp3);
9151 tcg_temp_free_i32(tmp2);
6ddbc6e4 9152 store_reg(s, rd, tmp);
9ee6e8bb 9153 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9154 tmp = load_reg(s, rm);
9ee6e8bb 9155 shift = (insn >> 10) & 3;
1301f322 9156 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9157 rotate, a shift is sufficient. */
9158 if (shift != 0)
f669df27 9159 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9160 op1 = (insn >> 20) & 7;
9161 switch (op1) {
5e3f878a
PB
9162 case 0: gen_sxtb16(tmp); break;
9163 case 2: gen_sxtb(tmp); break;
9164 case 3: gen_sxth(tmp); break;
9165 case 4: gen_uxtb16(tmp); break;
9166 case 6: gen_uxtb(tmp); break;
9167 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9168 default: goto illegal_op;
9169 }
9170 if (rn != 15) {
5e3f878a 9171 tmp2 = load_reg(s, rn);
9ee6e8bb 9172 if ((op1 & 3) == 0) {
5e3f878a 9173 gen_add16(tmp, tmp2);
9ee6e8bb 9174 } else {
5e3f878a 9175 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9176 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9177 }
9178 }
6c95676b 9179 store_reg(s, rd, tmp);
9ee6e8bb
PB
9180 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9181 /* rev */
b0109805 9182 tmp = load_reg(s, rm);
9ee6e8bb
PB
9183 if (insn & (1 << 22)) {
9184 if (insn & (1 << 7)) {
b0109805 9185 gen_revsh(tmp);
9ee6e8bb
PB
9186 } else {
9187 ARCH(6T2);
b0109805 9188 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9189 }
9190 } else {
9191 if (insn & (1 << 7))
b0109805 9192 gen_rev16(tmp);
9ee6e8bb 9193 else
66896cb8 9194 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9195 }
b0109805 9196 store_reg(s, rd, tmp);
9ee6e8bb
PB
9197 } else {
9198 goto illegal_op;
9199 }
9200 break;
9201 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9202 switch ((insn >> 20) & 0x7) {
9203 case 5:
9204 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9205 /* op2 not 00x or 11x : UNDEF */
9206 goto illegal_op;
9207 }
838fa72d
AJ
9208 /* Signed multiply most significant [accumulate].
9209 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9210 tmp = load_reg(s, rm);
9211 tmp2 = load_reg(s, rs);
a7812ae4 9212 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9213
955a7dd5 9214 if (rd != 15) {
838fa72d 9215 tmp = load_reg(s, rd);
9ee6e8bb 9216 if (insn & (1 << 6)) {
838fa72d 9217 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9218 } else {
838fa72d 9219 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9220 }
9221 }
838fa72d
AJ
9222 if (insn & (1 << 5)) {
9223 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9224 }
9225 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9226 tmp = tcg_temp_new_i32();
ecc7b3aa 9227 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9228 tcg_temp_free_i64(tmp64);
955a7dd5 9229 store_reg(s, rn, tmp);
41e9564d
PM
9230 break;
9231 case 0:
9232 case 4:
9233 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9234 if (insn & (1 << 7)) {
9235 goto illegal_op;
9236 }
9237 tmp = load_reg(s, rm);
9238 tmp2 = load_reg(s, rs);
9ee6e8bb 9239 if (insn & (1 << 5))
5e3f878a
PB
9240 gen_swap_half(tmp2);
9241 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9242 if (insn & (1 << 22)) {
5e3f878a 9243 /* smlald, smlsld */
33bbd75a
PC
9244 TCGv_i64 tmp64_2;
9245
a7812ae4 9246 tmp64 = tcg_temp_new_i64();
33bbd75a 9247 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9248 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9249 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9250 tcg_temp_free_i32(tmp);
33bbd75a
PC
9251 tcg_temp_free_i32(tmp2);
9252 if (insn & (1 << 6)) {
9253 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9254 } else {
9255 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9256 }
9257 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9258 gen_addq(s, tmp64, rd, rn);
9259 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9260 tcg_temp_free_i64(tmp64);
9ee6e8bb 9261 } else {
5e3f878a 9262 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9263 if (insn & (1 << 6)) {
9264 /* This subtraction cannot overflow. */
9265 tcg_gen_sub_i32(tmp, tmp, tmp2);
9266 } else {
9267 /* This addition cannot overflow 32 bits;
9268 * however it may overflow considered as a
9269 * signed operation, in which case we must set
9270 * the Q flag.
9271 */
9272 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9273 }
9274 tcg_temp_free_i32(tmp2);
22478e79 9275 if (rd != 15)
9ee6e8bb 9276 {
22478e79 9277 tmp2 = load_reg(s, rd);
9ef39277 9278 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9279 tcg_temp_free_i32(tmp2);
9ee6e8bb 9280 }
22478e79 9281 store_reg(s, rn, tmp);
9ee6e8bb 9282 }
41e9564d 9283 break;
b8b8ea05
PM
9284 case 1:
9285 case 3:
9286 /* SDIV, UDIV */
d614a513 9287 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9288 goto illegal_op;
9289 }
9290 if (((insn >> 5) & 7) || (rd != 15)) {
9291 goto illegal_op;
9292 }
9293 tmp = load_reg(s, rm);
9294 tmp2 = load_reg(s, rs);
9295 if (insn & (1 << 21)) {
9296 gen_helper_udiv(tmp, tmp, tmp2);
9297 } else {
9298 gen_helper_sdiv(tmp, tmp, tmp2);
9299 }
9300 tcg_temp_free_i32(tmp2);
9301 store_reg(s, rn, tmp);
9302 break;
41e9564d
PM
9303 default:
9304 goto illegal_op;
9ee6e8bb
PB
9305 }
9306 break;
9307 case 3:
9308 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9309 switch (op1) {
9310 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9311 ARCH(6);
9312 tmp = load_reg(s, rm);
9313 tmp2 = load_reg(s, rs);
9314 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9315 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9316 if (rd != 15) {
9317 tmp2 = load_reg(s, rd);
6ddbc6e4 9318 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9319 tcg_temp_free_i32(tmp2);
9ee6e8bb 9320 }
ded9d295 9321 store_reg(s, rn, tmp);
9ee6e8bb
PB
9322 break;
9323 case 0x20: case 0x24: case 0x28: case 0x2c:
9324 /* Bitfield insert/clear. */
9325 ARCH(6T2);
9326 shift = (insn >> 7) & 0x1f;
9327 i = (insn >> 16) & 0x1f;
45140a57
KB
9328 if (i < shift) {
9329 /* UNPREDICTABLE; we choose to UNDEF */
9330 goto illegal_op;
9331 }
9ee6e8bb
PB
9332 i = i + 1 - shift;
9333 if (rm == 15) {
7d1b0095 9334 tmp = tcg_temp_new_i32();
5e3f878a 9335 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9336 } else {
5e3f878a 9337 tmp = load_reg(s, rm);
9ee6e8bb
PB
9338 }
9339 if (i != 32) {
5e3f878a 9340 tmp2 = load_reg(s, rd);
d593c48e 9341 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9342 tcg_temp_free_i32(tmp2);
9ee6e8bb 9343 }
5e3f878a 9344 store_reg(s, rd, tmp);
9ee6e8bb
PB
9345 break;
9346 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9347 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9348 ARCH(6T2);
5e3f878a 9349 tmp = load_reg(s, rm);
9ee6e8bb
PB
9350 shift = (insn >> 7) & 0x1f;
9351 i = ((insn >> 16) & 0x1f) + 1;
9352 if (shift + i > 32)
9353 goto illegal_op;
9354 if (i < 32) {
9355 if (op1 & 0x20) {
59a71b4c 9356 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9357 } else {
59a71b4c 9358 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9359 }
9360 }
5e3f878a 9361 store_reg(s, rd, tmp);
9ee6e8bb
PB
9362 break;
9363 default:
9364 goto illegal_op;
9365 }
9366 break;
9367 }
9368 break;
9369 }
9370 do_ldst:
9371 /* Check for undefined extension instructions
9372 * per the ARM Bible IE:
9373 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9374 */
9375 sh = (0xf << 20) | (0xf << 4);
9376 if (op1 == 0x7 && ((insn & sh) == sh))
9377 {
9378 goto illegal_op;
9379 }
9380 /* load/store byte/word */
9381 rn = (insn >> 16) & 0xf;
9382 rd = (insn >> 12) & 0xf;
b0109805 9383 tmp2 = load_reg(s, rn);
a99caa48
PM
9384 if ((insn & 0x01200000) == 0x00200000) {
9385 /* ldrt/strt */
579d21cc 9386 i = get_a32_user_mem_index(s);
a99caa48
PM
9387 } else {
9388 i = get_mem_index(s);
9389 }
9ee6e8bb 9390 if (insn & (1 << 24))
b0109805 9391 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9392 if (insn & (1 << 20)) {
9393 /* load */
5a839c0d 9394 tmp = tcg_temp_new_i32();
9ee6e8bb 9395 if (insn & (1 << 22)) {
9bb6558a 9396 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9397 } else {
9bb6558a 9398 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9399 }
9ee6e8bb
PB
9400 } else {
9401 /* store */
b0109805 9402 tmp = load_reg(s, rd);
5a839c0d 9403 if (insn & (1 << 22)) {
9bb6558a 9404 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9405 } else {
9bb6558a 9406 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9407 }
9408 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9409 }
9410 if (!(insn & (1 << 24))) {
b0109805
PB
9411 gen_add_data_offset(s, insn, tmp2);
9412 store_reg(s, rn, tmp2);
9413 } else if (insn & (1 << 21)) {
9414 store_reg(s, rn, tmp2);
9415 } else {
7d1b0095 9416 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9417 }
9418 if (insn & (1 << 20)) {
9419 /* Complete the load. */
7dcc1f89 9420 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9421 }
9422 break;
9423 case 0x08:
9424 case 0x09:
9425 {
da3e53dd
PM
9426 int j, n, loaded_base;
9427 bool exc_return = false;
9428 bool is_load = extract32(insn, 20, 1);
9429 bool user = false;
39d5492a 9430 TCGv_i32 loaded_var;
9ee6e8bb
PB
9431 /* load/store multiple words */
9432 /* XXX: store correct base if write back */
9ee6e8bb 9433 if (insn & (1 << 22)) {
da3e53dd 9434 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9435 if (IS_USER(s))
9436 goto illegal_op; /* only usable in supervisor mode */
9437
da3e53dd
PM
9438 if (is_load && extract32(insn, 15, 1)) {
9439 exc_return = true;
9440 } else {
9441 user = true;
9442 }
9ee6e8bb
PB
9443 }
9444 rn = (insn >> 16) & 0xf;
b0109805 9445 addr = load_reg(s, rn);
9ee6e8bb
PB
9446
9447 /* compute total size */
9448 loaded_base = 0;
39d5492a 9449 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9450 n = 0;
9451 for(i=0;i<16;i++) {
9452 if (insn & (1 << i))
9453 n++;
9454 }
9455 /* XXX: test invalid n == 0 case ? */
9456 if (insn & (1 << 23)) {
9457 if (insn & (1 << 24)) {
9458 /* pre increment */
b0109805 9459 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9460 } else {
9461 /* post increment */
9462 }
9463 } else {
9464 if (insn & (1 << 24)) {
9465 /* pre decrement */
b0109805 9466 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9467 } else {
9468 /* post decrement */
9469 if (n != 1)
b0109805 9470 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9471 }
9472 }
9473 j = 0;
9474 for(i=0;i<16;i++) {
9475 if (insn & (1 << i)) {
da3e53dd 9476 if (is_load) {
9ee6e8bb 9477 /* load */
5a839c0d 9478 tmp = tcg_temp_new_i32();
12dcc321 9479 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9480 if (user) {
b75263d6 9481 tmp2 = tcg_const_i32(i);
1ce94f81 9482 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9483 tcg_temp_free_i32(tmp2);
7d1b0095 9484 tcg_temp_free_i32(tmp);
9ee6e8bb 9485 } else if (i == rn) {
b0109805 9486 loaded_var = tmp;
9ee6e8bb 9487 loaded_base = 1;
fb0e8e79
PM
9488 } else if (rn == 15 && exc_return) {
9489 store_pc_exc_ret(s, tmp);
9ee6e8bb 9490 } else {
7dcc1f89 9491 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9492 }
9493 } else {
9494 /* store */
9495 if (i == 15) {
9496 /* special case: r15 = PC + 8 */
9497 val = (long)s->pc + 4;
7d1b0095 9498 tmp = tcg_temp_new_i32();
b0109805 9499 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9500 } else if (user) {
7d1b0095 9501 tmp = tcg_temp_new_i32();
b75263d6 9502 tmp2 = tcg_const_i32(i);
9ef39277 9503 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9504 tcg_temp_free_i32(tmp2);
9ee6e8bb 9505 } else {
b0109805 9506 tmp = load_reg(s, i);
9ee6e8bb 9507 }
12dcc321 9508 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9509 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9510 }
9511 j++;
9512 /* no need to add after the last transfer */
9513 if (j != n)
b0109805 9514 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9515 }
9516 }
9517 if (insn & (1 << 21)) {
9518 /* write back */
9519 if (insn & (1 << 23)) {
9520 if (insn & (1 << 24)) {
9521 /* pre increment */
9522 } else {
9523 /* post increment */
b0109805 9524 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9525 }
9526 } else {
9527 if (insn & (1 << 24)) {
9528 /* pre decrement */
9529 if (n != 1)
b0109805 9530 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9531 } else {
9532 /* post decrement */
b0109805 9533 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9534 }
9535 }
b0109805
PB
9536 store_reg(s, rn, addr);
9537 } else {
7d1b0095 9538 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9539 }
9540 if (loaded_base) {
b0109805 9541 store_reg(s, rn, loaded_var);
9ee6e8bb 9542 }
da3e53dd 9543 if (exc_return) {
9ee6e8bb 9544 /* Restore CPSR from SPSR. */
d9ba4830 9545 tmp = load_cpu_field(spsr);
235ea1f5 9546 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9547 tcg_temp_free_i32(tmp);
b29fd33d 9548 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9549 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9550 }
9551 }
9552 break;
9553 case 0xa:
9554 case 0xb:
9555 {
9556 int32_t offset;
9557
9558 /* branch (and link) */
9559 val = (int32_t)s->pc;
9560 if (insn & (1 << 24)) {
7d1b0095 9561 tmp = tcg_temp_new_i32();
5e3f878a
PB
9562 tcg_gen_movi_i32(tmp, val);
9563 store_reg(s, 14, tmp);
9ee6e8bb 9564 }
534df156
PM
9565 offset = sextract32(insn << 2, 0, 26);
9566 val += offset + 4;
9ee6e8bb
PB
9567 gen_jmp(s, val);
9568 }
9569 break;
9570 case 0xc:
9571 case 0xd:
9572 case 0xe:
6a57f3eb
WN
9573 if (((insn >> 8) & 0xe) == 10) {
9574 /* VFP. */
7dcc1f89 9575 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9576 goto illegal_op;
9577 }
7dcc1f89 9578 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9579 /* Coprocessor. */
9ee6e8bb 9580 goto illegal_op;
6a57f3eb 9581 }
9ee6e8bb
PB
9582 break;
9583 case 0xf:
9584 /* swi */
eaed129d 9585 gen_set_pc_im(s, s->pc);
d4a2dc67 9586 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9587 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9588 break;
9589 default:
9590 illegal_op:
73710361
GB
9591 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9592 default_exception_el(s));
9ee6e8bb
PB
9593 break;
9594 }
9595 }
9596}
9597
/*
 * Thumb-2 data-processing opcodes 0..7 encode the logical operations
 * (AND, BIC, ORR, ORN, EOR, ...); return nonzero exactly for those.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
9604
9605/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9606 then set condition code flags based on the result of the operation.
9607 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9608 to the high bit of T1.
9609 Returns zero if the opcode is valid. */
9610
9611static int
39d5492a
PM
9612gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9613 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9614{
9615 int logic_cc;
9616
9617 logic_cc = 0;
9618 switch (op) {
9619 case 0: /* and */
396e467c 9620 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9621 logic_cc = conds;
9622 break;
9623 case 1: /* bic */
f669df27 9624 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9625 logic_cc = conds;
9626 break;
9627 case 2: /* orr */
396e467c 9628 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9629 logic_cc = conds;
9630 break;
9631 case 3: /* orn */
29501f1b 9632 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9633 logic_cc = conds;
9634 break;
9635 case 4: /* eor */
396e467c 9636 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9637 logic_cc = conds;
9638 break;
9639 case 8: /* add */
9640 if (conds)
72485ec4 9641 gen_add_CC(t0, t0, t1);
9ee6e8bb 9642 else
396e467c 9643 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9644 break;
9645 case 10: /* adc */
9646 if (conds)
49b4c31e 9647 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9648 else
396e467c 9649 gen_adc(t0, t1);
9ee6e8bb
PB
9650 break;
9651 case 11: /* sbc */
2de68a49
RH
9652 if (conds) {
9653 gen_sbc_CC(t0, t0, t1);
9654 } else {
396e467c 9655 gen_sub_carry(t0, t0, t1);
2de68a49 9656 }
9ee6e8bb
PB
9657 break;
9658 case 13: /* sub */
9659 if (conds)
72485ec4 9660 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9661 else
396e467c 9662 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9663 break;
9664 case 14: /* rsb */
9665 if (conds)
72485ec4 9666 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9667 else
396e467c 9668 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9669 break;
9670 default: /* 5, 6, 7, 9, 12, 15. */
9671 return 1;
9672 }
9673 if (logic_cc) {
396e467c 9674 gen_logic_CC(t0);
9ee6e8bb 9675 if (shifter_out)
396e467c 9676 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9677 }
9678 return 0;
9679}
9680
9681/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9682 is not legal. */
0ecb72a5 9683static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9684{
b0109805 9685 uint32_t insn, imm, shift, offset;
9ee6e8bb 9686 uint32_t rd, rn, rm, rs;
39d5492a
PM
9687 TCGv_i32 tmp;
9688 TCGv_i32 tmp2;
9689 TCGv_i32 tmp3;
9690 TCGv_i32 addr;
a7812ae4 9691 TCGv_i64 tmp64;
9ee6e8bb
PB
9692 int op;
9693 int shiftop;
9694 int conds;
9695 int logic_cc;
9696
d614a513
PM
9697 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9698 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9699 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9700 16-bit instructions to get correct prefetch abort behavior. */
9701 insn = insn_hw1;
9702 if ((insn & (1 << 12)) == 0) {
be5e7a76 9703 ARCH(5);
9ee6e8bb
PB
9704 /* Second half of blx. */
9705 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9706 tmp = load_reg(s, 14);
9707 tcg_gen_addi_i32(tmp, tmp, offset);
9708 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9709
7d1b0095 9710 tmp2 = tcg_temp_new_i32();
b0109805 9711 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9712 store_reg(s, 14, tmp2);
9713 gen_bx(s, tmp);
9ee6e8bb
PB
9714 return 0;
9715 }
9716 if (insn & (1 << 11)) {
9717 /* Second half of bl. */
9718 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9719 tmp = load_reg(s, 14);
6a0d8a1d 9720 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9721
7d1b0095 9722 tmp2 = tcg_temp_new_i32();
b0109805 9723 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9724 store_reg(s, 14, tmp2);
9725 gen_bx(s, tmp);
9ee6e8bb
PB
9726 return 0;
9727 }
9728 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9729 /* Instruction spans a page boundary. Implement it as two
9730 16-bit instructions in case the second half causes an
9731 prefetch abort. */
9732 offset = ((int32_t)insn << 21) >> 9;
396e467c 9733 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9734 return 0;
9735 }
9736 /* Fall through to 32-bit decode. */
9737 }
9738
f9fd40eb 9739 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9740 s->pc += 2;
9741 insn |= (uint32_t)insn_hw1 << 16;
9742
9743 if ((insn & 0xf800e800) != 0xf000e800) {
9744 ARCH(6T2);
9745 }
9746
9747 rn = (insn >> 16) & 0xf;
9748 rs = (insn >> 12) & 0xf;
9749 rd = (insn >> 8) & 0xf;
9750 rm = insn & 0xf;
9751 switch ((insn >> 25) & 0xf) {
9752 case 0: case 1: case 2: case 3:
9753 /* 16-bit instructions. Should never happen. */
9754 abort();
9755 case 4:
9756 if (insn & (1 << 22)) {
ebfe27c5
PM
9757 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9758 * - load/store doubleword, load/store exclusive, ldacq/strel,
9759 * table branch.
9760 */
9ee6e8bb 9761 if (insn & 0x01200000) {
ebfe27c5
PM
9762 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9763 * - load/store dual (post-indexed)
9764 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9765 * - load/store dual (literal and immediate)
9766 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9767 * - load/store dual (pre-indexed)
9768 */
9ee6e8bb 9769 if (rn == 15) {
ebfe27c5
PM
9770 if (insn & (1 << 21)) {
9771 /* UNPREDICTABLE */
9772 goto illegal_op;
9773 }
7d1b0095 9774 addr = tcg_temp_new_i32();
b0109805 9775 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9776 } else {
b0109805 9777 addr = load_reg(s, rn);
9ee6e8bb
PB
9778 }
9779 offset = (insn & 0xff) * 4;
9780 if ((insn & (1 << 23)) == 0)
9781 offset = -offset;
9782 if (insn & (1 << 24)) {
b0109805 9783 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9784 offset = 0;
9785 }
9786 if (insn & (1 << 20)) {
9787 /* ldrd */
e2592fad 9788 tmp = tcg_temp_new_i32();
12dcc321 9789 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9790 store_reg(s, rs, tmp);
9791 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9792 tmp = tcg_temp_new_i32();
12dcc321 9793 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9794 store_reg(s, rd, tmp);
9ee6e8bb
PB
9795 } else {
9796 /* strd */
b0109805 9797 tmp = load_reg(s, rs);
12dcc321 9798 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9799 tcg_temp_free_i32(tmp);
b0109805
PB
9800 tcg_gen_addi_i32(addr, addr, 4);
9801 tmp = load_reg(s, rd);
12dcc321 9802 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9803 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9804 }
9805 if (insn & (1 << 21)) {
9806 /* Base writeback. */
b0109805
PB
9807 tcg_gen_addi_i32(addr, addr, offset - 4);
9808 store_reg(s, rn, addr);
9809 } else {
7d1b0095 9810 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9811 }
9812 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
9813 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9814 * - load/store exclusive word
9815 */
9816 if (rs == 15) {
9817 goto illegal_op;
9818 }
39d5492a 9819 addr = tcg_temp_local_new_i32();
98a46317 9820 load_reg_var(s, addr, rn);
426f5abc 9821 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9822 if (insn & (1 << 20)) {
426f5abc 9823 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9824 } else {
426f5abc 9825 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9826 }
39d5492a 9827 tcg_temp_free_i32(addr);
2359bf80 9828 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9829 /* Table Branch. */
9830 if (rn == 15) {
7d1b0095 9831 addr = tcg_temp_new_i32();
b0109805 9832 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9833 } else {
b0109805 9834 addr = load_reg(s, rn);
9ee6e8bb 9835 }
b26eefb6 9836 tmp = load_reg(s, rm);
b0109805 9837 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9838 if (insn & (1 << 4)) {
9839 /* tbh */
b0109805 9840 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9841 tcg_temp_free_i32(tmp);
e2592fad 9842 tmp = tcg_temp_new_i32();
12dcc321 9843 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9844 } else { /* tbb */
7d1b0095 9845 tcg_temp_free_i32(tmp);
e2592fad 9846 tmp = tcg_temp_new_i32();
12dcc321 9847 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9848 }
7d1b0095 9849 tcg_temp_free_i32(addr);
b0109805
PB
9850 tcg_gen_shli_i32(tmp, tmp, 1);
9851 tcg_gen_addi_i32(tmp, tmp, s->pc);
9852 store_reg(s, 15, tmp);
9ee6e8bb 9853 } else {
2359bf80 9854 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9855 op = (insn >> 4) & 0x3;
2359bf80
MR
9856 switch (op2) {
9857 case 0:
426f5abc 9858 goto illegal_op;
2359bf80
MR
9859 case 1:
9860 /* Load/store exclusive byte/halfword/doubleword */
9861 if (op == 2) {
9862 goto illegal_op;
9863 }
9864 ARCH(7);
9865 break;
9866 case 2:
9867 /* Load-acquire/store-release */
9868 if (op == 3) {
9869 goto illegal_op;
9870 }
9871 /* Fall through */
9872 case 3:
9873 /* Load-acquire/store-release exclusive */
9874 ARCH(8);
9875 break;
426f5abc 9876 }
39d5492a 9877 addr = tcg_temp_local_new_i32();
98a46317 9878 load_reg_var(s, addr, rn);
2359bf80
MR
9879 if (!(op2 & 1)) {
9880 if (insn & (1 << 20)) {
9881 tmp = tcg_temp_new_i32();
9882 switch (op) {
9883 case 0: /* ldab */
9bb6558a
PM
9884 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9885 rs | ISSIsAcqRel);
2359bf80
MR
9886 break;
9887 case 1: /* ldah */
9bb6558a
PM
9888 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9889 rs | ISSIsAcqRel);
2359bf80
MR
9890 break;
9891 case 2: /* lda */
9bb6558a
PM
9892 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9893 rs | ISSIsAcqRel);
2359bf80
MR
9894 break;
9895 default:
9896 abort();
9897 }
9898 store_reg(s, rs, tmp);
9899 } else {
9900 tmp = load_reg(s, rs);
9901 switch (op) {
9902 case 0: /* stlb */
9bb6558a
PM
9903 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9904 rs | ISSIsAcqRel);
2359bf80
MR
9905 break;
9906 case 1: /* stlh */
9bb6558a
PM
9907 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9908 rs | ISSIsAcqRel);
2359bf80
MR
9909 break;
9910 case 2: /* stl */
9bb6558a
PM
9911 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9912 rs | ISSIsAcqRel);
2359bf80
MR
9913 break;
9914 default:
9915 abort();
9916 }
9917 tcg_temp_free_i32(tmp);
9918 }
9919 } else if (insn & (1 << 20)) {
426f5abc 9920 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9921 } else {
426f5abc 9922 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9923 }
39d5492a 9924 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9925 }
9926 } else {
9927 /* Load/store multiple, RFE, SRS. */
9928 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9929 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9930 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9931 goto illegal_op;
00115976 9932 }
9ee6e8bb
PB
9933 if (insn & (1 << 20)) {
9934 /* rfe */
b0109805
PB
9935 addr = load_reg(s, rn);
9936 if ((insn & (1 << 24)) == 0)
9937 tcg_gen_addi_i32(addr, addr, -8);
9938 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9939 tmp = tcg_temp_new_i32();
12dcc321 9940 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9941 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9942 tmp2 = tcg_temp_new_i32();
12dcc321 9943 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9944 if (insn & (1 << 21)) {
9945 /* Base writeback. */
b0109805
PB
9946 if (insn & (1 << 24)) {
9947 tcg_gen_addi_i32(addr, addr, 4);
9948 } else {
9949 tcg_gen_addi_i32(addr, addr, -4);
9950 }
9951 store_reg(s, rn, addr);
9952 } else {
7d1b0095 9953 tcg_temp_free_i32(addr);
9ee6e8bb 9954 }
b0109805 9955 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9956 } else {
9957 /* srs */
81465888
PM
9958 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9959 insn & (1 << 21));
9ee6e8bb
PB
9960 }
9961 } else {
5856d44e 9962 int i, loaded_base = 0;
39d5492a 9963 TCGv_i32 loaded_var;
9ee6e8bb 9964 /* Load/store multiple. */
b0109805 9965 addr = load_reg(s, rn);
9ee6e8bb
PB
9966 offset = 0;
9967 for (i = 0; i < 16; i++) {
9968 if (insn & (1 << i))
9969 offset += 4;
9970 }
9971 if (insn & (1 << 24)) {
b0109805 9972 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9973 }
9974
39d5492a 9975 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9976 for (i = 0; i < 16; i++) {
9977 if ((insn & (1 << i)) == 0)
9978 continue;
9979 if (insn & (1 << 20)) {
9980 /* Load. */
e2592fad 9981 tmp = tcg_temp_new_i32();
12dcc321 9982 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9983 if (i == 15) {
3bb8a96f 9984 gen_bx_excret(s, tmp);
5856d44e
YO
9985 } else if (i == rn) {
9986 loaded_var = tmp;
9987 loaded_base = 1;
9ee6e8bb 9988 } else {
b0109805 9989 store_reg(s, i, tmp);
9ee6e8bb
PB
9990 }
9991 } else {
9992 /* Store. */
b0109805 9993 tmp = load_reg(s, i);
12dcc321 9994 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9995 tcg_temp_free_i32(tmp);
9ee6e8bb 9996 }
b0109805 9997 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9998 }
5856d44e
YO
9999 if (loaded_base) {
10000 store_reg(s, rn, loaded_var);
10001 }
9ee6e8bb
PB
10002 if (insn & (1 << 21)) {
10003 /* Base register writeback. */
10004 if (insn & (1 << 24)) {
b0109805 10005 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10006 }
10007 /* Fault if writeback register is in register list. */
10008 if (insn & (1 << rn))
10009 goto illegal_op;
b0109805
PB
10010 store_reg(s, rn, addr);
10011 } else {
7d1b0095 10012 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10013 }
10014 }
10015 }
10016 break;
2af9ab77
JB
10017 case 5:
10018
9ee6e8bb 10019 op = (insn >> 21) & 0xf;
2af9ab77 10020 if (op == 6) {
62b44f05
AR
10021 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10022 goto illegal_op;
10023 }
2af9ab77
JB
10024 /* Halfword pack. */
10025 tmp = load_reg(s, rn);
10026 tmp2 = load_reg(s, rm);
10027 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10028 if (insn & (1 << 5)) {
10029 /* pkhtb */
10030 if (shift == 0)
10031 shift = 31;
10032 tcg_gen_sari_i32(tmp2, tmp2, shift);
10033 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10034 tcg_gen_ext16u_i32(tmp2, tmp2);
10035 } else {
10036 /* pkhbt */
10037 if (shift)
10038 tcg_gen_shli_i32(tmp2, tmp2, shift);
10039 tcg_gen_ext16u_i32(tmp, tmp);
10040 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10041 }
10042 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10043 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10044 store_reg(s, rd, tmp);
10045 } else {
2af9ab77
JB
10046 /* Data processing register constant shift. */
10047 if (rn == 15) {
7d1b0095 10048 tmp = tcg_temp_new_i32();
2af9ab77
JB
10049 tcg_gen_movi_i32(tmp, 0);
10050 } else {
10051 tmp = load_reg(s, rn);
10052 }
10053 tmp2 = load_reg(s, rm);
10054
10055 shiftop = (insn >> 4) & 3;
10056 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10057 conds = (insn & (1 << 20)) != 0;
10058 logic_cc = (conds && thumb2_logic_op(op));
10059 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10060 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10061 goto illegal_op;
7d1b0095 10062 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10063 if (rd != 15) {
10064 store_reg(s, rd, tmp);
10065 } else {
7d1b0095 10066 tcg_temp_free_i32(tmp);
2af9ab77 10067 }
3174f8e9 10068 }
9ee6e8bb
PB
10069 break;
10070 case 13: /* Misc data processing. */
10071 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10072 if (op < 4 && (insn & 0xf000) != 0xf000)
10073 goto illegal_op;
10074 switch (op) {
10075 case 0: /* Register controlled shift. */
8984bd2e
PB
10076 tmp = load_reg(s, rn);
10077 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10078 if ((insn & 0x70) != 0)
10079 goto illegal_op;
10080 op = (insn >> 21) & 3;
8984bd2e
PB
10081 logic_cc = (insn & (1 << 20)) != 0;
10082 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10083 if (logic_cc)
10084 gen_logic_CC(tmp);
bedb8a6b 10085 store_reg(s, rd, tmp);
9ee6e8bb
PB
10086 break;
10087 case 1: /* Sign/zero extend. */
62b44f05
AR
10088 op = (insn >> 20) & 7;
10089 switch (op) {
10090 case 0: /* SXTAH, SXTH */
10091 case 1: /* UXTAH, UXTH */
10092 case 4: /* SXTAB, SXTB */
10093 case 5: /* UXTAB, UXTB */
10094 break;
10095 case 2: /* SXTAB16, SXTB16 */
10096 case 3: /* UXTAB16, UXTB16 */
10097 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10098 goto illegal_op;
10099 }
10100 break;
10101 default:
10102 goto illegal_op;
10103 }
10104 if (rn != 15) {
10105 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10106 goto illegal_op;
10107 }
10108 }
5e3f878a 10109 tmp = load_reg(s, rm);
9ee6e8bb 10110 shift = (insn >> 4) & 3;
1301f322 10111 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10112 rotate, a shift is sufficient. */
10113 if (shift != 0)
f669df27 10114 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10115 op = (insn >> 20) & 7;
10116 switch (op) {
5e3f878a
PB
10117 case 0: gen_sxth(tmp); break;
10118 case 1: gen_uxth(tmp); break;
10119 case 2: gen_sxtb16(tmp); break;
10120 case 3: gen_uxtb16(tmp); break;
10121 case 4: gen_sxtb(tmp); break;
10122 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10123 default:
10124 g_assert_not_reached();
9ee6e8bb
PB
10125 }
10126 if (rn != 15) {
5e3f878a 10127 tmp2 = load_reg(s, rn);
9ee6e8bb 10128 if ((op >> 1) == 1) {
5e3f878a 10129 gen_add16(tmp, tmp2);
9ee6e8bb 10130 } else {
5e3f878a 10131 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10132 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10133 }
10134 }
5e3f878a 10135 store_reg(s, rd, tmp);
9ee6e8bb
PB
10136 break;
10137 case 2: /* SIMD add/subtract. */
62b44f05
AR
10138 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10139 goto illegal_op;
10140 }
9ee6e8bb
PB
10141 op = (insn >> 20) & 7;
10142 shift = (insn >> 4) & 7;
10143 if ((op & 3) == 3 || (shift & 3) == 3)
10144 goto illegal_op;
6ddbc6e4
PB
10145 tmp = load_reg(s, rn);
10146 tmp2 = load_reg(s, rm);
10147 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10148 tcg_temp_free_i32(tmp2);
6ddbc6e4 10149 store_reg(s, rd, tmp);
9ee6e8bb
PB
10150 break;
10151 case 3: /* Other data processing. */
10152 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10153 if (op < 4) {
10154 /* Saturating add/subtract. */
62b44f05
AR
10155 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10156 goto illegal_op;
10157 }
d9ba4830
PB
10158 tmp = load_reg(s, rn);
10159 tmp2 = load_reg(s, rm);
9ee6e8bb 10160 if (op & 1)
9ef39277 10161 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10162 if (op & 2)
9ef39277 10163 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10164 else
9ef39277 10165 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10166 tcg_temp_free_i32(tmp2);
9ee6e8bb 10167 } else {
62b44f05
AR
10168 switch (op) {
10169 case 0x0a: /* rbit */
10170 case 0x08: /* rev */
10171 case 0x09: /* rev16 */
10172 case 0x0b: /* revsh */
10173 case 0x18: /* clz */
10174 break;
10175 case 0x10: /* sel */
10176 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10177 goto illegal_op;
10178 }
10179 break;
10180 case 0x20: /* crc32/crc32c */
10181 case 0x21:
10182 case 0x22:
10183 case 0x28:
10184 case 0x29:
10185 case 0x2a:
10186 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10187 goto illegal_op;
10188 }
10189 break;
10190 default:
10191 goto illegal_op;
10192 }
d9ba4830 10193 tmp = load_reg(s, rn);
9ee6e8bb
PB
10194 switch (op) {
10195 case 0x0a: /* rbit */
d9ba4830 10196 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10197 break;
10198 case 0x08: /* rev */
66896cb8 10199 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10200 break;
10201 case 0x09: /* rev16 */
d9ba4830 10202 gen_rev16(tmp);
9ee6e8bb
PB
10203 break;
10204 case 0x0b: /* revsh */
d9ba4830 10205 gen_revsh(tmp);
9ee6e8bb
PB
10206 break;
10207 case 0x10: /* sel */
d9ba4830 10208 tmp2 = load_reg(s, rm);
7d1b0095 10209 tmp3 = tcg_temp_new_i32();
0ecb72a5 10210 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10211 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10212 tcg_temp_free_i32(tmp3);
10213 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10214 break;
10215 case 0x18: /* clz */
7539a012 10216 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10217 break;
eb0ecd5a
WN
10218 case 0x20:
10219 case 0x21:
10220 case 0x22:
10221 case 0x28:
10222 case 0x29:
10223 case 0x2a:
10224 {
10225 /* crc32/crc32c */
10226 uint32_t sz = op & 0x3;
10227 uint32_t c = op & 0x8;
10228
eb0ecd5a 10229 tmp2 = load_reg(s, rm);
aa633469
PM
10230 if (sz == 0) {
10231 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10232 } else if (sz == 1) {
10233 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10234 }
eb0ecd5a
WN
10235 tmp3 = tcg_const_i32(1 << sz);
10236 if (c) {
10237 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10238 } else {
10239 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10240 }
10241 tcg_temp_free_i32(tmp2);
10242 tcg_temp_free_i32(tmp3);
10243 break;
10244 }
9ee6e8bb 10245 default:
62b44f05 10246 g_assert_not_reached();
9ee6e8bb
PB
10247 }
10248 }
d9ba4830 10249 store_reg(s, rd, tmp);
9ee6e8bb
PB
10250 break;
10251 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10252 switch ((insn >> 20) & 7) {
10253 case 0: /* 32 x 32 -> 32 */
10254 case 7: /* Unsigned sum of absolute differences. */
10255 break;
10256 case 1: /* 16 x 16 -> 32 */
10257 case 2: /* Dual multiply add. */
10258 case 3: /* 32 * 16 -> 32msb */
10259 case 4: /* Dual multiply subtract. */
10260 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10261 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10262 goto illegal_op;
10263 }
10264 break;
10265 }
9ee6e8bb 10266 op = (insn >> 4) & 0xf;
d9ba4830
PB
10267 tmp = load_reg(s, rn);
10268 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10269 switch ((insn >> 20) & 7) {
10270 case 0: /* 32 x 32 -> 32 */
d9ba4830 10271 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10272 tcg_temp_free_i32(tmp2);
9ee6e8bb 10273 if (rs != 15) {
d9ba4830 10274 tmp2 = load_reg(s, rs);
9ee6e8bb 10275 if (op)
d9ba4830 10276 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10277 else
d9ba4830 10278 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10279 tcg_temp_free_i32(tmp2);
9ee6e8bb 10280 }
9ee6e8bb
PB
10281 break;
10282 case 1: /* 16 x 16 -> 32 */
d9ba4830 10283 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10284 tcg_temp_free_i32(tmp2);
9ee6e8bb 10285 if (rs != 15) {
d9ba4830 10286 tmp2 = load_reg(s, rs);
9ef39277 10287 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10288 tcg_temp_free_i32(tmp2);
9ee6e8bb 10289 }
9ee6e8bb
PB
10290 break;
10291 case 2: /* Dual multiply add. */
10292 case 4: /* Dual multiply subtract. */
10293 if (op)
d9ba4830
PB
10294 gen_swap_half(tmp2);
10295 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10296 if (insn & (1 << 22)) {
e1d177b9 10297 /* This subtraction cannot overflow. */
d9ba4830 10298 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10299 } else {
e1d177b9
PM
10300 /* This addition cannot overflow 32 bits;
10301 * however it may overflow considered as a signed
10302 * operation, in which case we must set the Q flag.
10303 */
9ef39277 10304 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10305 }
7d1b0095 10306 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10307 if (rs != 15)
10308 {
d9ba4830 10309 tmp2 = load_reg(s, rs);
9ef39277 10310 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10311 tcg_temp_free_i32(tmp2);
9ee6e8bb 10312 }
9ee6e8bb
PB
10313 break;
10314 case 3: /* 32 * 16 -> 32msb */
10315 if (op)
d9ba4830 10316 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10317 else
d9ba4830 10318 gen_sxth(tmp2);
a7812ae4
PB
10319 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10320 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10321 tmp = tcg_temp_new_i32();
ecc7b3aa 10322 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10323 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10324 if (rs != 15)
10325 {
d9ba4830 10326 tmp2 = load_reg(s, rs);
9ef39277 10327 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10328 tcg_temp_free_i32(tmp2);
9ee6e8bb 10329 }
9ee6e8bb 10330 break;
838fa72d
AJ
10331 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10332 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10333 if (rs != 15) {
838fa72d
AJ
10334 tmp = load_reg(s, rs);
10335 if (insn & (1 << 20)) {
10336 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10337 } else {
838fa72d 10338 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10339 }
2c0262af 10340 }
838fa72d
AJ
10341 if (insn & (1 << 4)) {
10342 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10343 }
10344 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10345 tmp = tcg_temp_new_i32();
ecc7b3aa 10346 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10347 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10348 break;
10349 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10350 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10351 tcg_temp_free_i32(tmp2);
9ee6e8bb 10352 if (rs != 15) {
d9ba4830
PB
10353 tmp2 = load_reg(s, rs);
10354 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10355 tcg_temp_free_i32(tmp2);
5fd46862 10356 }
9ee6e8bb 10357 break;
2c0262af 10358 }
d9ba4830 10359 store_reg(s, rd, tmp);
2c0262af 10360 break;
9ee6e8bb
PB
10361 case 6: case 7: /* 64-bit multiply, Divide. */
10362 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10363 tmp = load_reg(s, rn);
10364 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10365 if ((op & 0x50) == 0x10) {
10366 /* sdiv, udiv */
d614a513 10367 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10368 goto illegal_op;
47789990 10369 }
9ee6e8bb 10370 if (op & 0x20)
5e3f878a 10371 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10372 else
5e3f878a 10373 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10374 tcg_temp_free_i32(tmp2);
5e3f878a 10375 store_reg(s, rd, tmp);
9ee6e8bb
PB
10376 } else if ((op & 0xe) == 0xc) {
10377 /* Dual multiply accumulate long. */
62b44f05
AR
10378 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10379 tcg_temp_free_i32(tmp);
10380 tcg_temp_free_i32(tmp2);
10381 goto illegal_op;
10382 }
9ee6e8bb 10383 if (op & 1)
5e3f878a
PB
10384 gen_swap_half(tmp2);
10385 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10386 if (op & 0x10) {
5e3f878a 10387 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10388 } else {
5e3f878a 10389 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10390 }
7d1b0095 10391 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10392 /* BUGFIX */
10393 tmp64 = tcg_temp_new_i64();
10394 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10395 tcg_temp_free_i32(tmp);
a7812ae4
PB
10396 gen_addq(s, tmp64, rs, rd);
10397 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10398 tcg_temp_free_i64(tmp64);
2c0262af 10399 } else {
9ee6e8bb
PB
10400 if (op & 0x20) {
10401 /* Unsigned 64-bit multiply */
a7812ae4 10402 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10403 } else {
9ee6e8bb
PB
10404 if (op & 8) {
10405 /* smlalxy */
62b44f05
AR
10406 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10407 tcg_temp_free_i32(tmp2);
10408 tcg_temp_free_i32(tmp);
10409 goto illegal_op;
10410 }
5e3f878a 10411 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10412 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10413 tmp64 = tcg_temp_new_i64();
10414 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10415 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10416 } else {
10417 /* Signed 64-bit multiply */
a7812ae4 10418 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10419 }
b5ff1b31 10420 }
9ee6e8bb
PB
10421 if (op & 4) {
10422 /* umaal */
62b44f05
AR
10423 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10424 tcg_temp_free_i64(tmp64);
10425 goto illegal_op;
10426 }
a7812ae4
PB
10427 gen_addq_lo(s, tmp64, rs);
10428 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10429 } else if (op & 0x40) {
10430 /* 64-bit accumulate. */
a7812ae4 10431 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10432 }
a7812ae4 10433 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10434 tcg_temp_free_i64(tmp64);
5fd46862 10435 }
2c0262af 10436 break;
9ee6e8bb
PB
10437 }
10438 break;
10439 case 6: case 7: case 14: case 15:
10440 /* Coprocessor. */
7517748e
PM
10441 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10442 /* We don't currently implement M profile FP support,
10443 * so this entire space should give a NOCP fault.
10444 */
10445 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10446 default_exception_el(s));
10447 break;
10448 }
9ee6e8bb
PB
10449 if (((insn >> 24) & 3) == 3) {
10450 /* Translate into the equivalent ARM encoding. */
f06053e3 10451 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10452 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10453 goto illegal_op;
7dcc1f89 10454 }
6a57f3eb 10455 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10456 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10457 goto illegal_op;
10458 }
9ee6e8bb
PB
10459 } else {
10460 if (insn & (1 << 28))
10461 goto illegal_op;
7dcc1f89 10462 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10463 goto illegal_op;
7dcc1f89 10464 }
9ee6e8bb
PB
10465 }
10466 break;
10467 case 8: case 9: case 10: case 11:
10468 if (insn & (1 << 15)) {
10469 /* Branches, misc control. */
10470 if (insn & 0x5000) {
10471 /* Unconditional branch. */
10472 /* signextend(hw1[10:0]) -> offset[:12]. */
10473 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10474 /* hw1[10:0] -> offset[11:1]. */
10475 offset |= (insn & 0x7ff) << 1;
10476 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10477 offset[24:22] already have the same value because of the
10478 sign extension above. */
10479 offset ^= ((~insn) & (1 << 13)) << 10;
10480 offset ^= ((~insn) & (1 << 11)) << 11;
10481
9ee6e8bb
PB
10482 if (insn & (1 << 14)) {
10483 /* Branch and link. */
3174f8e9 10484 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10485 }
3b46e624 10486
b0109805 10487 offset += s->pc;
9ee6e8bb
PB
10488 if (insn & (1 << 12)) {
10489 /* b/bl */
b0109805 10490 gen_jmp(s, offset);
9ee6e8bb
PB
10491 } else {
10492 /* blx */
b0109805 10493 offset &= ~(uint32_t)2;
be5e7a76 10494 /* thumb2 bx, no need to check */
b0109805 10495 gen_bx_im(s, offset);
2c0262af 10496 }
9ee6e8bb
PB
10497 } else if (((insn >> 23) & 7) == 7) {
10498 /* Misc control */
10499 if (insn & (1 << 13))
10500 goto illegal_op;
10501
10502 if (insn & (1 << 26)) {
001b3cab
PM
10503 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10504 goto illegal_op;
10505 }
37e6456e
PM
10506 if (!(insn & (1 << 20))) {
10507 /* Hypervisor call (v7) */
10508 int imm16 = extract32(insn, 16, 4) << 12
10509 | extract32(insn, 0, 12);
10510 ARCH(7);
10511 if (IS_USER(s)) {
10512 goto illegal_op;
10513 }
10514 gen_hvc(s, imm16);
10515 } else {
10516 /* Secure monitor call (v6+) */
10517 ARCH(6K);
10518 if (IS_USER(s)) {
10519 goto illegal_op;
10520 }
10521 gen_smc(s);
10522 }
2c0262af 10523 } else {
9ee6e8bb
PB
10524 op = (insn >> 20) & 7;
10525 switch (op) {
10526 case 0: /* msr cpsr. */
b53d8923 10527 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10528 tmp = load_reg(s, rn);
b28b3377
PM
10529 /* the constant is the mask and SYSm fields */
10530 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10531 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10532 tcg_temp_free_i32(addr);
7d1b0095 10533 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10534 gen_lookup_tb(s);
10535 break;
10536 }
10537 /* fall through */
10538 case 1: /* msr spsr. */
b53d8923 10539 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10540 goto illegal_op;
b53d8923 10541 }
8bfd0550
PM
10542
10543 if (extract32(insn, 5, 1)) {
10544 /* MSR (banked) */
10545 int sysm = extract32(insn, 8, 4) |
10546 (extract32(insn, 4, 1) << 4);
10547 int r = op & 1;
10548
10549 gen_msr_banked(s, r, sysm, rm);
10550 break;
10551 }
10552
10553 /* MSR (for PSRs) */
2fbac54b
FN
10554 tmp = load_reg(s, rn);
10555 if (gen_set_psr(s,
7dcc1f89 10556 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10557 op == 1, tmp))
9ee6e8bb
PB
10558 goto illegal_op;
10559 break;
10560 case 2: /* cps, nop-hint. */
10561 if (((insn >> 8) & 7) == 0) {
10562 gen_nop_hint(s, insn & 0xff);
10563 }
10564 /* Implemented as NOP in user mode. */
10565 if (IS_USER(s))
10566 break;
10567 offset = 0;
10568 imm = 0;
10569 if (insn & (1 << 10)) {
10570 if (insn & (1 << 7))
10571 offset |= CPSR_A;
10572 if (insn & (1 << 6))
10573 offset |= CPSR_I;
10574 if (insn & (1 << 5))
10575 offset |= CPSR_F;
10576 if (insn & (1 << 9))
10577 imm = CPSR_A | CPSR_I | CPSR_F;
10578 }
10579 if (insn & (1 << 8)) {
10580 offset |= 0x1f;
10581 imm |= (insn & 0x1f);
10582 }
10583 if (offset) {
2fbac54b 10584 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10585 }
10586 break;
10587 case 3: /* Special control operations. */
426f5abc 10588 ARCH(7);
9ee6e8bb
PB
10589 op = (insn >> 4) & 0xf;
10590 switch (op) {
10591 case 2: /* clrex */
426f5abc 10592 gen_clrex(s);
9ee6e8bb
PB
10593 break;
10594 case 4: /* dsb */
10595 case 5: /* dmb */
61e4c432 10596 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10597 break;
6df99dec
SS
10598 case 6: /* isb */
10599 /* We need to break the TB after this insn
10600 * to execute self-modifying code correctly
10601 * and also to take any pending interrupts
10602 * immediately.
10603 */
0b609cc1 10604 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 10605 break;
9ee6e8bb
PB
10606 default:
10607 goto illegal_op;
10608 }
10609 break;
10610 case 4: /* bxj */
9d7c59c8
PM
10611 /* Trivial implementation equivalent to bx.
10612 * This instruction doesn't exist at all for M-profile.
10613 */
10614 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10615 goto illegal_op;
10616 }
d9ba4830
PB
10617 tmp = load_reg(s, rn);
10618 gen_bx(s, tmp);
9ee6e8bb
PB
10619 break;
10620 case 5: /* Exception return. */
b8b45b68
RV
10621 if (IS_USER(s)) {
10622 goto illegal_op;
10623 }
10624 if (rn != 14 || rd != 15) {
10625 goto illegal_op;
10626 }
10627 tmp = load_reg(s, rn);
10628 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10629 gen_exception_return(s, tmp);
10630 break;
8bfd0550 10631 case 6: /* MRS */
43ac6574
PM
10632 if (extract32(insn, 5, 1) &&
10633 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10634 /* MRS (banked) */
10635 int sysm = extract32(insn, 16, 4) |
10636 (extract32(insn, 4, 1) << 4);
10637
10638 gen_mrs_banked(s, 0, sysm, rd);
10639 break;
10640 }
10641
3d54026f
PM
10642 if (extract32(insn, 16, 4) != 0xf) {
10643 goto illegal_op;
10644 }
10645 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10646 extract32(insn, 0, 8) != 0) {
10647 goto illegal_op;
10648 }
10649
8bfd0550 10650 /* mrs cpsr */
7d1b0095 10651 tmp = tcg_temp_new_i32();
b53d8923 10652 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10653 addr = tcg_const_i32(insn & 0xff);
10654 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10655 tcg_temp_free_i32(addr);
9ee6e8bb 10656 } else {
9ef39277 10657 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10658 }
8984bd2e 10659 store_reg(s, rd, tmp);
9ee6e8bb 10660 break;
8bfd0550 10661 case 7: /* MRS */
43ac6574
PM
10662 if (extract32(insn, 5, 1) &&
10663 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10664 /* MRS (banked) */
10665 int sysm = extract32(insn, 16, 4) |
10666 (extract32(insn, 4, 1) << 4);
10667
10668 gen_mrs_banked(s, 1, sysm, rd);
10669 break;
10670 }
10671
10672 /* mrs spsr. */
9ee6e8bb 10673 /* Not accessible in user mode. */
b53d8923 10674 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10675 goto illegal_op;
b53d8923 10676 }
3d54026f
PM
10677
10678 if (extract32(insn, 16, 4) != 0xf ||
10679 extract32(insn, 0, 8) != 0) {
10680 goto illegal_op;
10681 }
10682
d9ba4830
PB
10683 tmp = load_cpu_field(spsr);
10684 store_reg(s, rd, tmp);
9ee6e8bb 10685 break;
2c0262af
FB
10686 }
10687 }
9ee6e8bb
PB
10688 } else {
10689 /* Conditional branch. */
10690 op = (insn >> 22) & 0xf;
10691 /* Generate a conditional jump to next instruction. */
10692 s->condlabel = gen_new_label();
39fb730a 10693 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10694 s->condjmp = 1;
10695
10696 /* offset[11:1] = insn[10:0] */
10697 offset = (insn & 0x7ff) << 1;
10698 /* offset[17:12] = insn[21:16]. */
10699 offset |= (insn & 0x003f0000) >> 4;
10700 /* offset[31:20] = insn[26]. */
10701 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10702 /* offset[18] = insn[13]. */
10703 offset |= (insn & (1 << 13)) << 5;
10704 /* offset[19] = insn[11]. */
10705 offset |= (insn & (1 << 11)) << 8;
10706
10707 /* jump to the offset */
b0109805 10708 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10709 }
10710 } else {
10711 /* Data processing immediate. */
10712 if (insn & (1 << 25)) {
10713 if (insn & (1 << 24)) {
10714 if (insn & (1 << 20))
10715 goto illegal_op;
10716 /* Bitfield/Saturate. */
10717 op = (insn >> 21) & 7;
10718 imm = insn & 0x1f;
10719 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10720 if (rn == 15) {
7d1b0095 10721 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10722 tcg_gen_movi_i32(tmp, 0);
10723 } else {
10724 tmp = load_reg(s, rn);
10725 }
9ee6e8bb
PB
10726 switch (op) {
10727 case 2: /* Signed bitfield extract. */
10728 imm++;
10729 if (shift + imm > 32)
10730 goto illegal_op;
59a71b4c
RH
10731 if (imm < 32) {
10732 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10733 }
9ee6e8bb
PB
10734 break;
10735 case 6: /* Unsigned bitfield extract. */
10736 imm++;
10737 if (shift + imm > 32)
10738 goto illegal_op;
59a71b4c
RH
10739 if (imm < 32) {
10740 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10741 }
9ee6e8bb
PB
10742 break;
10743 case 3: /* Bitfield insert/clear. */
10744 if (imm < shift)
10745 goto illegal_op;
10746 imm = imm + 1 - shift;
10747 if (imm != 32) {
6ddbc6e4 10748 tmp2 = load_reg(s, rd);
d593c48e 10749 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10750 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10751 }
10752 break;
10753 case 7:
10754 goto illegal_op;
10755 default: /* Saturate. */
9ee6e8bb
PB
10756 if (shift) {
10757 if (op & 1)
6ddbc6e4 10758 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10759 else
6ddbc6e4 10760 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10761 }
6ddbc6e4 10762 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10763 if (op & 4) {
10764 /* Unsigned. */
62b44f05
AR
10765 if ((op & 1) && shift == 0) {
10766 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10767 tcg_temp_free_i32(tmp);
10768 tcg_temp_free_i32(tmp2);
10769 goto illegal_op;
10770 }
9ef39277 10771 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10772 } else {
9ef39277 10773 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10774 }
2c0262af 10775 } else {
9ee6e8bb 10776 /* Signed. */
62b44f05
AR
10777 if ((op & 1) && shift == 0) {
10778 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10779 tcg_temp_free_i32(tmp);
10780 tcg_temp_free_i32(tmp2);
10781 goto illegal_op;
10782 }
9ef39277 10783 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10784 } else {
9ef39277 10785 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10786 }
2c0262af 10787 }
b75263d6 10788 tcg_temp_free_i32(tmp2);
9ee6e8bb 10789 break;
2c0262af 10790 }
6ddbc6e4 10791 store_reg(s, rd, tmp);
9ee6e8bb
PB
10792 } else {
10793 imm = ((insn & 0x04000000) >> 15)
10794 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10795 if (insn & (1 << 22)) {
10796 /* 16-bit immediate. */
10797 imm |= (insn >> 4) & 0xf000;
10798 if (insn & (1 << 23)) {
10799 /* movt */
5e3f878a 10800 tmp = load_reg(s, rd);
86831435 10801 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10802 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10803 } else {
9ee6e8bb 10804 /* movw */
7d1b0095 10805 tmp = tcg_temp_new_i32();
5e3f878a 10806 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10807 }
10808 } else {
9ee6e8bb
PB
10809 /* Add/sub 12-bit immediate. */
10810 if (rn == 15) {
b0109805 10811 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10812 if (insn & (1 << 23))
b0109805 10813 offset -= imm;
9ee6e8bb 10814 else
b0109805 10815 offset += imm;
7d1b0095 10816 tmp = tcg_temp_new_i32();
5e3f878a 10817 tcg_gen_movi_i32(tmp, offset);
2c0262af 10818 } else {
5e3f878a 10819 tmp = load_reg(s, rn);
9ee6e8bb 10820 if (insn & (1 << 23))
5e3f878a 10821 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10822 else
5e3f878a 10823 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10824 }
9ee6e8bb 10825 }
5e3f878a 10826 store_reg(s, rd, tmp);
191abaa2 10827 }
9ee6e8bb
PB
10828 } else {
10829 int shifter_out = 0;
10830 /* modified 12-bit immediate. */
10831 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10832 imm = (insn & 0xff);
10833 switch (shift) {
10834 case 0: /* XY */
10835 /* Nothing to do. */
10836 break;
10837 case 1: /* 00XY00XY */
10838 imm |= imm << 16;
10839 break;
10840 case 2: /* XY00XY00 */
10841 imm |= imm << 16;
10842 imm <<= 8;
10843 break;
10844 case 3: /* XYXYXYXY */
10845 imm |= imm << 16;
10846 imm |= imm << 8;
10847 break;
10848 default: /* Rotated constant. */
10849 shift = (shift << 1) | (imm >> 7);
10850 imm |= 0x80;
10851 imm = imm << (32 - shift);
10852 shifter_out = 1;
10853 break;
b5ff1b31 10854 }
7d1b0095 10855 tmp2 = tcg_temp_new_i32();
3174f8e9 10856 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10857 rn = (insn >> 16) & 0xf;
3174f8e9 10858 if (rn == 15) {
7d1b0095 10859 tmp = tcg_temp_new_i32();
3174f8e9
FN
10860 tcg_gen_movi_i32(tmp, 0);
10861 } else {
10862 tmp = load_reg(s, rn);
10863 }
9ee6e8bb
PB
10864 op = (insn >> 21) & 0xf;
10865 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10866 shifter_out, tmp, tmp2))
9ee6e8bb 10867 goto illegal_op;
7d1b0095 10868 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10869 rd = (insn >> 8) & 0xf;
10870 if (rd != 15) {
3174f8e9
FN
10871 store_reg(s, rd, tmp);
10872 } else {
7d1b0095 10873 tcg_temp_free_i32(tmp);
2c0262af 10874 }
2c0262af 10875 }
9ee6e8bb
PB
10876 }
10877 break;
10878 case 12: /* Load/store single data item. */
10879 {
10880 int postinc = 0;
10881 int writeback = 0;
a99caa48 10882 int memidx;
9bb6558a
PM
10883 ISSInfo issinfo;
10884
9ee6e8bb 10885 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10886 if (disas_neon_ls_insn(s, insn)) {
c1713132 10887 goto illegal_op;
7dcc1f89 10888 }
9ee6e8bb
PB
10889 break;
10890 }
a2fdc890
PM
10891 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10892 if (rs == 15) {
10893 if (!(insn & (1 << 20))) {
10894 goto illegal_op;
10895 }
10896 if (op != 2) {
10897 /* Byte or halfword load space with dest == r15 : memory hints.
10898 * Catch them early so we don't emit pointless addressing code.
10899 * This space is a mix of:
10900 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10901 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10902 * cores)
10903 * unallocated hints, which must be treated as NOPs
10904 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10905 * which is easiest for the decoding logic
10906 * Some space which must UNDEF
10907 */
10908 int op1 = (insn >> 23) & 3;
10909 int op2 = (insn >> 6) & 0x3f;
10910 if (op & 2) {
10911 goto illegal_op;
10912 }
10913 if (rn == 15) {
02afbf64
PM
10914 /* UNPREDICTABLE, unallocated hint or
10915 * PLD/PLDW/PLI (literal)
10916 */
a2fdc890
PM
10917 return 0;
10918 }
10919 if (op1 & 1) {
02afbf64 10920 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10921 }
10922 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10923 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10924 }
10925 /* UNDEF space, or an UNPREDICTABLE */
10926 return 1;
10927 }
10928 }
a99caa48 10929 memidx = get_mem_index(s);
9ee6e8bb 10930 if (rn == 15) {
7d1b0095 10931 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10932 /* PC relative. */
10933 /* s->pc has already been incremented by 4. */
10934 imm = s->pc & 0xfffffffc;
10935 if (insn & (1 << 23))
10936 imm += insn & 0xfff;
10937 else
10938 imm -= insn & 0xfff;
b0109805 10939 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10940 } else {
b0109805 10941 addr = load_reg(s, rn);
9ee6e8bb
PB
10942 if (insn & (1 << 23)) {
10943 /* Positive offset. */
10944 imm = insn & 0xfff;
b0109805 10945 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10946 } else {
9ee6e8bb 10947 imm = insn & 0xff;
2a0308c5
PM
10948 switch ((insn >> 8) & 0xf) {
10949 case 0x0: /* Shifted Register. */
9ee6e8bb 10950 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10951 if (shift > 3) {
10952 tcg_temp_free_i32(addr);
18c9b560 10953 goto illegal_op;
2a0308c5 10954 }
b26eefb6 10955 tmp = load_reg(s, rm);
9ee6e8bb 10956 if (shift)
b26eefb6 10957 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10958 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10959 tcg_temp_free_i32(tmp);
9ee6e8bb 10960 break;
2a0308c5 10961 case 0xc: /* Negative offset. */
b0109805 10962 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10963 break;
2a0308c5 10964 case 0xe: /* User privilege. */
b0109805 10965 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10966 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10967 break;
2a0308c5 10968 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10969 imm = -imm;
10970 /* Fall through. */
2a0308c5 10971 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10972 postinc = 1;
10973 writeback = 1;
10974 break;
2a0308c5 10975 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10976 imm = -imm;
10977 /* Fall through. */
2a0308c5 10978 case 0xf: /* Pre-increment. */
b0109805 10979 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10980 writeback = 1;
10981 break;
10982 default:
2a0308c5 10983 tcg_temp_free_i32(addr);
b7bcbe95 10984 goto illegal_op;
9ee6e8bb
PB
10985 }
10986 }
10987 }
9bb6558a
PM
10988
10989 issinfo = writeback ? ISSInvalid : rs;
10990
9ee6e8bb
PB
10991 if (insn & (1 << 20)) {
10992 /* Load. */
5a839c0d 10993 tmp = tcg_temp_new_i32();
a2fdc890 10994 switch (op) {
5a839c0d 10995 case 0:
9bb6558a 10996 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10997 break;
10998 case 4:
9bb6558a 10999 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11000 break;
11001 case 1:
9bb6558a 11002 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11003 break;
11004 case 5:
9bb6558a 11005 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11006 break;
11007 case 2:
9bb6558a 11008 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11009 break;
2a0308c5 11010 default:
5a839c0d 11011 tcg_temp_free_i32(tmp);
2a0308c5
PM
11012 tcg_temp_free_i32(addr);
11013 goto illegal_op;
a2fdc890
PM
11014 }
11015 if (rs == 15) {
3bb8a96f 11016 gen_bx_excret(s, tmp);
9ee6e8bb 11017 } else {
a2fdc890 11018 store_reg(s, rs, tmp);
9ee6e8bb
PB
11019 }
11020 } else {
11021 /* Store. */
b0109805 11022 tmp = load_reg(s, rs);
9ee6e8bb 11023 switch (op) {
5a839c0d 11024 case 0:
9bb6558a 11025 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11026 break;
11027 case 1:
9bb6558a 11028 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11029 break;
11030 case 2:
9bb6558a 11031 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11032 break;
2a0308c5 11033 default:
5a839c0d 11034 tcg_temp_free_i32(tmp);
2a0308c5
PM
11035 tcg_temp_free_i32(addr);
11036 goto illegal_op;
b7bcbe95 11037 }
5a839c0d 11038 tcg_temp_free_i32(tmp);
2c0262af 11039 }
9ee6e8bb 11040 if (postinc)
b0109805
PB
11041 tcg_gen_addi_i32(addr, addr, imm);
11042 if (writeback) {
11043 store_reg(s, rn, addr);
11044 } else {
7d1b0095 11045 tcg_temp_free_i32(addr);
b0109805 11046 }
9ee6e8bb
PB
11047 }
11048 break;
11049 default:
11050 goto illegal_op;
2c0262af 11051 }
9ee6e8bb
PB
11052 return 0;
11053illegal_op:
11054 return 1;
2c0262af
FB
11055}
11056
0ecb72a5 11057static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
11058{
11059 uint32_t val, insn, op, rm, rn, rd, shift, cond;
11060 int32_t offset;
11061 int i;
39d5492a
PM
11062 TCGv_i32 tmp;
11063 TCGv_i32 tmp2;
11064 TCGv_i32 addr;
99c475ab 11065
9ee6e8bb
PB
11066 if (s->condexec_mask) {
11067 cond = s->condexec_cond;
bedd2912
JB
11068 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
11069 s->condlabel = gen_new_label();
39fb730a 11070 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
11071 s->condjmp = 1;
11072 }
9ee6e8bb
PB
11073 }
11074
f9fd40eb 11075 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 11076 s->pc += 2;
b5ff1b31 11077
99c475ab
FB
11078 switch (insn >> 12) {
11079 case 0: case 1:
396e467c 11080
99c475ab
FB
11081 rd = insn & 7;
11082 op = (insn >> 11) & 3;
11083 if (op == 3) {
11084 /* add/subtract */
11085 rn = (insn >> 3) & 7;
396e467c 11086 tmp = load_reg(s, rn);
99c475ab
FB
11087 if (insn & (1 << 10)) {
11088 /* immediate */
7d1b0095 11089 tmp2 = tcg_temp_new_i32();
396e467c 11090 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11091 } else {
11092 /* reg */
11093 rm = (insn >> 6) & 7;
396e467c 11094 tmp2 = load_reg(s, rm);
99c475ab 11095 }
9ee6e8bb
PB
11096 if (insn & (1 << 9)) {
11097 if (s->condexec_mask)
396e467c 11098 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11099 else
72485ec4 11100 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11101 } else {
11102 if (s->condexec_mask)
396e467c 11103 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11104 else
72485ec4 11105 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11106 }
7d1b0095 11107 tcg_temp_free_i32(tmp2);
396e467c 11108 store_reg(s, rd, tmp);
99c475ab
FB
11109 } else {
11110 /* shift immediate */
11111 rm = (insn >> 3) & 7;
11112 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11113 tmp = load_reg(s, rm);
11114 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11115 if (!s->condexec_mask)
11116 gen_logic_CC(tmp);
11117 store_reg(s, rd, tmp);
99c475ab
FB
11118 }
11119 break;
11120 case 2: case 3:
11121 /* arithmetic large immediate */
11122 op = (insn >> 11) & 3;
11123 rd = (insn >> 8) & 0x7;
396e467c 11124 if (op == 0) { /* mov */
7d1b0095 11125 tmp = tcg_temp_new_i32();
396e467c 11126 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11127 if (!s->condexec_mask)
396e467c
FN
11128 gen_logic_CC(tmp);
11129 store_reg(s, rd, tmp);
11130 } else {
11131 tmp = load_reg(s, rd);
7d1b0095 11132 tmp2 = tcg_temp_new_i32();
396e467c
FN
11133 tcg_gen_movi_i32(tmp2, insn & 0xff);
11134 switch (op) {
11135 case 1: /* cmp */
72485ec4 11136 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11137 tcg_temp_free_i32(tmp);
11138 tcg_temp_free_i32(tmp2);
396e467c
FN
11139 break;
11140 case 2: /* add */
11141 if (s->condexec_mask)
11142 tcg_gen_add_i32(tmp, tmp, tmp2);
11143 else
72485ec4 11144 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11145 tcg_temp_free_i32(tmp2);
396e467c
FN
11146 store_reg(s, rd, tmp);
11147 break;
11148 case 3: /* sub */
11149 if (s->condexec_mask)
11150 tcg_gen_sub_i32(tmp, tmp, tmp2);
11151 else
72485ec4 11152 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11153 tcg_temp_free_i32(tmp2);
396e467c
FN
11154 store_reg(s, rd, tmp);
11155 break;
11156 }
99c475ab 11157 }
99c475ab
FB
11158 break;
11159 case 4:
11160 if (insn & (1 << 11)) {
11161 rd = (insn >> 8) & 7;
5899f386
FB
11162 /* load pc-relative. Bit 1 of PC is ignored. */
11163 val = s->pc + 2 + ((insn & 0xff) * 4);
11164 val &= ~(uint32_t)2;
7d1b0095 11165 addr = tcg_temp_new_i32();
b0109805 11166 tcg_gen_movi_i32(addr, val);
c40c8556 11167 tmp = tcg_temp_new_i32();
9bb6558a
PM
11168 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11169 rd | ISSIs16Bit);
7d1b0095 11170 tcg_temp_free_i32(addr);
b0109805 11171 store_reg(s, rd, tmp);
99c475ab
FB
11172 break;
11173 }
11174 if (insn & (1 << 10)) {
ebfe27c5
PM
11175 /* 0b0100_01xx_xxxx_xxxx
11176 * - data processing extended, branch and exchange
11177 */
99c475ab
FB
11178 rd = (insn & 7) | ((insn >> 4) & 8);
11179 rm = (insn >> 3) & 0xf;
11180 op = (insn >> 8) & 3;
11181 switch (op) {
11182 case 0: /* add */
396e467c
FN
11183 tmp = load_reg(s, rd);
11184 tmp2 = load_reg(s, rm);
11185 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11186 tcg_temp_free_i32(tmp2);
396e467c 11187 store_reg(s, rd, tmp);
99c475ab
FB
11188 break;
11189 case 1: /* cmp */
396e467c
FN
11190 tmp = load_reg(s, rd);
11191 tmp2 = load_reg(s, rm);
72485ec4 11192 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11193 tcg_temp_free_i32(tmp2);
11194 tcg_temp_free_i32(tmp);
99c475ab
FB
11195 break;
11196 case 2: /* mov/cpy */
396e467c
FN
11197 tmp = load_reg(s, rm);
11198 store_reg(s, rd, tmp);
99c475ab 11199 break;
ebfe27c5
PM
11200 case 3:
11201 {
11202 /* 0b0100_0111_xxxx_xxxx
11203 * - branch [and link] exchange thumb register
11204 */
11205 bool link = insn & (1 << 7);
11206
fb602cb7 11207 if (insn & 3) {
ebfe27c5
PM
11208 goto undef;
11209 }
11210 if (link) {
be5e7a76 11211 ARCH(5);
ebfe27c5 11212 }
fb602cb7
PM
11213 if ((insn & 4)) {
11214 /* BXNS/BLXNS: only exists for v8M with the
11215 * security extensions, and always UNDEF if NonSecure.
11216 * We don't implement these in the user-only mode
11217 * either (in theory you can use them from Secure User
11218 * mode but they are too tied in to system emulation.)
11219 */
11220 if (!s->v8m_secure || IS_USER_ONLY) {
11221 goto undef;
11222 }
11223 if (link) {
11224 /* BLXNS: not yet implemented */
11225 goto undef;
11226 } else {
11227 gen_bxns(s, rm);
11228 }
11229 break;
11230 }
11231 /* BLX/BX */
ebfe27c5
PM
11232 tmp = load_reg(s, rm);
11233 if (link) {
99c475ab 11234 val = (uint32_t)s->pc | 1;
7d1b0095 11235 tmp2 = tcg_temp_new_i32();
b0109805
PB
11236 tcg_gen_movi_i32(tmp2, val);
11237 store_reg(s, 14, tmp2);
3bb8a96f
PM
11238 gen_bx(s, tmp);
11239 } else {
11240 /* Only BX works as exception-return, not BLX */
11241 gen_bx_excret(s, tmp);
99c475ab 11242 }
99c475ab
FB
11243 break;
11244 }
ebfe27c5 11245 }
99c475ab
FB
11246 break;
11247 }
11248
11249 /* data processing register */
11250 rd = insn & 7;
11251 rm = (insn >> 3) & 7;
11252 op = (insn >> 6) & 0xf;
11253 if (op == 2 || op == 3 || op == 4 || op == 7) {
11254 /* the shift/rotate ops want the operands backwards */
11255 val = rm;
11256 rm = rd;
11257 rd = val;
11258 val = 1;
11259 } else {
11260 val = 0;
11261 }
11262
396e467c 11263 if (op == 9) { /* neg */
7d1b0095 11264 tmp = tcg_temp_new_i32();
396e467c
FN
11265 tcg_gen_movi_i32(tmp, 0);
11266 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11267 tmp = load_reg(s, rd);
11268 } else {
39d5492a 11269 TCGV_UNUSED_I32(tmp);
396e467c 11270 }
99c475ab 11271
396e467c 11272 tmp2 = load_reg(s, rm);
5899f386 11273 switch (op) {
99c475ab 11274 case 0x0: /* and */
396e467c 11275 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11276 if (!s->condexec_mask)
396e467c 11277 gen_logic_CC(tmp);
99c475ab
FB
11278 break;
11279 case 0x1: /* eor */
396e467c 11280 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11281 if (!s->condexec_mask)
396e467c 11282 gen_logic_CC(tmp);
99c475ab
FB
11283 break;
11284 case 0x2: /* lsl */
9ee6e8bb 11285 if (s->condexec_mask) {
365af80e 11286 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11287 } else {
9ef39277 11288 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11289 gen_logic_CC(tmp2);
9ee6e8bb 11290 }
99c475ab
FB
11291 break;
11292 case 0x3: /* lsr */
9ee6e8bb 11293 if (s->condexec_mask) {
365af80e 11294 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11295 } else {
9ef39277 11296 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11297 gen_logic_CC(tmp2);
9ee6e8bb 11298 }
99c475ab
FB
11299 break;
11300 case 0x4: /* asr */
9ee6e8bb 11301 if (s->condexec_mask) {
365af80e 11302 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11303 } else {
9ef39277 11304 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11305 gen_logic_CC(tmp2);
9ee6e8bb 11306 }
99c475ab
FB
11307 break;
11308 case 0x5: /* adc */
49b4c31e 11309 if (s->condexec_mask) {
396e467c 11310 gen_adc(tmp, tmp2);
49b4c31e
RH
11311 } else {
11312 gen_adc_CC(tmp, tmp, tmp2);
11313 }
99c475ab
FB
11314 break;
11315 case 0x6: /* sbc */
2de68a49 11316 if (s->condexec_mask) {
396e467c 11317 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11318 } else {
11319 gen_sbc_CC(tmp, tmp, tmp2);
11320 }
99c475ab
FB
11321 break;
11322 case 0x7: /* ror */
9ee6e8bb 11323 if (s->condexec_mask) {
f669df27
AJ
11324 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11325 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11326 } else {
9ef39277 11327 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11328 gen_logic_CC(tmp2);
9ee6e8bb 11329 }
99c475ab
FB
11330 break;
11331 case 0x8: /* tst */
396e467c
FN
11332 tcg_gen_and_i32(tmp, tmp, tmp2);
11333 gen_logic_CC(tmp);
99c475ab 11334 rd = 16;
5899f386 11335 break;
99c475ab 11336 case 0x9: /* neg */
9ee6e8bb 11337 if (s->condexec_mask)
396e467c 11338 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11339 else
72485ec4 11340 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11341 break;
11342 case 0xa: /* cmp */
72485ec4 11343 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11344 rd = 16;
11345 break;
11346 case 0xb: /* cmn */
72485ec4 11347 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11348 rd = 16;
11349 break;
11350 case 0xc: /* orr */
396e467c 11351 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11352 if (!s->condexec_mask)
396e467c 11353 gen_logic_CC(tmp);
99c475ab
FB
11354 break;
11355 case 0xd: /* mul */
7b2919a0 11356 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11357 if (!s->condexec_mask)
396e467c 11358 gen_logic_CC(tmp);
99c475ab
FB
11359 break;
11360 case 0xe: /* bic */
f669df27 11361 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11362 if (!s->condexec_mask)
396e467c 11363 gen_logic_CC(tmp);
99c475ab
FB
11364 break;
11365 case 0xf: /* mvn */
396e467c 11366 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11367 if (!s->condexec_mask)
396e467c 11368 gen_logic_CC(tmp2);
99c475ab 11369 val = 1;
5899f386 11370 rm = rd;
99c475ab
FB
11371 break;
11372 }
11373 if (rd != 16) {
396e467c
FN
11374 if (val) {
11375 store_reg(s, rm, tmp2);
11376 if (op != 0xf)
7d1b0095 11377 tcg_temp_free_i32(tmp);
396e467c
FN
11378 } else {
11379 store_reg(s, rd, tmp);
7d1b0095 11380 tcg_temp_free_i32(tmp2);
396e467c
FN
11381 }
11382 } else {
7d1b0095
PM
11383 tcg_temp_free_i32(tmp);
11384 tcg_temp_free_i32(tmp2);
99c475ab
FB
11385 }
11386 break;
11387
11388 case 5:
11389 /* load/store register offset. */
11390 rd = insn & 7;
11391 rn = (insn >> 3) & 7;
11392 rm = (insn >> 6) & 7;
11393 op = (insn >> 9) & 7;
b0109805 11394 addr = load_reg(s, rn);
b26eefb6 11395 tmp = load_reg(s, rm);
b0109805 11396 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11397 tcg_temp_free_i32(tmp);
99c475ab 11398
c40c8556 11399 if (op < 3) { /* store */
b0109805 11400 tmp = load_reg(s, rd);
c40c8556
PM
11401 } else {
11402 tmp = tcg_temp_new_i32();
11403 }
99c475ab
FB
11404
11405 switch (op) {
11406 case 0: /* str */
9bb6558a 11407 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11408 break;
11409 case 1: /* strh */
9bb6558a 11410 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11411 break;
11412 case 2: /* strb */
9bb6558a 11413 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11414 break;
11415 case 3: /* ldrsb */
9bb6558a 11416 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11417 break;
11418 case 4: /* ldr */
9bb6558a 11419 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11420 break;
11421 case 5: /* ldrh */
9bb6558a 11422 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11423 break;
11424 case 6: /* ldrb */
9bb6558a 11425 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11426 break;
11427 case 7: /* ldrsh */
9bb6558a 11428 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11429 break;
11430 }
c40c8556 11431 if (op >= 3) { /* load */
b0109805 11432 store_reg(s, rd, tmp);
c40c8556
PM
11433 } else {
11434 tcg_temp_free_i32(tmp);
11435 }
7d1b0095 11436 tcg_temp_free_i32(addr);
99c475ab
FB
11437 break;
11438
11439 case 6:
11440 /* load/store word immediate offset */
11441 rd = insn & 7;
11442 rn = (insn >> 3) & 7;
b0109805 11443 addr = load_reg(s, rn);
99c475ab 11444 val = (insn >> 4) & 0x7c;
b0109805 11445 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11446
11447 if (insn & (1 << 11)) {
11448 /* load */
c40c8556 11449 tmp = tcg_temp_new_i32();
12dcc321 11450 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11451 store_reg(s, rd, tmp);
99c475ab
FB
11452 } else {
11453 /* store */
b0109805 11454 tmp = load_reg(s, rd);
12dcc321 11455 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11456 tcg_temp_free_i32(tmp);
99c475ab 11457 }
7d1b0095 11458 tcg_temp_free_i32(addr);
99c475ab
FB
11459 break;
11460
11461 case 7:
11462 /* load/store byte immediate offset */
11463 rd = insn & 7;
11464 rn = (insn >> 3) & 7;
b0109805 11465 addr = load_reg(s, rn);
99c475ab 11466 val = (insn >> 6) & 0x1f;
b0109805 11467 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11468
11469 if (insn & (1 << 11)) {
11470 /* load */
c40c8556 11471 tmp = tcg_temp_new_i32();
9bb6558a 11472 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11473 store_reg(s, rd, tmp);
99c475ab
FB
11474 } else {
11475 /* store */
b0109805 11476 tmp = load_reg(s, rd);
9bb6558a 11477 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11478 tcg_temp_free_i32(tmp);
99c475ab 11479 }
7d1b0095 11480 tcg_temp_free_i32(addr);
99c475ab
FB
11481 break;
11482
11483 case 8:
11484 /* load/store halfword immediate offset */
11485 rd = insn & 7;
11486 rn = (insn >> 3) & 7;
b0109805 11487 addr = load_reg(s, rn);
99c475ab 11488 val = (insn >> 5) & 0x3e;
b0109805 11489 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11490
11491 if (insn & (1 << 11)) {
11492 /* load */
c40c8556 11493 tmp = tcg_temp_new_i32();
9bb6558a 11494 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11495 store_reg(s, rd, tmp);
99c475ab
FB
11496 } else {
11497 /* store */
b0109805 11498 tmp = load_reg(s, rd);
9bb6558a 11499 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11500 tcg_temp_free_i32(tmp);
99c475ab 11501 }
7d1b0095 11502 tcg_temp_free_i32(addr);
99c475ab
FB
11503 break;
11504
11505 case 9:
11506 /* load/store from stack */
11507 rd = (insn >> 8) & 7;
b0109805 11508 addr = load_reg(s, 13);
99c475ab 11509 val = (insn & 0xff) * 4;
b0109805 11510 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11511
11512 if (insn & (1 << 11)) {
11513 /* load */
c40c8556 11514 tmp = tcg_temp_new_i32();
9bb6558a 11515 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11516 store_reg(s, rd, tmp);
99c475ab
FB
11517 } else {
11518 /* store */
b0109805 11519 tmp = load_reg(s, rd);
9bb6558a 11520 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11521 tcg_temp_free_i32(tmp);
99c475ab 11522 }
7d1b0095 11523 tcg_temp_free_i32(addr);
99c475ab
FB
11524 break;
11525
11526 case 10:
11527 /* add to high reg */
11528 rd = (insn >> 8) & 7;
5899f386
FB
11529 if (insn & (1 << 11)) {
11530 /* SP */
5e3f878a 11531 tmp = load_reg(s, 13);
5899f386
FB
11532 } else {
11533 /* PC. bit 1 is ignored. */
7d1b0095 11534 tmp = tcg_temp_new_i32();
5e3f878a 11535 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11536 }
99c475ab 11537 val = (insn & 0xff) * 4;
5e3f878a
PB
11538 tcg_gen_addi_i32(tmp, tmp, val);
11539 store_reg(s, rd, tmp);
99c475ab
FB
11540 break;
11541
11542 case 11:
11543 /* misc */
11544 op = (insn >> 8) & 0xf;
11545 switch (op) {
11546 case 0:
11547 /* adjust stack pointer */
b26eefb6 11548 tmp = load_reg(s, 13);
99c475ab
FB
11549 val = (insn & 0x7f) * 4;
11550 if (insn & (1 << 7))
6a0d8a1d 11551 val = -(int32_t)val;
b26eefb6
PB
11552 tcg_gen_addi_i32(tmp, tmp, val);
11553 store_reg(s, 13, tmp);
99c475ab
FB
11554 break;
11555
9ee6e8bb
PB
11556 case 2: /* sign/zero extend. */
11557 ARCH(6);
11558 rd = insn & 7;
11559 rm = (insn >> 3) & 7;
b0109805 11560 tmp = load_reg(s, rm);
9ee6e8bb 11561 switch ((insn >> 6) & 3) {
b0109805
PB
11562 case 0: gen_sxth(tmp); break;
11563 case 1: gen_sxtb(tmp); break;
11564 case 2: gen_uxth(tmp); break;
11565 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11566 }
b0109805 11567 store_reg(s, rd, tmp);
9ee6e8bb 11568 break;
99c475ab
FB
11569 case 4: case 5: case 0xc: case 0xd:
11570 /* push/pop */
b0109805 11571 addr = load_reg(s, 13);
5899f386
FB
11572 if (insn & (1 << 8))
11573 offset = 4;
99c475ab 11574 else
5899f386
FB
11575 offset = 0;
11576 for (i = 0; i < 8; i++) {
11577 if (insn & (1 << i))
11578 offset += 4;
11579 }
11580 if ((insn & (1 << 11)) == 0) {
b0109805 11581 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11582 }
99c475ab
FB
11583 for (i = 0; i < 8; i++) {
11584 if (insn & (1 << i)) {
11585 if (insn & (1 << 11)) {
11586 /* pop */
c40c8556 11587 tmp = tcg_temp_new_i32();
12dcc321 11588 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11589 store_reg(s, i, tmp);
99c475ab
FB
11590 } else {
11591 /* push */
b0109805 11592 tmp = load_reg(s, i);
12dcc321 11593 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11594 tcg_temp_free_i32(tmp);
99c475ab 11595 }
5899f386 11596 /* advance to the next address. */
b0109805 11597 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11598 }
11599 }
39d5492a 11600 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11601 if (insn & (1 << 8)) {
11602 if (insn & (1 << 11)) {
11603 /* pop pc */
c40c8556 11604 tmp = tcg_temp_new_i32();
12dcc321 11605 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11606 /* don't set the pc until the rest of the instruction
11607 has completed */
11608 } else {
11609 /* push lr */
b0109805 11610 tmp = load_reg(s, 14);
12dcc321 11611 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11612 tcg_temp_free_i32(tmp);
99c475ab 11613 }
b0109805 11614 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11615 }
5899f386 11616 if ((insn & (1 << 11)) == 0) {
b0109805 11617 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11618 }
99c475ab 11619 /* write back the new stack pointer */
b0109805 11620 store_reg(s, 13, addr);
99c475ab 11621 /* set the new PC value */
be5e7a76 11622 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11623 store_reg_from_load(s, 15, tmp);
be5e7a76 11624 }
99c475ab
FB
11625 break;
11626
9ee6e8bb
PB
11627 case 1: case 3: case 9: case 11: /* czb */
11628 rm = insn & 7;
d9ba4830 11629 tmp = load_reg(s, rm);
9ee6e8bb
PB
11630 s->condlabel = gen_new_label();
11631 s->condjmp = 1;
11632 if (insn & (1 << 11))
cb63669a 11633 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11634 else
cb63669a 11635 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11636 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11637 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11638 val = (uint32_t)s->pc + 2;
11639 val += offset;
11640 gen_jmp(s, val);
11641 break;
11642
11643 case 15: /* IT, nop-hint. */
11644 if ((insn & 0xf) == 0) {
11645 gen_nop_hint(s, (insn >> 4) & 0xf);
11646 break;
11647 }
11648 /* If Then. */
11649 s->condexec_cond = (insn >> 4) & 0xe;
11650 s->condexec_mask = insn & 0x1f;
11651 /* No actual code generated for this insn, just setup state. */
11652 break;
11653
06c949e6 11654 case 0xe: /* bkpt */
d4a2dc67
PM
11655 {
11656 int imm8 = extract32(insn, 0, 8);
be5e7a76 11657 ARCH(5);
73710361
GB
11658 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11659 default_exception_el(s));
06c949e6 11660 break;
d4a2dc67 11661 }
06c949e6 11662
19a6e31c
PM
11663 case 0xa: /* rev, and hlt */
11664 {
11665 int op1 = extract32(insn, 6, 2);
11666
11667 if (op1 == 2) {
11668 /* HLT */
11669 int imm6 = extract32(insn, 0, 6);
11670
11671 gen_hlt(s, imm6);
11672 break;
11673 }
11674
11675 /* Otherwise this is rev */
9ee6e8bb
PB
11676 ARCH(6);
11677 rn = (insn >> 3) & 0x7;
11678 rd = insn & 0x7;
b0109805 11679 tmp = load_reg(s, rn);
19a6e31c 11680 switch (op1) {
66896cb8 11681 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11682 case 1: gen_rev16(tmp); break;
11683 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11684 default:
11685 g_assert_not_reached();
9ee6e8bb 11686 }
b0109805 11687 store_reg(s, rd, tmp);
9ee6e8bb 11688 break;
19a6e31c 11689 }
9ee6e8bb 11690
d9e028c1
PM
11691 case 6:
11692 switch ((insn >> 5) & 7) {
11693 case 2:
11694 /* setend */
11695 ARCH(6);
9886ecdf
PB
11696 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11697 gen_helper_setend(cpu_env);
dcba3a8d 11698 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11699 }
9ee6e8bb 11700 break;
d9e028c1
PM
11701 case 3:
11702 /* cps */
11703 ARCH(6);
11704 if (IS_USER(s)) {
11705 break;
8984bd2e 11706 }
b53d8923 11707 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11708 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11709 /* FAULTMASK */
11710 if (insn & 1) {
11711 addr = tcg_const_i32(19);
11712 gen_helper_v7m_msr(cpu_env, addr, tmp);
11713 tcg_temp_free_i32(addr);
11714 }
11715 /* PRIMASK */
11716 if (insn & 2) {
11717 addr = tcg_const_i32(16);
11718 gen_helper_v7m_msr(cpu_env, addr, tmp);
11719 tcg_temp_free_i32(addr);
11720 }
11721 tcg_temp_free_i32(tmp);
11722 gen_lookup_tb(s);
11723 } else {
11724 if (insn & (1 << 4)) {
11725 shift = CPSR_A | CPSR_I | CPSR_F;
11726 } else {
11727 shift = 0;
11728 }
11729 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11730 }
d9e028c1
PM
11731 break;
11732 default:
11733 goto undef;
9ee6e8bb
PB
11734 }
11735 break;
11736
99c475ab
FB
11737 default:
11738 goto undef;
11739 }
11740 break;
11741
11742 case 12:
a7d3970d 11743 {
99c475ab 11744 /* load/store multiple */
39d5492a
PM
11745 TCGv_i32 loaded_var;
11746 TCGV_UNUSED_I32(loaded_var);
99c475ab 11747 rn = (insn >> 8) & 0x7;
b0109805 11748 addr = load_reg(s, rn);
99c475ab
FB
11749 for (i = 0; i < 8; i++) {
11750 if (insn & (1 << i)) {
99c475ab
FB
11751 if (insn & (1 << 11)) {
11752 /* load */
c40c8556 11753 tmp = tcg_temp_new_i32();
12dcc321 11754 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11755 if (i == rn) {
11756 loaded_var = tmp;
11757 } else {
11758 store_reg(s, i, tmp);
11759 }
99c475ab
FB
11760 } else {
11761 /* store */
b0109805 11762 tmp = load_reg(s, i);
12dcc321 11763 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11764 tcg_temp_free_i32(tmp);
99c475ab 11765 }
5899f386 11766 /* advance to the next address */
b0109805 11767 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11768 }
11769 }
b0109805 11770 if ((insn & (1 << rn)) == 0) {
a7d3970d 11771 /* base reg not in list: base register writeback */
b0109805
PB
11772 store_reg(s, rn, addr);
11773 } else {
a7d3970d
PM
11774 /* base reg in list: if load, complete it now */
11775 if (insn & (1 << 11)) {
11776 store_reg(s, rn, loaded_var);
11777 }
7d1b0095 11778 tcg_temp_free_i32(addr);
b0109805 11779 }
99c475ab 11780 break;
a7d3970d 11781 }
99c475ab
FB
11782 case 13:
11783 /* conditional branch or swi */
11784 cond = (insn >> 8) & 0xf;
11785 if (cond == 0xe)
11786 goto undef;
11787
11788 if (cond == 0xf) {
11789 /* swi */
eaed129d 11790 gen_set_pc_im(s, s->pc);
d4a2dc67 11791 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11792 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11793 break;
11794 }
11795 /* generate a conditional jump to next instruction */
e50e6a20 11796 s->condlabel = gen_new_label();
39fb730a 11797 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11798 s->condjmp = 1;
99c475ab
FB
11799
11800 /* jump to the offset */
5899f386 11801 val = (uint32_t)s->pc + 2;
99c475ab 11802 offset = ((int32_t)insn << 24) >> 24;
5899f386 11803 val += offset << 1;
8aaca4c0 11804 gen_jmp(s, val);
99c475ab
FB
11805 break;
11806
11807 case 14:
358bf29e 11808 if (insn & (1 << 11)) {
9ee6e8bb
PB
11809 if (disas_thumb2_insn(env, s, insn))
11810 goto undef32;
358bf29e
PB
11811 break;
11812 }
9ee6e8bb 11813 /* unconditional branch */
99c475ab
FB
11814 val = (uint32_t)s->pc;
11815 offset = ((int32_t)insn << 21) >> 21;
11816 val += (offset << 1) + 2;
8aaca4c0 11817 gen_jmp(s, val);
99c475ab
FB
11818 break;
11819
11820 case 15:
9ee6e8bb 11821 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11822 goto undef32;
9ee6e8bb 11823 break;
99c475ab
FB
11824 }
11825 return;
9ee6e8bb 11826undef32:
73710361
GB
11827 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11828 default_exception_el(s));
9ee6e8bb
PB
11829 return;
11830illegal_op:
99c475ab 11831undef:
73710361
GB
11832 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11833 default_exception_el(s));
99c475ab
FB
11834}
11835
541ebcd4
PM
11836static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11837{
11838 /* Return true if the insn at dc->pc might cross a page boundary.
11839 * (False positives are OK, false negatives are not.)
11840 */
11841 uint16_t insn;
11842
11843 if ((s->pc & 3) == 0) {
11844 /* At a 4-aligned address we can't be crossing a page */
11845 return false;
11846 }
11847
11848 /* This must be a Thumb insn */
f9fd40eb 11849 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11850
11851 if ((insn >> 11) >= 0x1d) {
11852 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11853 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11854 * end up actually treating this as two 16-bit insns (see the
11855 * code at the start of disas_thumb2_insn()) but we don't bother
11856 * to check for that as it is unlikely, and false positives here
11857 * are harmless.
11858 */
11859 return true;
11860 }
11861 /* Definitely a 16-bit insn, can't be crossing a page. */
11862 return false;
11863}
11864
/* Translator hook: set up the DisasContext for a new ARM/Thumb TB.
 *
 * Decodes the TB flags word (dc->base.tb->flags) into the per-TB decode
 * state (ISA mode, endianness, IT-block state, MMU index, FP/vector
 * configuration, M-profile security state, debug single-step state),
 * allocates the scratch TCG temporaries shared by the FP/Neon/iwMMXt
 * decoders, and returns the (possibly reduced) max_insns bound for this
 * TB.
 */
static int arm_tr_init_disas_context(DisasContextBase *dcbase,
                                     CPUState *cs, int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Unpack the per-TB flags captured at code-generation time. */
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    /* IT-block state: mask is stored pre-shifted by one (see the IT
     * advance logic in thumb_tr_translate_insn).
     */
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->next_page_start =
        (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = (dc->next_page_start - dc->base.pc_first) / 4;
        max_insns = MIN(max_insns, bound);
    }

    /* Scratch temporaries used by the VFP/Neon/iwMMXt decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();

    return max_insns;
}
11949
b1476854
LV
11950static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11951{
11952 DisasContext *dc = container_of(dcbase, DisasContext, base);
11953
11954 /* A note on handling of the condexec (IT) bits:
11955 *
11956 * We want to avoid the overhead of having to write the updated condexec
11957 * bits back to the CPUARMState for every instruction in an IT block. So:
11958 * (1) if the condexec bits are not already zero then we write
11959 * zero back into the CPUARMState now. This avoids complications trying
11960 * to do it at the end of the block. (For example if we don't do this
11961 * it's hard to identify whether we can safely skip writing condexec
11962 * at the end of the TB, which we definitely want to do for the case
11963 * where a TB doesn't do anything with the IT state at all.)
11964 * (2) if we are going to leave the TB then we call gen_set_condexec()
11965 * which will write the correct value into CPUARMState if zero is wrong.
11966 * This is done both for leaving the TB at the end, and for leaving
11967 * it because of an exception we know will happen, which is done in
11968 * gen_exception_insn(). The latter is necessary because we need to
11969 * leave the TB with the PC/IT state just prior to execution of the
11970 * instruction which caused the exception.
11971 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11972 * then the CPUARMState will be wrong and we need to reset it.
11973 * This is handled in the same way as restoration of the
11974 * PC in these situations; we save the value of the condexec bits
11975 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11976 * then uses this to restore them after an exception.
11977 *
11978 * Note that there are no instructions which can read the condexec
11979 * bits, and none which can write non-static values to them, so
11980 * we don't need to care about whether CPUARMState is correct in the
11981 * middle of a TB.
11982 */
11983
11984 /* Reset the conditional execution bits immediately. This avoids
11985 complications trying to do it at the end of the block. */
11986 if (dc->condexec_mask || dc->condexec_cond) {
11987 TCGv_i32 tmp = tcg_temp_new_i32();
11988 tcg_gen_movi_i32(tmp, 0);
11989 store_cpu_field(tmp, condexec_bits);
11990 }
23169224 11991 tcg_clear_temp_count();
b1476854
LV
11992}
11993
f62bd897
LV
11994static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11995{
11996 DisasContext *dc = container_of(dcbase, DisasContext, base);
11997
11998 dc->insn_start_idx = tcg_op_buf_count();
11999 tcg_gen_insn_start(dc->pc,
12000 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12001 0);
12002}
12003
a68956ad
LV
12004static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12005 const CPUBreakpoint *bp)
12006{
12007 DisasContext *dc = container_of(dcbase, DisasContext, base);
12008
12009 if (bp->flags & BP_CPU) {
12010 gen_set_condexec(dc);
12011 gen_set_pc_im(dc, dc->pc);
12012 gen_helper_check_breakpoints(cpu_env);
12013 /* End the TB early; it's likely not going to be executed */
12014 dc->base.is_jmp = DISAS_TOO_MANY;
12015 } else {
12016 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12017 /* The address covered by the breakpoint must be
12018 included in [tb->pc, tb->pc + tb->size) in order
12019 to for it to be properly cleared -- thus we
12020 increment the PC here so that the logic setting
12021 tb->size below does the right thing. */
12022 /* TODO: Advance PC by correct instruction length to
12023 * avoid disassembler error messages */
12024 dc->pc += 2;
12025 dc->base.is_jmp = DISAS_NORETURN;
12026 }
12027
12028 return true;
12029}
12030
/* Common prologue for the ARM and Thumb translate_insn hooks.
 *
 * Returns true if translation of the current insn must be suppressed
 * because an exception has already been generated and the TB ended
 * (kernel-trap page in user mode, or a pending software-step
 * exception); returns false to proceed with normal decode.
 */
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
13189a90 12064
d0264d86 12065static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12066{
13189a90
LV
12067 if (dc->condjmp && !dc->base.is_jmp) {
12068 gen_set_label(dc->condlabel);
12069 dc->condjmp = 0;
12070 }
13189a90 12071 dc->base.pc_next = dc->pc;
23169224 12072 translator_loop_temp_check(&dc->base);
13189a90
LV
12073}
12074
722ef0a5
RH
12075static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12076{
12077 DisasContext *dc = container_of(dcbase, DisasContext, base);
12078 CPUARMState *env = cpu->env_ptr;
12079 unsigned int insn;
12080
12081 if (arm_pre_translate_insn(dc)) {
12082 return;
12083 }
12084
12085 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
12086 dc->pc += 4;
12087 disas_arm_insn(dc, insn);
12088
d0264d86
RH
12089 arm_post_translate_insn(dc);
12090
12091 /* ARM is a fixed-length ISA. We performed the cross-page check
12092 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12093}
12094
12095static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12096{
12097 DisasContext *dc = container_of(dcbase, DisasContext, base);
12098 CPUARMState *env = cpu->env_ptr;
12099
12100 if (arm_pre_translate_insn(dc)) {
12101 return;
12102 }
12103
12104 disas_thumb_insn(env, dc);
12105
12106 /* Advance the Thumb condexec condition. */
12107 if (dc->condexec_mask) {
12108 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12109 ((dc->condexec_mask >> 4) & 1));
12110 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12111 if (dc->condexec_mask == 0) {
12112 dc->condexec_cond = 0;
12113 }
12114 }
12115
d0264d86
RH
12116 arm_post_translate_insn(dc);
12117
12118 /* Thumb is a variable-length ISA. Stop translation when the next insn
12119 * will touch a new page. This ensures that prefetch aborts occur at
12120 * the right place.
12121 *
12122 * We want to stop the TB if the next insn starts in a new page,
12123 * or if it spans between this page and the next. This means that
12124 * if we're looking at the last halfword in the page we need to
12125 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12126 * or a 32-bit Thumb insn (which won't).
12127 * This is to avoid generating a silly TB with a single 16-bit insn
12128 * in it at the end of this page (which would execute correctly
12129 * but isn't very efficient).
12130 */
12131 if (dc->base.is_jmp == DISAS_NEXT
12132 && (dc->pc >= dc->next_page_start
12133 || (dc->pc >= dc->next_page_start - 3
12134 && insn_crosses_page(env, dc)))) {
12135 dc->base.is_jmp = DISAS_TOO_MANY;
12136 }
722ef0a5
RH
12137}
12138
70d3c035 12139static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1d8a5535 12140{
70d3c035 12141 DisasContext *dc = container_of(dcbase, DisasContext, base);
2e70f6ef 12142
70d3c035
LV
12143 if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
12144 /* FIXME: This can theoretically happen with self-modifying code. */
12145 cpu_abort(cpu, "IO on conditional branch instruction");
2e70f6ef 12146 }
9ee6e8bb 12147
b5ff1b31 12148 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
12149 instruction was a conditional branch or trap, and the PC has
12150 already been written. */
f021b2c4 12151 gen_set_condexec(dc);
dcba3a8d 12152 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
3bb8a96f
PM
12153 /* Exception return branches need some special case code at the
12154 * end of the TB, which is complex enough that it has to
12155 * handle the single-step vs not and the condition-failed
12156 * insn codepath itself.
12157 */
12158 gen_bx_excret_final_code(dc);
12159 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 12160 /* Unconditional and "condition passed" instruction codepath. */
dcba3a8d 12161 switch (dc->base.is_jmp) {
7999a5c8 12162 case DISAS_SWI:
50225ad0 12163 gen_ss_advance(dc);
73710361
GB
12164 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12165 default_exception_el(dc));
7999a5c8
SF
12166 break;
12167 case DISAS_HVC:
37e6456e 12168 gen_ss_advance(dc);
73710361 12169 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
12170 break;
12171 case DISAS_SMC:
37e6456e 12172 gen_ss_advance(dc);
73710361 12173 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
12174 break;
12175 case DISAS_NEXT:
a68956ad 12176 case DISAS_TOO_MANY:
7999a5c8
SF
12177 case DISAS_UPDATE:
12178 gen_set_pc_im(dc, dc->pc);
12179 /* fall through */
12180 default:
5425415e
PM
12181 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12182 gen_singlestep_exception(dc);
a0c231e6
RH
12183 break;
12184 case DISAS_NORETURN:
12185 break;
7999a5c8 12186 }
8aaca4c0 12187 } else {
9ee6e8bb
PB
12188 /* While branches must always occur at the end of an IT block,
12189 there are a few other things that can cause us to terminate
65626741 12190 the TB in the middle of an IT block:
9ee6e8bb
PB
12191 - Exception generating instructions (bkpt, swi, undefined).
12192 - Page boundaries.
12193 - Hardware watchpoints.
12194 Hardware breakpoints have already been handled and skip this code.
12195 */
dcba3a8d 12196 switch(dc->base.is_jmp) {
8aaca4c0 12197 case DISAS_NEXT:
a68956ad 12198 case DISAS_TOO_MANY:
6e256c93 12199 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 12200 break;
577bf808 12201 case DISAS_JUMP:
8a6b28c7
EC
12202 gen_goto_ptr();
12203 break;
e8d52302
AB
12204 case DISAS_UPDATE:
12205 gen_set_pc_im(dc, dc->pc);
12206 /* fall through */
577bf808 12207 default:
8aaca4c0 12208 /* indicate that the hash table must be used to find the next TB */
57fec1fe 12209 tcg_gen_exit_tb(0);
8aaca4c0 12210 break;
a0c231e6 12211 case DISAS_NORETURN:
8aaca4c0
FB
12212 /* nothing more to generate */
12213 break;
9ee6e8bb 12214 case DISAS_WFI:
1ce94f81 12215 gen_helper_wfi(cpu_env);
84549b6d
PM
12216 /* The helper doesn't necessarily throw an exception, but we
12217 * must go back to the main loop to check for interrupts anyway.
12218 */
12219 tcg_gen_exit_tb(0);
9ee6e8bb 12220 break;
72c1d3af
PM
12221 case DISAS_WFE:
12222 gen_helper_wfe(cpu_env);
12223 break;
c87e5a61
PM
12224 case DISAS_YIELD:
12225 gen_helper_yield(cpu_env);
12226 break;
9ee6e8bb 12227 case DISAS_SWI:
73710361
GB
12228 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12229 default_exception_el(dc));
9ee6e8bb 12230 break;
37e6456e 12231 case DISAS_HVC:
73710361 12232 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
12233 break;
12234 case DISAS_SMC:
73710361 12235 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 12236 break;
8aaca4c0 12237 }
f021b2c4
PM
12238 }
12239
12240 if (dc->condjmp) {
12241 /* "Condition failed" instruction codepath for the branch/trap insn */
12242 gen_set_label(dc->condlabel);
12243 gen_set_condexec(dc);
b636649f 12244 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
12245 gen_set_pc_im(dc, dc->pc);
12246 gen_singlestep_exception(dc);
12247 } else {
6e256c93 12248 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 12249 }
2c0262af 12250 }
23169224
LV
12251
12252 /* Functions above can change dc->pc, so re-align db->pc_next */
12253 dc->base.pc_next = dc->pc;
70d3c035
LV
12254}
12255
4013f7fc
LV
12256static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12257{
12258 DisasContext *dc = container_of(dcbase, DisasContext, base);
12259
12260 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12261 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size,
12262 dc->thumb | (dc->sctlr_b << 1));
12263}
12264
23169224
LV
/*
 * Translator hooks for A32 (ARM-state) code, consumed by
 * translator_loop() via gen_intermediate_code().
 */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12274
722ef0a5
RH
/*
 * Translator hooks for Thumb-state code.  Identical to the A32 ops
 * except for the per-insn translate hook.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12284
70d3c035 12285/* generate intermediate code for basic block 'tb'. */
23169224 12286void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12287{
23169224
LV
12288 DisasContext dc;
12289 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12290
722ef0a5
RH
12291 if (ARM_TBFLAG_THUMB(tb->flags)) {
12292 ops = &thumb_translator_ops;
12293 }
23169224 12294#ifdef TARGET_AARCH64
70d3c035 12295 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 12296 ops = &aarch64_translator_ops;
2c0262af
FB
12297 }
12298#endif
23169224
LV
12299
12300 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
12301}
12302
b5ff1b31 12303static const char *cpu_mode_names[16] = {
28c9457d
EI
12304 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12305 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 12306};
9ee6e8bb 12307
878096ee
AF
/*
 * Dump the AArch32 CPU state (registers, PSR, optionally VFP regs)
 * through cpu_fprintf.  AArch64 state is delegated to
 * aarch64_cpu_dump_state().  The exact output format is relied on by
 * users of the monitor/log output; do not change it casually.
 */
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    /* General-purpose registers, four per line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: print XPSR plus handler/thread and security state. */
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        /* A/R-profile: print CPSR, NS/S state if EL3 exists, and mode. */
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        /* Trailing 32/26: bit 4 of the mode field distinguishes the
         * 32-bit modes from legacy 26-bit ones.
         */
        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        /* 16 double regs for VFP, 32 for VFP3; print as s-pairs and d. */
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
a6b025d3 12394
bad729e2
RH
12395void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12396 target_ulong *data)
d2856f1a 12397{
3926cc84 12398 if (is_a64(env)) {
bad729e2 12399 env->pc = data[0];
40f860cd 12400 env->condexec_bits = 0;
aaa1f954 12401 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12402 } else {
bad729e2
RH
12403 env->regs[15] = data[0];
12404 env->condexec_bits = data[1];
aaa1f954 12405 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12406 }
d2856f1a 12407}