]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: check CF_PARALLEL instead of parallel_cpus
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
2b51668f
PM
40#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 42/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 43#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
c99a55d3 44#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
2b51668f
PM
45#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 50
86753403 51#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 52
f570c61e 53#include "translate.h"
e12ce78d 54
b5ff1b31
FB
55#if defined(CONFIG_USER_ONLY)
56#define IS_USER(s) 1
57#else
58#define IS_USER(s) (s->user)
59#endif
60
1bcea73e 61TCGv_env cpu_env;
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
b26eefb6 69/* FIXME: These should be removed. */
39d5492a 70static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 71static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 72
022c62cb 73#include "exec/gen-icount.h"
2e70f6ef 74
155c3eac
FN
/* Names for the 16 AArch32 core registers, indexed by register number
 * (index 15 is the PC).  Used only to label the TCG globals created in
 * arm_translate_init().  Declared fully const so the table can live in
 * read-only storage; callers only read the pointers.
 */
static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
b26eefb6
PB
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    /* cpu_env must be created first: all the other globals are memory
     * locations expressed as offsets from it.
     */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    /* One TCG global per AArch32 core register, backed by
     * CPUARMState.regs[] and named from regnames[].
     */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* Condition flags as separate 32-bit globals. */
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    /* State used by the exclusive load/store (LDREX/STREX) sequences. */
    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    /* The AArch64 translator sets up its own globals too. */
    a64_translate_init();
}
104
9bb6558a
PM
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,       /* mask extracting the Rt register number */
    ISSIsAcqRel = (1 << 6),  /* access has acquire/release semantics */
    ISSInvalid = (1 << 5),   /* caller asked us not to record ISS info */
    ISSIsWrite = (1 << 7),   /* store rather than load */
    ISSIs16Bit = (1 << 8),   /* 16-bit (Thumb) instruction */
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;   /* access size field of the memop */
    bool sse = memop & MO_SIGN;  /* sign-extending load? */
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
147
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        /* Secure world: drop to the secure PL0 index. */
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MNegPri:
        /* M profile, non-secure: access as MUser. */
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSNegPri:
        /* M profile, secure: access as MSUser. */
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_S2NS:
    default:
        /* No other mmu_idx should be live while translating A32/T32. */
        g_assert_not_reached();
    }
}
177
/* Load a 32-bit value from CPUARMState at the given byte offset into a
 * freshly allocated temporary, which is returned (caller frees it).
 */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

/* Convenience wrapper naming a CPUARMState field instead of an offset. */
#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/* Store var into CPUARMState at the given byte offset; var is freed
 * (marked dead) by this function.
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

/* Convenience wrapper naming a CPUARMState field instead of an offset. */
#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 195
b26eefb6 196/* Set a variable to the value of a CPU register. */
39d5492a 197static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
198{
199 if (reg == 15) {
200 uint32_t addr;
b90372ad 201 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
202 if (s->thumb)
203 addr = (long)s->pc + 2;
204 else
205 addr = (long)s->pc + 4;
206 tcg_gen_movi_i32(var, addr);
207 } else {
155c3eac 208 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
209 }
210}
211
212/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 213static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 214{
39d5492a 215 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
216 load_reg_var(s, tmp, reg);
217 return tmp;
218}
219
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        /* PC was written: record the jump so translation of this TB ends. */
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
236
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual-halfword extensions are done out of line via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


/* Update the CPSR from var under control of mask, via the cpsr_write
 * helper.  The mask temporary is created and freed here; var is not freed.
 */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
255
/* Raise an exception internal to QEMU; excp must be one of the EXCP_
 * values for which excp_is_internal() is true (enforced by the assert).
 */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

/* Raise exception excp with the given syndrome, targeting exception
 * level target_el (cf. gen_exception_internal above for QEMU-internal
 * exceptions, which carry no syndrome).
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
278
50225ad0
PM
/* Advance the architectural single-step state machine, if active. */
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

/* Raise the software-step exception after completing one insn. */
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}
330
/* Dual signed 16x16->32 multiply: on return the product of the two low
 * halfwords is in a and the product of the two high halfwords is in b.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* Low halfwords, sign-extended and multiplied into tmp1. */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* High halfwords (arithmetic shift keeps the sign). */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
345
/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    /* Exchange the odd and even bytes within each halfword:
     * result = ((var >> 8) & mask) | ((var & mask) << 8).
     */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
367
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    /* Result is accumulated into (and returned as) a. */
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    /* Result is accumulated into (and returned as) a. */
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
395
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    /* Unsigned widening multiply into a lo/hi pair of 32-bit temps. */
    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    /* Glue the halves into the returned 64-bit temporary. */
    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Signed 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
432
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;

   Masking bit 15 out of both operands before the add and restoring it
   via the xor stops any carry from propagating between the two
   halfwords.
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
462
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  (Flags are read, not updated; the *_CC variants
 * below update them.)
 */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
497
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 gives the 33-bit sum: result in NF, carry-out in CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff the operands have
     * the same sign and the result's sign differs.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained 33-bit adds: t0 + CF, then + t1. */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the whole sum in 64 bits and split out NF/CF. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM C flag for subtraction is "no borrow", i.e. t0 >= t1. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    /* Subtract-with-carry is add-with-carry of the complement. */
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
563
/* Generate gen_shl/gen_shr: variable shifts where a shift amount of
 * 32 or more (the low byte of t1, above 0x1f) must yield zero rather
 * than the host's modulo-32 behaviour; the movcond selects a zero
 * source operand in that case.
 */
#define GEN_SHIFT(name)                                       \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{                                                             \
    TCGv_i32 tmp1, tmp2, tmp3;                                \
    tmp1 = tcg_temp_new_i32();                                \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                         \
    tmp2 = tcg_const_i32(0);                                  \
    tmp3 = tcg_const_i32(0x1f);                               \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3);                                  \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                       \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                   \
    tcg_temp_free_i32(tmp2);                                  \
    tcg_temp_free_i32(tmp1);                                  \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

/* Variable arithmetic shift right: amounts >= 0x1f are clamped to 31,
 * which gives the correct all-sign-bits result for large shifts.
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* dest = |src| (signed), via movcond between src and its negation. */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 604
/* Set CF to the shifter carry-out: bit (shift) of var, or bit 0 when
 * shift == 0.
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* For shift == 31 the shift alone isolates the bit. */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 616
9a119ff6 617/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
618static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
619 int shift, int flags)
9a119ff6
PB
620{
621 switch (shiftop) {
622 case 0: /* LSL */
623 if (shift != 0) {
624 if (flags)
625 shifter_out_im(var, 32 - shift);
626 tcg_gen_shli_i32(var, var, shift);
627 }
628 break;
629 case 1: /* LSR */
630 if (shift == 0) {
631 if (flags) {
66c374de 632 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
633 }
634 tcg_gen_movi_i32(var, 0);
635 } else {
636 if (flags)
637 shifter_out_im(var, shift - 1);
638 tcg_gen_shri_i32(var, var, shift);
639 }
640 break;
641 case 2: /* ASR */
642 if (shift == 0)
643 shift = 32;
644 if (flags)
645 shifter_out_im(var, shift - 1);
646 if (shift == 32)
647 shift = 31;
648 tcg_gen_sari_i32(var, var, shift);
649 break;
650 case 3: /* ROR/RRX */
651 if (shift != 0) {
652 if (flags)
653 shifter_out_im(var, shift - 1);
f669df27 654 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 655 } else {
39d5492a 656 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 657 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
658 if (flags)
659 shifter_out_im(var, 0);
660 tcg_gen_shri_i32(var, var, 1);
b26eefb6 661 tcg_gen_or_i32(var, var, tmp);
7d1b0095 662 tcg_temp_free_i32(tmp);
b26eefb6
PB
663 }
664 }
665};
666
/* Shift var by a register-specified amount.  When flags is set the
 * shift is done out of line by a helper that also updates the flags in
 * env; otherwise the inline gen_shl/gen_shr/gen_sar helpers are used.
 * The shift temporary is freed.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
694
/* Expand one of the parallel add/sub operations (ARM encoding).
 * op2 selects the operation; pfx selects the signed/unsigned/
 * saturating/halving variant of the helper.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        /* Basic signed ops: helper takes a pointer to the GE flags. */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        /* Basic unsigned ops: likewise get the GE-flags pointer. */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
740
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
/* Expand one of the parallel add/sub operations (Thumb-2 encoding);
 * note that here op1 selects the operation and op2 the variant, the
 * reverse of gen_arm_parallel_addsub() above.
 */
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        /* Basic signed ops: helper takes a pointer to the GE flags. */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        /* Basic unsigned ops: likewise get the GE-flags pointer. */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
787
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 *
 * On return cmp holds a TCG value and a condition such that the ARM
 * condition is true iff (cond value, 0) holds; cmp->value_global says
 * whether the value is a TCG global (not to be freed) or a temporary
 * (freed later by arm_free_cc()).
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Each odd condition code is the inverse of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
877
/* Release any temporary allocated by arm_test_cc() (globals are kept). */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

/* Branch to label if the prepared condition holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

/* Convenience: build the condition for cc, branch on it, and clean up. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 897
/* Indexed by the 4-bit data-processing opcode (see the per-entry
 * comments): nonzero marks the logical operations, zero the arithmetic
 * ones.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 916
/* If we are inside an IT block, sync the translator's condexec state
 * back into env->condexec_bits.  Note the mask is stored shifted right
 * by one bit (the in-CPU-state encoding).
 */
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

/* Set the emulated PC (r15) to an immediate value. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
931
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        /* Interworking state change: bit 0 of the target is the new
         * Thumb bit; store it only when it actually changes.
         */
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);  /* frees var */
}
955
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        /* The magic-value check is emitted by
         * gen_bx_excret_final_code() at the end of the TB.
         */
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}
972
/* Emit the end-of-TB code for a possibly-exception-returning branch
 * flagged via DISAS_BX_EXCRET: test the new PC against the magic range
 * at runtime and either end the TB normally or raise the internal
 * EXCP_EXCEPTION_EXIT exception.
 */
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
1010
/* Emit code for the M-profile BXNS (branch to non-secure) instruction:
 * all the real work is done by the v7m_bxns helper at runtime.
 */
static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    /* The helper may have changed CPU state; force a full TB exit. */
    s->base.is_jmp = DISAS_EXIT;
}
1029
/* Emit code for the M-profile BLXNS (branch-with-link to non-secure)
 * instruction; the v7m_blxns helper does the work at runtime.
 */
static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
1043
21aeb343
JR
1044/* Variant of store_reg which uses branch&exchange logic when storing
1045 to r15 in ARM architecture v7 and above. The source must be a temporary
1046 and will be marked as dead. */
7dcc1f89 1047static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1048{
1049 if (reg == 15 && ENABLE_ARCH_7) {
1050 gen_bx(s, var);
1051 } else {
1052 store_reg(s, reg, var);
1053 }
1054}
1055
be5e7a76
DES
1056/* Variant of store_reg which uses branch&exchange logic when storing
1057 * to r15 in ARM architecture v5T and above. This is used for storing
1058 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1059 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1060static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1061{
1062 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1063 gen_bx_excret(s, var);
be5e7a76
DES
1064 } else {
1065 store_reg(s, reg, var);
1066 }
1067}
1068
/* Compile-time flag: 1 when building the linux-user (user-mode only)
 * emulator, 0 for full-system emulation. Used to skip the system-mode
 * BE32 address/data fixups below.
 */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
1074
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

/* Widen a 32-bit guest address to a target-width TCGv, applying the
 * system-mode BE32 sub-word address XOR fixup where needed.
 * Caller frees the returned temporary.
 */
static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        /* XOR flips the sub-word address bits for byte/halfword accesses. */
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
1094
/* Emit a 32-bit (or narrower) guest load from a 32-bit address. */
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

/* Emit a 32-bit (or narrower) guest store to a 32-bit address. */
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
08307563 1110
/* Generate a sized load wrapper gen_aa32_ldSUFF() plus an _iss variant
 * that additionally records instruction syndrome info for data aborts.
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

/* Generate a sized store wrapper gen_aa32_stSUFF() plus an _iss variant;
 * the _iss variant marks the access as a write in the syndrome.
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1140
/* Fix up the word order of a just-loaded 64-bit value for system-mode
 * BE32 (SCTLR.B set): the two 32-bit halves are swapped via a rotate.
 */
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}
1148
/* Emit a 64-bit guest load from a 32-bit address, with BE32 frob. */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

/* Convenience wrapper: 64-bit load in the TB's configured endianness. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}
1163
/* Emit a 64-bit guest store to a 32-bit address; for system-mode BE32
 * the two 32-bit halves are swapped into a temporary before storing.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

/* Convenience wrapper: 64-bit store in the TB's configured endianness. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1186
/* Instantiate the sized load/store wrappers:
 * loads: signed/unsigned byte and halfword, plus 32-bit word;
 * stores: byte, halfword, word (no sign distinction needed for stores).
 */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
/* Emit code for the HVC (hypervisor call) instruction. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}
1213
/* Emit code for the SMC (secure monitor call) instruction. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}
1228
/* Raise a QEMU-internal exception for the current instruction.
 * offset is the byte distance from s->pc back to the insn's own address.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    /* Sync IT-block state and PC so the exception sees the insn's state. */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Raise an architectural exception (with syndrome and target EL) for the
 * current instruction; offset is as for gen_exception_internal_insn().
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}
1245
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Bit 0 cleared: the Thumb bit lives in env->thumb, not in the PC. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}
1252
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    /* Not a semihosting trap: UNDEF, with the insn-size-dependent offset. */
    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1279
/* Apply the addressing-mode offset of an A32 load/store (word/byte form)
 * to the address in var: either a 12-bit immediate or a shifted register,
 * added or subtracted according to the U bit (bit 23).
 */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1307
/* Apply the addressing-mode offset of an A32 halfword/doubleword
 * load/store to the address in var: an 8-bit split immediate or a plain
 * register, added or subtracted per the U bit (bit 23). extra is an
 * additional constant adjustment (used e.g. for the second word of LDRD).
 */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1335
/* Return a pointer temporary addressing the float_status to use:
 * the Neon "standard FP" status when neon is nonzero, otherwise the
 * normal VFP status. Caller frees the returned TCGv_ptr.
 */
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
1348
/* Generate gen_vfp_<name>(dp): a two-operand VFP arithmetic op on the
 * F0/F1 scratch registers, result in F0, using the VFP float status.
 */
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1367
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1389
/* One-operand VFP ops on F0 (result back in F0). */
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

/* sqrt/cmp/cmpe helpers take cpu_env (they access FP status/flags). */
static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

/* "e" variant: compare raising exceptions on quiet NaNs as well. */
static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

/* Load floating-point zero into F1 (for compare-with-zero). */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
1437
/* Integer-to-float conversions on F0; neon selects the Neon standard
 * float status instead of the VFP one.
 */
#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

/* Float-to-integer conversions on F0 (integer result in F0s). */
#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

/* Fixed-point conversions with a shift amount; "round" selects the
 * helper variant (round-to-zero for float->fixed, default otherwise).
 */
#define VFP_GEN_FIX(name, round)                                      \
static inline void gen_vfp_##name(int dp, int shift, int neon)        \
{                                                                     \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                        \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,  \
                                        statusptr);                   \
    } else {                                                          \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,  \
                                        statusptr);                   \
    }                                                                 \
    tcg_temp_free_i32(tmp_shift);                                     \
    tcg_temp_free_ptr(statusptr);                                     \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1496
/* Load a VFP value (single or double) from addr into F0. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

/* Store the VFP value in F0 (single or double) to addr. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1514
/* Return the byte offset within CPUARMState of VFP register reg:
 * a D register when dp is set, otherwise an S register (odd-numbered
 * S registers live in the upper half of the containing D register).
 */
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
1538
/* Load one 32-bit pass of a NEON register into a fresh temporary.
 * Caller frees the returned temporary.
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Store var into one 32-bit pass of a NEON register; var is freed. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1551
/* Load/store a whole 64-bit NEON (D) register. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* Float loads/stores are bitwise-identical to integer ones; alias them
 * so the VFP code below can use f32/f64-named operations.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1566
/* Copy VFP register reg into the F0 scratch register. */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy VFP register reg into the F1 scratch register. */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy the F0 scratch register back into VFP register reg. */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1590
/* Bit 20 of a coprocessor insn encoding: set for register-read forms. */
#define ARM_CP_RW_BIT   (1 << 20)

/* Load/store a 64-bit iwMMXt data register. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load an iwMMXt control register into a fresh temporary (caller frees). */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store var into an iwMMXt control register; var is freed. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* Move between the M0 scratch register and iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* 64-bit logical ops: M0 = M0 <op> wRn. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1643
/* Generate gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name)                                               \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)              \
{                                                                     \
    iwmmxt_load_reg(cpu_V1, rn);                                      \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);                 \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (it updates or reads
 * iwMMXt state such as the saturation/condition flags).
 */
#define IWMMXT_OP_ENV(name)                                           \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)              \
{                                                                     \
    iwmmxt_load_reg(cpu_V1, rn);                                      \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);        \
}

/* Instantiate an env-taking op for each element size (b/w/l). */
#define IWMMXT_OP_ENV_SIZE(name)                                      \
IWMMXT_OP_ENV(name##b)                                                \
IWMMXT_OP_ENV(name##w)                                                \
IWMMXT_OP_ENV(name##l)

/* One-operand env-taking op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name)                                          \
static inline void gen_op_iwmmxt_##name##_M0(void)                    \
{                                                                     \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0);                \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
/* Set the MUP (bit 1) flag in the iwMMXt wCon control register. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set the CUP (bit 0) flag in the iwMMXt wCon control register. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Update the N/Z bits of the iwMMXt wCASF flags register from M0. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1753
/* Decode the addressing mode of an iwMMXt load/store and emit code to
 * compute the effective address into dest, with pre/post-indexed
 * writeback to the base register where the encoding requests it.
 * Returns 1 for an invalid (undefined) addressing-mode encoding.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when bit 8 is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        /* Bit 21: writeback of the updated base. */
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1788
/* Fetch the shift amount for an iwMMXt shift instruction into dest,
 * masked by mask: either from one of the wCGR0-3 control registers
 * (bit 8 set; other control registers are invalid) or from the low
 * 32 bits of a data register. Returns 1 on an invalid encoding.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1810
a1c7273b 1811/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1812 (ie. an undefined instruction). */
7dcc1f89 1813static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1814{
1815 int rd, wrd;
1816 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1817 TCGv_i32 addr;
1818 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1819
1820 if ((insn & 0x0e000e00) == 0x0c000000) {
1821 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1822 wrd = insn & 0xf;
1823 rdlo = (insn >> 12) & 0xf;
1824 rdhi = (insn >> 16) & 0xf;
1825 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1826 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1827 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1828 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1829 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1830 } else { /* TMCRR */
da6b5335
FN
1831 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1832 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1833 gen_op_iwmmxt_set_mup();
1834 }
1835 return 0;
1836 }
1837
1838 wrd = (insn >> 12) & 0xf;
7d1b0095 1839 addr = tcg_temp_new_i32();
da6b5335 1840 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1841 tcg_temp_free_i32(addr);
18c9b560 1842 return 1;
da6b5335 1843 }
18c9b560
AZ
1844 if (insn & ARM_CP_RW_BIT) {
1845 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1846 tmp = tcg_temp_new_i32();
12dcc321 1847 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1848 iwmmxt_store_creg(wrd, tmp);
18c9b560 1849 } else {
e677137d
PB
1850 i = 1;
1851 if (insn & (1 << 8)) {
1852 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1853 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1854 i = 0;
1855 } else { /* WLDRW wRd */
29531141 1856 tmp = tcg_temp_new_i32();
12dcc321 1857 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1858 }
1859 } else {
29531141 1860 tmp = tcg_temp_new_i32();
e677137d 1861 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1862 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1863 } else { /* WLDRB */
12dcc321 1864 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1865 }
1866 }
1867 if (i) {
1868 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1869 tcg_temp_free_i32(tmp);
e677137d 1870 }
18c9b560
AZ
1871 gen_op_iwmmxt_movq_wRn_M0(wrd);
1872 }
1873 } else {
1874 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1875 tmp = iwmmxt_load_creg(wrd);
12dcc321 1876 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1877 } else {
1878 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1879 tmp = tcg_temp_new_i32();
e677137d
PB
1880 if (insn & (1 << 8)) {
1881 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1882 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1883 } else { /* WSTRW wRd */
ecc7b3aa 1884 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1885 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1886 }
1887 } else {
1888 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1889 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1890 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1891 } else { /* WSTRB */
ecc7b3aa 1892 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1893 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1894 }
1895 }
18c9b560 1896 }
29531141 1897 tcg_temp_free_i32(tmp);
18c9b560 1898 }
7d1b0095 1899 tcg_temp_free_i32(addr);
18c9b560
AZ
1900 return 0;
1901 }
1902
1903 if ((insn & 0x0f000000) != 0x0e000000)
1904 return 1;
1905
1906 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1907 case 0x000: /* WOR */
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 0) & 0xf;
1910 rd1 = (insn >> 16) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 gen_op_iwmmxt_orq_M0_wRn(rd1);
1913 gen_op_iwmmxt_setpsr_nz();
1914 gen_op_iwmmxt_movq_wRn_M0(wrd);
1915 gen_op_iwmmxt_set_mup();
1916 gen_op_iwmmxt_set_cup();
1917 break;
1918 case 0x011: /* TMCR */
1919 if (insn & 0xf)
1920 return 1;
1921 rd = (insn >> 12) & 0xf;
1922 wrd = (insn >> 16) & 0xf;
1923 switch (wrd) {
1924 case ARM_IWMMXT_wCID:
1925 case ARM_IWMMXT_wCASF:
1926 break;
1927 case ARM_IWMMXT_wCon:
1928 gen_op_iwmmxt_set_cup();
1929 /* Fall through. */
1930 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1931 tmp = iwmmxt_load_creg(wrd);
1932 tmp2 = load_reg(s, rd);
f669df27 1933 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1934 tcg_temp_free_i32(tmp2);
da6b5335 1935 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1936 break;
1937 case ARM_IWMMXT_wCGR0:
1938 case ARM_IWMMXT_wCGR1:
1939 case ARM_IWMMXT_wCGR2:
1940 case ARM_IWMMXT_wCGR3:
1941 gen_op_iwmmxt_set_cup();
da6b5335
FN
1942 tmp = load_reg(s, rd);
1943 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1944 break;
1945 default:
1946 return 1;
1947 }
1948 break;
1949 case 0x100: /* WXOR */
1950 wrd = (insn >> 12) & 0xf;
1951 rd0 = (insn >> 0) & 0xf;
1952 rd1 = (insn >> 16) & 0xf;
1953 gen_op_iwmmxt_movq_M0_wRn(rd0);
1954 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1955 gen_op_iwmmxt_setpsr_nz();
1956 gen_op_iwmmxt_movq_wRn_M0(wrd);
1957 gen_op_iwmmxt_set_mup();
1958 gen_op_iwmmxt_set_cup();
1959 break;
1960 case 0x111: /* TMRC */
1961 if (insn & 0xf)
1962 return 1;
1963 rd = (insn >> 12) & 0xf;
1964 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1965 tmp = iwmmxt_load_creg(wrd);
1966 store_reg(s, rd, tmp);
18c9b560
AZ
1967 break;
1968 case 0x300: /* WANDN */
1969 wrd = (insn >> 12) & 0xf;
1970 rd0 = (insn >> 0) & 0xf;
1971 rd1 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1973 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1974 gen_op_iwmmxt_andq_M0_wRn(rd1);
1975 gen_op_iwmmxt_setpsr_nz();
1976 gen_op_iwmmxt_movq_wRn_M0(wrd);
1977 gen_op_iwmmxt_set_mup();
1978 gen_op_iwmmxt_set_cup();
1979 break;
1980 case 0x200: /* WAND */
1981 wrd = (insn >> 12) & 0xf;
1982 rd0 = (insn >> 0) & 0xf;
1983 rd1 = (insn >> 16) & 0xf;
1984 gen_op_iwmmxt_movq_M0_wRn(rd0);
1985 gen_op_iwmmxt_andq_M0_wRn(rd1);
1986 gen_op_iwmmxt_setpsr_nz();
1987 gen_op_iwmmxt_movq_wRn_M0(wrd);
1988 gen_op_iwmmxt_set_mup();
1989 gen_op_iwmmxt_set_cup();
1990 break;
1991 case 0x810: case 0xa10: /* WMADD */
1992 wrd = (insn >> 12) & 0xf;
1993 rd0 = (insn >> 0) & 0xf;
1994 rd1 = (insn >> 16) & 0xf;
1995 gen_op_iwmmxt_movq_M0_wRn(rd0);
1996 if (insn & (1 << 21))
1997 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1998 else
1999 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2000 gen_op_iwmmxt_movq_wRn_M0(wrd);
2001 gen_op_iwmmxt_set_mup();
2002 break;
2003 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 rd1 = (insn >> 0) & 0xf;
2007 gen_op_iwmmxt_movq_M0_wRn(rd0);
2008 switch ((insn >> 22) & 3) {
2009 case 0:
2010 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2011 break;
2012 case 1:
2013 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2014 break;
2015 case 2:
2016 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2017 break;
2018 case 3:
2019 return 1;
2020 }
2021 gen_op_iwmmxt_movq_wRn_M0(wrd);
2022 gen_op_iwmmxt_set_mup();
2023 gen_op_iwmmxt_set_cup();
2024 break;
2025 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 rd1 = (insn >> 0) & 0xf;
2029 gen_op_iwmmxt_movq_M0_wRn(rd0);
2030 switch ((insn >> 22) & 3) {
2031 case 0:
2032 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2033 break;
2034 case 1:
2035 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2036 break;
2037 case 2:
2038 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2039 break;
2040 case 3:
2041 return 1;
2042 }
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
2047 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 16) & 0xf;
2050 rd1 = (insn >> 0) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0);
2052 if (insn & (1 << 22))
2053 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2054 else
2055 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2056 if (!(insn & (1 << 20)))
2057 gen_op_iwmmxt_addl_M0_wRn(wrd);
2058 gen_op_iwmmxt_movq_wRn_M0(wrd);
2059 gen_op_iwmmxt_set_mup();
2060 break;
2061 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2062 wrd = (insn >> 12) & 0xf;
2063 rd0 = (insn >> 16) & 0xf;
2064 rd1 = (insn >> 0) & 0xf;
2065 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2066 if (insn & (1 << 21)) {
2067 if (insn & (1 << 20))
2068 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2069 else
2070 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2071 } else {
2072 if (insn & (1 << 20))
2073 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2074 else
2075 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2076 }
18c9b560
AZ
2077 gen_op_iwmmxt_movq_wRn_M0(wrd);
2078 gen_op_iwmmxt_set_mup();
2079 break;
2080 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2081 wrd = (insn >> 12) & 0xf;
2082 rd0 = (insn >> 16) & 0xf;
2083 rd1 = (insn >> 0) & 0xf;
2084 gen_op_iwmmxt_movq_M0_wRn(rd0);
2085 if (insn & (1 << 21))
2086 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2087 else
2088 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2089 if (!(insn & (1 << 20))) {
e677137d
PB
2090 iwmmxt_load_reg(cpu_V1, wrd);
2091 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2092 }
2093 gen_op_iwmmxt_movq_wRn_M0(wrd);
2094 gen_op_iwmmxt_set_mup();
2095 break;
2096 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 16) & 0xf;
2099 rd1 = (insn >> 0) & 0xf;
2100 gen_op_iwmmxt_movq_M0_wRn(rd0);
2101 switch ((insn >> 22) & 3) {
2102 case 0:
2103 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2104 break;
2105 case 1:
2106 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2107 break;
2108 case 2:
2109 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 gen_op_iwmmxt_set_cup();
2117 break;
2118 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2123 if (insn & (1 << 22)) {
2124 if (insn & (1 << 20))
2125 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2126 else
2127 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2128 } else {
2129 if (insn & (1 << 20))
2130 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2131 else
2132 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2133 }
18c9b560
AZ
2134 gen_op_iwmmxt_movq_wRn_M0(wrd);
2135 gen_op_iwmmxt_set_mup();
2136 gen_op_iwmmxt_set_cup();
2137 break;
2138 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2139 wrd = (insn >> 12) & 0xf;
2140 rd0 = (insn >> 16) & 0xf;
2141 rd1 = (insn >> 0) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2143 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2144 tcg_gen_andi_i32(tmp, tmp, 7);
2145 iwmmxt_load_reg(cpu_V1, rd1);
2146 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2147 tcg_temp_free_i32(tmp);
18c9b560
AZ
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2152 if (((insn >> 6) & 3) == 3)
2153 return 1;
18c9b560
AZ
2154 rd = (insn >> 12) & 0xf;
2155 wrd = (insn >> 16) & 0xf;
da6b5335 2156 tmp = load_reg(s, rd);
18c9b560
AZ
2157 gen_op_iwmmxt_movq_M0_wRn(wrd);
2158 switch ((insn >> 6) & 3) {
2159 case 0:
da6b5335
FN
2160 tmp2 = tcg_const_i32(0xff);
2161 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2162 break;
2163 case 1:
da6b5335
FN
2164 tmp2 = tcg_const_i32(0xffff);
2165 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2166 break;
2167 case 2:
da6b5335
FN
2168 tmp2 = tcg_const_i32(0xffffffff);
2169 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2170 break;
da6b5335 2171 default:
39d5492a
PM
2172 TCGV_UNUSED_I32(tmp2);
2173 TCGV_UNUSED_I32(tmp3);
18c9b560 2174 }
da6b5335 2175 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2176 tcg_temp_free_i32(tmp3);
2177 tcg_temp_free_i32(tmp2);
7d1b0095 2178 tcg_temp_free_i32(tmp);
18c9b560
AZ
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2181 break;
2182 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2183 rd = (insn >> 12) & 0xf;
2184 wrd = (insn >> 16) & 0xf;
da6b5335 2185 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2186 return 1;
2187 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2188 tmp = tcg_temp_new_i32();
18c9b560
AZ
2189 switch ((insn >> 22) & 3) {
2190 case 0:
da6b5335 2191 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2192 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2193 if (insn & 8) {
2194 tcg_gen_ext8s_i32(tmp, tmp);
2195 } else {
2196 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2197 }
2198 break;
2199 case 1:
da6b5335 2200 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2201 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2202 if (insn & 8) {
2203 tcg_gen_ext16s_i32(tmp, tmp);
2204 } else {
2205 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2206 }
2207 break;
2208 case 2:
da6b5335 2209 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2210 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2211 break;
18c9b560 2212 }
da6b5335 2213 store_reg(s, rd, tmp);
18c9b560
AZ
2214 break;
2215 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2216 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2217 return 1;
da6b5335 2218 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2219 switch ((insn >> 22) & 3) {
2220 case 0:
da6b5335 2221 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2222 break;
2223 case 1:
da6b5335 2224 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2225 break;
2226 case 2:
da6b5335 2227 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2228 break;
18c9b560 2229 }
da6b5335
FN
2230 tcg_gen_shli_i32(tmp, tmp, 28);
2231 gen_set_nzcv(tmp);
7d1b0095 2232 tcg_temp_free_i32(tmp);
18c9b560
AZ
2233 break;
2234 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2235 if (((insn >> 6) & 3) == 3)
2236 return 1;
18c9b560
AZ
2237 rd = (insn >> 12) & 0xf;
2238 wrd = (insn >> 16) & 0xf;
da6b5335 2239 tmp = load_reg(s, rd);
18c9b560
AZ
2240 switch ((insn >> 6) & 3) {
2241 case 0:
da6b5335 2242 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2243 break;
2244 case 1:
da6b5335 2245 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2246 break;
2247 case 2:
da6b5335 2248 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2249 break;
18c9b560 2250 }
7d1b0095 2251 tcg_temp_free_i32(tmp);
18c9b560
AZ
2252 gen_op_iwmmxt_movq_wRn_M0(wrd);
2253 gen_op_iwmmxt_set_mup();
2254 break;
2255 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2256 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2257 return 1;
da6b5335 2258 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2259 tmp2 = tcg_temp_new_i32();
da6b5335 2260 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2261 switch ((insn >> 22) & 3) {
2262 case 0:
2263 for (i = 0; i < 7; i ++) {
da6b5335
FN
2264 tcg_gen_shli_i32(tmp2, tmp2, 4);
2265 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2266 }
2267 break;
2268 case 1:
2269 for (i = 0; i < 3; i ++) {
da6b5335
FN
2270 tcg_gen_shli_i32(tmp2, tmp2, 8);
2271 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2272 }
2273 break;
2274 case 2:
da6b5335
FN
2275 tcg_gen_shli_i32(tmp2, tmp2, 16);
2276 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2277 break;
18c9b560 2278 }
da6b5335 2279 gen_set_nzcv(tmp);
7d1b0095
PM
2280 tcg_temp_free_i32(tmp2);
2281 tcg_temp_free_i32(tmp);
18c9b560
AZ
2282 break;
2283 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2284 wrd = (insn >> 12) & 0xf;
2285 rd0 = (insn >> 16) & 0xf;
2286 gen_op_iwmmxt_movq_M0_wRn(rd0);
2287 switch ((insn >> 22) & 3) {
2288 case 0:
e677137d 2289 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2290 break;
2291 case 1:
e677137d 2292 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2293 break;
2294 case 2:
e677137d 2295 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2296 break;
2297 case 3:
2298 return 1;
2299 }
2300 gen_op_iwmmxt_movq_wRn_M0(wrd);
2301 gen_op_iwmmxt_set_mup();
2302 break;
2303 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2304 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2305 return 1;
da6b5335 2306 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2307 tmp2 = tcg_temp_new_i32();
da6b5335 2308 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2309 switch ((insn >> 22) & 3) {
2310 case 0:
2311 for (i = 0; i < 7; i ++) {
da6b5335
FN
2312 tcg_gen_shli_i32(tmp2, tmp2, 4);
2313 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2314 }
2315 break;
2316 case 1:
2317 for (i = 0; i < 3; i ++) {
da6b5335
FN
2318 tcg_gen_shli_i32(tmp2, tmp2, 8);
2319 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2320 }
2321 break;
2322 case 2:
da6b5335
FN
2323 tcg_gen_shli_i32(tmp2, tmp2, 16);
2324 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2325 break;
18c9b560 2326 }
da6b5335 2327 gen_set_nzcv(tmp);
7d1b0095
PM
2328 tcg_temp_free_i32(tmp2);
2329 tcg_temp_free_i32(tmp);
18c9b560
AZ
2330 break;
2331 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2332 rd = (insn >> 12) & 0xf;
2333 rd0 = (insn >> 16) & 0xf;
da6b5335 2334 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2335 return 1;
2336 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2337 tmp = tcg_temp_new_i32();
18c9b560
AZ
2338 switch ((insn >> 22) & 3) {
2339 case 0:
da6b5335 2340 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2341 break;
2342 case 1:
da6b5335 2343 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2344 break;
2345 case 2:
da6b5335 2346 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2347 break;
18c9b560 2348 }
da6b5335 2349 store_reg(s, rd, tmp);
18c9b560
AZ
2350 break;
2351 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2352 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2353 wrd = (insn >> 12) & 0xf;
2354 rd0 = (insn >> 16) & 0xf;
2355 rd1 = (insn >> 0) & 0xf;
2356 gen_op_iwmmxt_movq_M0_wRn(rd0);
2357 switch ((insn >> 22) & 3) {
2358 case 0:
2359 if (insn & (1 << 21))
2360 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2361 else
2362 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2363 break;
2364 case 1:
2365 if (insn & (1 << 21))
2366 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2367 else
2368 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2369 break;
2370 case 2:
2371 if (insn & (1 << 21))
2372 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2373 else
2374 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2375 break;
2376 case 3:
2377 return 1;
2378 }
2379 gen_op_iwmmxt_movq_wRn_M0(wrd);
2380 gen_op_iwmmxt_set_mup();
2381 gen_op_iwmmxt_set_cup();
2382 break;
2383 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2384 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2385 wrd = (insn >> 12) & 0xf;
2386 rd0 = (insn >> 16) & 0xf;
2387 gen_op_iwmmxt_movq_M0_wRn(rd0);
2388 switch ((insn >> 22) & 3) {
2389 case 0:
2390 if (insn & (1 << 21))
2391 gen_op_iwmmxt_unpacklsb_M0();
2392 else
2393 gen_op_iwmmxt_unpacklub_M0();
2394 break;
2395 case 1:
2396 if (insn & (1 << 21))
2397 gen_op_iwmmxt_unpacklsw_M0();
2398 else
2399 gen_op_iwmmxt_unpackluw_M0();
2400 break;
2401 case 2:
2402 if (insn & (1 << 21))
2403 gen_op_iwmmxt_unpacklsl_M0();
2404 else
2405 gen_op_iwmmxt_unpacklul_M0();
2406 break;
2407 case 3:
2408 return 1;
2409 }
2410 gen_op_iwmmxt_movq_wRn_M0(wrd);
2411 gen_op_iwmmxt_set_mup();
2412 gen_op_iwmmxt_set_cup();
2413 break;
2414 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2415 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2416 wrd = (insn >> 12) & 0xf;
2417 rd0 = (insn >> 16) & 0xf;
2418 gen_op_iwmmxt_movq_M0_wRn(rd0);
2419 switch ((insn >> 22) & 3) {
2420 case 0:
2421 if (insn & (1 << 21))
2422 gen_op_iwmmxt_unpackhsb_M0();
2423 else
2424 gen_op_iwmmxt_unpackhub_M0();
2425 break;
2426 case 1:
2427 if (insn & (1 << 21))
2428 gen_op_iwmmxt_unpackhsw_M0();
2429 else
2430 gen_op_iwmmxt_unpackhuw_M0();
2431 break;
2432 case 2:
2433 if (insn & (1 << 21))
2434 gen_op_iwmmxt_unpackhsl_M0();
2435 else
2436 gen_op_iwmmxt_unpackhul_M0();
2437 break;
2438 case 3:
2439 return 1;
2440 }
2441 gen_op_iwmmxt_movq_wRn_M0(wrd);
2442 gen_op_iwmmxt_set_mup();
2443 gen_op_iwmmxt_set_cup();
2444 break;
2445 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2446 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2447 if (((insn >> 22) & 3) == 0)
2448 return 1;
18c9b560
AZ
2449 wrd = (insn >> 12) & 0xf;
2450 rd0 = (insn >> 16) & 0xf;
2451 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2452 tmp = tcg_temp_new_i32();
da6b5335 2453 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2454 tcg_temp_free_i32(tmp);
18c9b560 2455 return 1;
da6b5335 2456 }
18c9b560 2457 switch ((insn >> 22) & 3) {
18c9b560 2458 case 1:
477955bd 2459 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2460 break;
2461 case 2:
477955bd 2462 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2463 break;
2464 case 3:
477955bd 2465 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2466 break;
2467 }
7d1b0095 2468 tcg_temp_free_i32(tmp);
18c9b560
AZ
2469 gen_op_iwmmxt_movq_wRn_M0(wrd);
2470 gen_op_iwmmxt_set_mup();
2471 gen_op_iwmmxt_set_cup();
2472 break;
2473 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2474 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2475 if (((insn >> 22) & 3) == 0)
2476 return 1;
18c9b560
AZ
2477 wrd = (insn >> 12) & 0xf;
2478 rd0 = (insn >> 16) & 0xf;
2479 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2480 tmp = tcg_temp_new_i32();
da6b5335 2481 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2482 tcg_temp_free_i32(tmp);
18c9b560 2483 return 1;
da6b5335 2484 }
18c9b560 2485 switch ((insn >> 22) & 3) {
18c9b560 2486 case 1:
477955bd 2487 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2488 break;
2489 case 2:
477955bd 2490 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2491 break;
2492 case 3:
477955bd 2493 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2494 break;
2495 }
7d1b0095 2496 tcg_temp_free_i32(tmp);
18c9b560
AZ
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 gen_op_iwmmxt_set_cup();
2500 break;
2501 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2502 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2503 if (((insn >> 22) & 3) == 0)
2504 return 1;
18c9b560
AZ
2505 wrd = (insn >> 12) & 0xf;
2506 rd0 = (insn >> 16) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2508 tmp = tcg_temp_new_i32();
da6b5335 2509 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2510 tcg_temp_free_i32(tmp);
18c9b560 2511 return 1;
da6b5335 2512 }
18c9b560 2513 switch ((insn >> 22) & 3) {
18c9b560 2514 case 1:
477955bd 2515 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2516 break;
2517 case 2:
477955bd 2518 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2519 break;
2520 case 3:
477955bd 2521 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2522 break;
2523 }
7d1b0095 2524 tcg_temp_free_i32(tmp);
18c9b560
AZ
2525 gen_op_iwmmxt_movq_wRn_M0(wrd);
2526 gen_op_iwmmxt_set_mup();
2527 gen_op_iwmmxt_set_cup();
2528 break;
2529 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2530 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2531 if (((insn >> 22) & 3) == 0)
2532 return 1;
18c9b560
AZ
2533 wrd = (insn >> 12) & 0xf;
2534 rd0 = (insn >> 16) & 0xf;
2535 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2536 tmp = tcg_temp_new_i32();
18c9b560 2537 switch ((insn >> 22) & 3) {
18c9b560 2538 case 1:
da6b5335 2539 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2540 tcg_temp_free_i32(tmp);
18c9b560 2541 return 1;
da6b5335 2542 }
477955bd 2543 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2544 break;
2545 case 2:
da6b5335 2546 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2547 tcg_temp_free_i32(tmp);
18c9b560 2548 return 1;
da6b5335 2549 }
477955bd 2550 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2551 break;
2552 case 3:
da6b5335 2553 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2554 tcg_temp_free_i32(tmp);
18c9b560 2555 return 1;
da6b5335 2556 }
477955bd 2557 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2558 break;
2559 }
7d1b0095 2560 tcg_temp_free_i32(tmp);
18c9b560
AZ
2561 gen_op_iwmmxt_movq_wRn_M0(wrd);
2562 gen_op_iwmmxt_set_mup();
2563 gen_op_iwmmxt_set_cup();
2564 break;
2565 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2566 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2567 wrd = (insn >> 12) & 0xf;
2568 rd0 = (insn >> 16) & 0xf;
2569 rd1 = (insn >> 0) & 0xf;
2570 gen_op_iwmmxt_movq_M0_wRn(rd0);
2571 switch ((insn >> 22) & 3) {
2572 case 0:
2573 if (insn & (1 << 21))
2574 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2575 else
2576 gen_op_iwmmxt_minub_M0_wRn(rd1);
2577 break;
2578 case 1:
2579 if (insn & (1 << 21))
2580 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2581 else
2582 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2583 break;
2584 case 2:
2585 if (insn & (1 << 21))
2586 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2587 else
2588 gen_op_iwmmxt_minul_M0_wRn(rd1);
2589 break;
2590 case 3:
2591 return 1;
2592 }
2593 gen_op_iwmmxt_movq_wRn_M0(wrd);
2594 gen_op_iwmmxt_set_mup();
2595 break;
2596 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2597 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2598 wrd = (insn >> 12) & 0xf;
2599 rd0 = (insn >> 16) & 0xf;
2600 rd1 = (insn >> 0) & 0xf;
2601 gen_op_iwmmxt_movq_M0_wRn(rd0);
2602 switch ((insn >> 22) & 3) {
2603 case 0:
2604 if (insn & (1 << 21))
2605 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2606 else
2607 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2608 break;
2609 case 1:
2610 if (insn & (1 << 21))
2611 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2612 else
2613 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2614 break;
2615 case 2:
2616 if (insn & (1 << 21))
2617 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2618 else
2619 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2620 break;
2621 case 3:
2622 return 1;
2623 }
2624 gen_op_iwmmxt_movq_wRn_M0(wrd);
2625 gen_op_iwmmxt_set_mup();
2626 break;
2627 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2628 case 0x402: case 0x502: case 0x602: case 0x702:
2629 wrd = (insn >> 12) & 0xf;
2630 rd0 = (insn >> 16) & 0xf;
2631 rd1 = (insn >> 0) & 0xf;
2632 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2633 tmp = tcg_const_i32((insn >> 20) & 3);
2634 iwmmxt_load_reg(cpu_V1, rd1);
2635 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2636 tcg_temp_free_i32(tmp);
18c9b560
AZ
2637 gen_op_iwmmxt_movq_wRn_M0(wrd);
2638 gen_op_iwmmxt_set_mup();
2639 break;
2640 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2641 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2642 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2643 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2644 wrd = (insn >> 12) & 0xf;
2645 rd0 = (insn >> 16) & 0xf;
2646 rd1 = (insn >> 0) & 0xf;
2647 gen_op_iwmmxt_movq_M0_wRn(rd0);
2648 switch ((insn >> 20) & 0xf) {
2649 case 0x0:
2650 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2651 break;
2652 case 0x1:
2653 gen_op_iwmmxt_subub_M0_wRn(rd1);
2654 break;
2655 case 0x3:
2656 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2657 break;
2658 case 0x4:
2659 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2660 break;
2661 case 0x5:
2662 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2663 break;
2664 case 0x7:
2665 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2666 break;
2667 case 0x8:
2668 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2669 break;
2670 case 0x9:
2671 gen_op_iwmmxt_subul_M0_wRn(rd1);
2672 break;
2673 case 0xb:
2674 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2675 break;
2676 default:
2677 return 1;
2678 }
2679 gen_op_iwmmxt_movq_wRn_M0(wrd);
2680 gen_op_iwmmxt_set_mup();
2681 gen_op_iwmmxt_set_cup();
2682 break;
2683 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2684 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2685 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2686 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2687 wrd = (insn >> 12) & 0xf;
2688 rd0 = (insn >> 16) & 0xf;
2689 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2690 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2691 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2692 tcg_temp_free_i32(tmp);
18c9b560
AZ
2693 gen_op_iwmmxt_movq_wRn_M0(wrd);
2694 gen_op_iwmmxt_set_mup();
2695 gen_op_iwmmxt_set_cup();
2696 break;
2697 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2698 case 0x418: case 0x518: case 0x618: case 0x718:
2699 case 0x818: case 0x918: case 0xa18: case 0xb18:
2700 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2701 wrd = (insn >> 12) & 0xf;
2702 rd0 = (insn >> 16) & 0xf;
2703 rd1 = (insn >> 0) & 0xf;
2704 gen_op_iwmmxt_movq_M0_wRn(rd0);
2705 switch ((insn >> 20) & 0xf) {
2706 case 0x0:
2707 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2708 break;
2709 case 0x1:
2710 gen_op_iwmmxt_addub_M0_wRn(rd1);
2711 break;
2712 case 0x3:
2713 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2714 break;
2715 case 0x4:
2716 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2717 break;
2718 case 0x5:
2719 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2720 break;
2721 case 0x7:
2722 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2723 break;
2724 case 0x8:
2725 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2726 break;
2727 case 0x9:
2728 gen_op_iwmmxt_addul_M0_wRn(rd1);
2729 break;
2730 case 0xb:
2731 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2732 break;
2733 default:
2734 return 1;
2735 }
2736 gen_op_iwmmxt_movq_wRn_M0(wrd);
2737 gen_op_iwmmxt_set_mup();
2738 gen_op_iwmmxt_set_cup();
2739 break;
2740 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2741 case 0x408: case 0x508: case 0x608: case 0x708:
2742 case 0x808: case 0x908: case 0xa08: case 0xb08:
2743 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2744 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2745 return 1;
18c9b560
AZ
2746 wrd = (insn >> 12) & 0xf;
2747 rd0 = (insn >> 16) & 0xf;
2748 rd1 = (insn >> 0) & 0xf;
2749 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2750 switch ((insn >> 22) & 3) {
18c9b560
AZ
2751 case 1:
2752 if (insn & (1 << 21))
2753 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2754 else
2755 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2756 break;
2757 case 2:
2758 if (insn & (1 << 21))
2759 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2760 else
2761 gen_op_iwmmxt_packul_M0_wRn(rd1);
2762 break;
2763 case 3:
2764 if (insn & (1 << 21))
2765 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2766 else
2767 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2768 break;
2769 }
2770 gen_op_iwmmxt_movq_wRn_M0(wrd);
2771 gen_op_iwmmxt_set_mup();
2772 gen_op_iwmmxt_set_cup();
2773 break;
2774 case 0x201: case 0x203: case 0x205: case 0x207:
2775 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2776 case 0x211: case 0x213: case 0x215: case 0x217:
2777 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2778 wrd = (insn >> 5) & 0xf;
2779 rd0 = (insn >> 12) & 0xf;
2780 rd1 = (insn >> 0) & 0xf;
2781 if (rd0 == 0xf || rd1 == 0xf)
2782 return 1;
2783 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2784 tmp = load_reg(s, rd0);
2785 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2786 switch ((insn >> 16) & 0xf) {
2787 case 0x0: /* TMIA */
da6b5335 2788 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2789 break;
2790 case 0x8: /* TMIAPH */
da6b5335 2791 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2792 break;
2793 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2794 if (insn & (1 << 16))
da6b5335 2795 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2796 if (insn & (1 << 17))
da6b5335
FN
2797 tcg_gen_shri_i32(tmp2, tmp2, 16);
2798 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2799 break;
2800 default:
7d1b0095
PM
2801 tcg_temp_free_i32(tmp2);
2802 tcg_temp_free_i32(tmp);
18c9b560
AZ
2803 return 1;
2804 }
7d1b0095
PM
2805 tcg_temp_free_i32(tmp2);
2806 tcg_temp_free_i32(tmp);
18c9b560
AZ
2807 gen_op_iwmmxt_movq_wRn_M0(wrd);
2808 gen_op_iwmmxt_set_mup();
2809 break;
2810 default:
2811 return 1;
2812 }
2813
2814 return 0;
2815}
2816
a1c7273b 2817/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2818 (ie. an undefined instruction). */
7dcc1f89 2819static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2820{
2821 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2822 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2823
2824 if ((insn & 0x0ff00f10) == 0x0e200010) {
2825 /* Multiply with Internal Accumulate Format */
2826 rd0 = (insn >> 12) & 0xf;
2827 rd1 = insn & 0xf;
2828 acc = (insn >> 5) & 7;
2829
2830 if (acc != 0)
2831 return 1;
2832
3a554c0f
FN
2833 tmp = load_reg(s, rd0);
2834 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2835 switch ((insn >> 16) & 0xf) {
2836 case 0x0: /* MIA */
3a554c0f 2837 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2838 break;
2839 case 0x8: /* MIAPH */
3a554c0f 2840 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2841 break;
2842 case 0xc: /* MIABB */
2843 case 0xd: /* MIABT */
2844 case 0xe: /* MIATB */
2845 case 0xf: /* MIATT */
18c9b560 2846 if (insn & (1 << 16))
3a554c0f 2847 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2848 if (insn & (1 << 17))
3a554c0f
FN
2849 tcg_gen_shri_i32(tmp2, tmp2, 16);
2850 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2851 break;
2852 default:
2853 return 1;
2854 }
7d1b0095
PM
2855 tcg_temp_free_i32(tmp2);
2856 tcg_temp_free_i32(tmp);
18c9b560
AZ
2857
2858 gen_op_iwmmxt_movq_wRn_M0(acc);
2859 return 0;
2860 }
2861
2862 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2863 /* Internal Accumulator Access Format */
2864 rdhi = (insn >> 16) & 0xf;
2865 rdlo = (insn >> 12) & 0xf;
2866 acc = insn & 7;
2867
2868 if (acc != 0)
2869 return 1;
2870
2871 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2872 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2873 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2874 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2875 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2876 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2877 } else { /* MAR */
3a554c0f
FN
2878 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2879 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2880 }
2881 return 0;
2882 }
2883
2884 return 1;
2885}
2886
/* Helpers for decoding VFP register numbers from an instruction word.
 * A register number is a 4-bit field plus one extra bit; for doubles the
 * extra bit is only valid when VFP3 is present (it selects the upper half
 * of the 32-entry double-precision file).
 *
 * VFP_REG_SHR shifts right for positive n and left for negative n, so the
 * same macro works whichever direction the field needs to move.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision: 4-bit field at 'bigbit' forms bits [4:1], the bit at
 * 'smallbit' is the LSB of the register number.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision: 4-bit field at 'bigbit' forms bits [3:0]; the bit at
 * 'smallbit' becomes bit 4 on VFP3+, and must be zero otherwise (returns 1,
 * i.e. UNDEF, from the enclosing function if set).  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience wrappers for the D (destination), N and M operand fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2906
4373f3ce 2907/* Move between integer and VFP cores. */
39d5492a 2908static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2909{
39d5492a 2910 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2911 tcg_gen_mov_i32(tmp, cpu_F0s);
2912 return tmp;
2913}
2914
39d5492a 2915static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2916{
2917 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2918 tcg_temp_free_i32(tmp);
4373f3ce
PB
2919}
2920
39d5492a 2921static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2922{
39d5492a 2923 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2924 if (shift)
2925 tcg_gen_shri_i32(var, var, shift);
86831435 2926 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2927 tcg_gen_shli_i32(tmp, var, 8);
2928 tcg_gen_or_i32(var, var, tmp);
2929 tcg_gen_shli_i32(tmp, var, 16);
2930 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2931 tcg_temp_free_i32(tmp);
ad69471c
PB
2932}
2933
39d5492a 2934static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2935{
39d5492a 2936 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2937 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2938 tcg_gen_shli_i32(tmp, var, 16);
2939 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2940 tcg_temp_free_i32(tmp);
ad69471c
PB
2941}
2942
39d5492a 2943static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2944{
39d5492a 2945 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2946 tcg_gen_andi_i32(var, var, 0xffff0000);
2947 tcg_gen_shri_i32(tmp, var, 16);
2948 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2949 tcg_temp_free_i32(tmp);
ad69471c
PB
2950}
2951
39d5492a 2952static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2953{
2954 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2955 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2956 switch (size) {
2957 case 0:
12dcc321 2958 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2959 gen_neon_dup_u8(tmp, 0);
2960 break;
2961 case 1:
12dcc321 2962 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2963 gen_neon_dup_low16(tmp);
2964 break;
2965 case 2:
12dcc321 2966 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2967 break;
2968 default: /* Avoid compiler warnings. */
2969 abort();
2970 }
2971 return tmp;
2972}
2973
/* Handle the v8 VSEL instruction: write Fn to Fd when the condition
 * encoded in insn bits [21:20] holds, otherwise write Fm.
 * cc: 0 -> EQ, 1 -> VS, 2 -> GE, 3 -> GT.  Implemented branchlessly
 * with movcond on the cached flag values.  Always returns 0.
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag values so we can use 64-bit movcond.
         * ZF is zero-extended (only "== 0" is tested); NF and VF are
         * sign-extended since their sign bits hold the flag state.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: select on !Z first, then overwrite the
             * result with frm again unless N == V.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
3082
40cfacdd
WN
3083static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3084 uint32_t rm, uint32_t dp)
3085{
3086 uint32_t vmin = extract32(insn, 6, 1);
3087 TCGv_ptr fpst = get_fpstatus_ptr(0);
3088
3089 if (dp) {
3090 TCGv_i64 frn, frm, dest;
3091
3092 frn = tcg_temp_new_i64();
3093 frm = tcg_temp_new_i64();
3094 dest = tcg_temp_new_i64();
3095
3096 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3097 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3098 if (vmin) {
f71a2ae5 3099 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3100 } else {
f71a2ae5 3101 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3102 }
3103 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3104 tcg_temp_free_i64(frn);
3105 tcg_temp_free_i64(frm);
3106 tcg_temp_free_i64(dest);
3107 } else {
3108 TCGv_i32 frn, frm, dest;
3109
3110 frn = tcg_temp_new_i32();
3111 frm = tcg_temp_new_i32();
3112 dest = tcg_temp_new_i32();
3113
3114 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3115 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3116 if (vmin) {
f71a2ae5 3117 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3118 } else {
f71a2ae5 3119 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3120 }
3121 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3122 tcg_temp_free_i32(frn);
3123 tcg_temp_free_i32(frm);
3124 tcg_temp_free_i32(dest);
3125 }
3126
3127 tcg_temp_free_ptr(fpst);
3128 return 0;
3129}
3130
7655f39b
WN
3131static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3132 int rounding)
3133{
3134 TCGv_ptr fpst = get_fpstatus_ptr(0);
3135 TCGv_i32 tcg_rmode;
3136
3137 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3138 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3139
3140 if (dp) {
3141 TCGv_i64 tcg_op;
3142 TCGv_i64 tcg_res;
3143 tcg_op = tcg_temp_new_i64();
3144 tcg_res = tcg_temp_new_i64();
3145 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3146 gen_helper_rintd(tcg_res, tcg_op, fpst);
3147 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3148 tcg_temp_free_i64(tcg_op);
3149 tcg_temp_free_i64(tcg_res);
3150 } else {
3151 TCGv_i32 tcg_op;
3152 TCGv_i32 tcg_res;
3153 tcg_op = tcg_temp_new_i32();
3154 tcg_res = tcg_temp_new_i32();
3155 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3156 gen_helper_rints(tcg_res, tcg_op, fpst);
3157 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3158 tcg_temp_free_i32(tcg_op);
3159 tcg_temp_free_i32(tcg_res);
3160 }
3161
3162 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3163 tcg_temp_free_i32(tcg_rmode);
3164
3165 tcg_temp_free_ptr(fpst);
3166 return 0;
3167}
3168
/* Handle the v8 VCVT{A,N,P,M} instructions: float-to-integer
 * conversion using the rounding mode encoded in the insn rather
 * than the one in the FPSCR.  Bit 7 of the insn selects a signed
 * result.  Always returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Plain conversion: zero fixed-point fraction bits. */
    tcg_shift = tcg_const_i32(0);

    /* Temporarily install the requested rounding mode; set_rmode
     * leaves the previous mode in tcg_rmode so the second call
     * below restores it.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The helper result is 64 bits wide; the architectural
         * destination is its low 32 bits.
         */
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the original rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3226
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by the two-bit RM field at insn bits [17:16].
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3237
7dcc1f89 3238static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3239{
3240 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3241
d614a513 3242 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3243 return 1;
3244 }
3245
3246 if (dp) {
3247 VFP_DREG_D(rd, insn);
3248 VFP_DREG_N(rn, insn);
3249 VFP_DREG_M(rm, insn);
3250 } else {
3251 rd = VFP_SREG_D(insn);
3252 rn = VFP_SREG_N(insn);
3253 rm = VFP_SREG_M(insn);
3254 }
3255
3256 if ((insn & 0x0f800e50) == 0x0e000a00) {
3257 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3258 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3259 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3260 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3261 /* VRINTA, VRINTN, VRINTP, VRINTM */
3262 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3263 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3264 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3265 /* VCVTA, VCVTN, VCVTP, VCVTM */
3266 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3267 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3268 }
3269 return 1;
3270}
3271
a1c7273b 3272/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3273 (ie. an undefined instruction). */
7dcc1f89 3274static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3275{
3276 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3277 int dp, veclen;
39d5492a
PM
3278 TCGv_i32 addr;
3279 TCGv_i32 tmp;
3280 TCGv_i32 tmp2;
b7bcbe95 3281
d614a513 3282 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3283 return 1;
d614a513 3284 }
40f137e1 3285
2c7ffc41
PM
3286 /* FIXME: this access check should not take precedence over UNDEF
3287 * for invalid encodings; we will generate incorrect syndrome information
3288 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3289 */
9dbbc748 3290 if (s->fp_excp_el) {
2c7ffc41 3291 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3292 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3293 return 0;
3294 }
3295
5df8bac1 3296 if (!s->vfp_enabled) {
9ee6e8bb 3297 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3298 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3299 return 1;
3300 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3301 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3302 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3303 return 1;
a50c0f51 3304 }
40f137e1 3305 }
6a57f3eb
WN
3306
3307 if (extract32(insn, 28, 4) == 0xf) {
3308 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3309 * only used in v8 and above.
3310 */
7dcc1f89 3311 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3312 }
3313
b7bcbe95
FB
3314 dp = ((insn & 0xf00) == 0xb00);
3315 switch ((insn >> 24) & 0xf) {
3316 case 0xe:
3317 if (insn & (1 << 4)) {
3318 /* single register transfer */
b7bcbe95
FB
3319 rd = (insn >> 12) & 0xf;
3320 if (dp) {
9ee6e8bb
PB
3321 int size;
3322 int pass;
3323
3324 VFP_DREG_N(rn, insn);
3325 if (insn & 0xf)
b7bcbe95 3326 return 1;
9ee6e8bb 3327 if (insn & 0x00c00060
d614a513 3328 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3329 return 1;
d614a513 3330 }
9ee6e8bb
PB
3331
3332 pass = (insn >> 21) & 1;
3333 if (insn & (1 << 22)) {
3334 size = 0;
3335 offset = ((insn >> 5) & 3) * 8;
3336 } else if (insn & (1 << 5)) {
3337 size = 1;
3338 offset = (insn & (1 << 6)) ? 16 : 0;
3339 } else {
3340 size = 2;
3341 offset = 0;
3342 }
18c9b560 3343 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3344 /* vfp->arm */
ad69471c 3345 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3346 switch (size) {
3347 case 0:
9ee6e8bb 3348 if (offset)
ad69471c 3349 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3350 if (insn & (1 << 23))
ad69471c 3351 gen_uxtb(tmp);
9ee6e8bb 3352 else
ad69471c 3353 gen_sxtb(tmp);
9ee6e8bb
PB
3354 break;
3355 case 1:
9ee6e8bb
PB
3356 if (insn & (1 << 23)) {
3357 if (offset) {
ad69471c 3358 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3359 } else {
ad69471c 3360 gen_uxth(tmp);
9ee6e8bb
PB
3361 }
3362 } else {
3363 if (offset) {
ad69471c 3364 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3365 } else {
ad69471c 3366 gen_sxth(tmp);
9ee6e8bb
PB
3367 }
3368 }
3369 break;
3370 case 2:
9ee6e8bb
PB
3371 break;
3372 }
ad69471c 3373 store_reg(s, rd, tmp);
b7bcbe95
FB
3374 } else {
3375 /* arm->vfp */
ad69471c 3376 tmp = load_reg(s, rd);
9ee6e8bb
PB
3377 if (insn & (1 << 23)) {
3378 /* VDUP */
3379 if (size == 0) {
ad69471c 3380 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3381 } else if (size == 1) {
ad69471c 3382 gen_neon_dup_low16(tmp);
9ee6e8bb 3383 }
cbbccffc 3384 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3385 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3386 tcg_gen_mov_i32(tmp2, tmp);
3387 neon_store_reg(rn, n, tmp2);
3388 }
3389 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3390 } else {
3391 /* VMOV */
3392 switch (size) {
3393 case 0:
ad69471c 3394 tmp2 = neon_load_reg(rn, pass);
d593c48e 3395 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3396 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3397 break;
3398 case 1:
ad69471c 3399 tmp2 = neon_load_reg(rn, pass);
d593c48e 3400 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3401 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3402 break;
3403 case 2:
9ee6e8bb
PB
3404 break;
3405 }
ad69471c 3406 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3407 }
b7bcbe95 3408 }
9ee6e8bb
PB
3409 } else { /* !dp */
3410 if ((insn & 0x6f) != 0x00)
3411 return 1;
3412 rn = VFP_SREG_N(insn);
18c9b560 3413 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3414 /* vfp->arm */
3415 if (insn & (1 << 21)) {
3416 /* system register */
40f137e1 3417 rn >>= 1;
9ee6e8bb 3418
b7bcbe95 3419 switch (rn) {
40f137e1 3420 case ARM_VFP_FPSID:
4373f3ce 3421 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3422 VFP3 restricts all id registers to privileged
3423 accesses. */
3424 if (IS_USER(s)
d614a513 3425 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3426 return 1;
d614a513 3427 }
4373f3ce 3428 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3429 break;
40f137e1 3430 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3431 if (IS_USER(s))
3432 return 1;
4373f3ce 3433 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3434 break;
40f137e1
PB
3435 case ARM_VFP_FPINST:
3436 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3437 /* Not present in VFP3. */
3438 if (IS_USER(s)
d614a513 3439 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3440 return 1;
d614a513 3441 }
4373f3ce 3442 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3443 break;
40f137e1 3444 case ARM_VFP_FPSCR:
601d70b9 3445 if (rd == 15) {
4373f3ce
PB
3446 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3447 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3448 } else {
7d1b0095 3449 tmp = tcg_temp_new_i32();
4373f3ce
PB
3450 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3451 }
b7bcbe95 3452 break;
a50c0f51 3453 case ARM_VFP_MVFR2:
d614a513 3454 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3455 return 1;
3456 }
3457 /* fall through */
9ee6e8bb
PB
3458 case ARM_VFP_MVFR0:
3459 case ARM_VFP_MVFR1:
3460 if (IS_USER(s)
d614a513 3461 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3462 return 1;
d614a513 3463 }
4373f3ce 3464 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3465 break;
b7bcbe95
FB
3466 default:
3467 return 1;
3468 }
3469 } else {
3470 gen_mov_F0_vreg(0, rn);
4373f3ce 3471 tmp = gen_vfp_mrs();
b7bcbe95
FB
3472 }
3473 if (rd == 15) {
b5ff1b31 3474 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3475 gen_set_nzcv(tmp);
7d1b0095 3476 tcg_temp_free_i32(tmp);
4373f3ce
PB
3477 } else {
3478 store_reg(s, rd, tmp);
3479 }
b7bcbe95
FB
3480 } else {
3481 /* arm->vfp */
b7bcbe95 3482 if (insn & (1 << 21)) {
40f137e1 3483 rn >>= 1;
b7bcbe95
FB
3484 /* system register */
3485 switch (rn) {
40f137e1 3486 case ARM_VFP_FPSID:
9ee6e8bb
PB
3487 case ARM_VFP_MVFR0:
3488 case ARM_VFP_MVFR1:
b7bcbe95
FB
3489 /* Writes are ignored. */
3490 break;
40f137e1 3491 case ARM_VFP_FPSCR:
e4c1cfa5 3492 tmp = load_reg(s, rd);
4373f3ce 3493 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3494 tcg_temp_free_i32(tmp);
b5ff1b31 3495 gen_lookup_tb(s);
b7bcbe95 3496 break;
40f137e1 3497 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3498 if (IS_USER(s))
3499 return 1;
71b3c3de
JR
3500 /* TODO: VFP subarchitecture support.
3501 * For now, keep the EN bit only */
e4c1cfa5 3502 tmp = load_reg(s, rd);
71b3c3de 3503 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3504 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3505 gen_lookup_tb(s);
3506 break;
3507 case ARM_VFP_FPINST:
3508 case ARM_VFP_FPINST2:
23adb861
PM
3509 if (IS_USER(s)) {
3510 return 1;
3511 }
e4c1cfa5 3512 tmp = load_reg(s, rd);
4373f3ce 3513 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3514 break;
b7bcbe95
FB
3515 default:
3516 return 1;
3517 }
3518 } else {
e4c1cfa5 3519 tmp = load_reg(s, rd);
4373f3ce 3520 gen_vfp_msr(tmp);
b7bcbe95
FB
3521 gen_mov_vreg_F0(0, rn);
3522 }
3523 }
3524 }
3525 } else {
3526 /* data processing */
3527 /* The opcode is in bits 23, 21, 20 and 6. */
3528 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3529 if (dp) {
3530 if (op == 15) {
3531 /* rn is opcode */
3532 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3533 } else {
3534 /* rn is register number */
9ee6e8bb 3535 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3536 }
3537
239c20c7
WN
3538 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3539 ((rn & 0x1e) == 0x6))) {
3540 /* Integer or single/half precision destination. */
9ee6e8bb 3541 rd = VFP_SREG_D(insn);
b7bcbe95 3542 } else {
9ee6e8bb 3543 VFP_DREG_D(rd, insn);
b7bcbe95 3544 }
04595bf6 3545 if (op == 15 &&
239c20c7
WN
3546 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3547 ((rn & 0x1e) == 0x4))) {
3548 /* VCVT from int or half precision is always from S reg
3549 * regardless of dp bit. VCVT with immediate frac_bits
3550 * has same format as SREG_M.
04595bf6
PM
3551 */
3552 rm = VFP_SREG_M(insn);
b7bcbe95 3553 } else {
9ee6e8bb 3554 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3555 }
3556 } else {
9ee6e8bb 3557 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3558 if (op == 15 && rn == 15) {
3559 /* Double precision destination. */
9ee6e8bb
PB
3560 VFP_DREG_D(rd, insn);
3561 } else {
3562 rd = VFP_SREG_D(insn);
3563 }
04595bf6
PM
3564 /* NB that we implicitly rely on the encoding for the frac_bits
3565 * in VCVT of fixed to float being the same as that of an SREG_M
3566 */
9ee6e8bb 3567 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3568 }
3569
69d1fc22 3570 veclen = s->vec_len;
b7bcbe95
FB
3571 if (op == 15 && rn > 3)
3572 veclen = 0;
3573
3574 /* Shut up compiler warnings. */
3575 delta_m = 0;
3576 delta_d = 0;
3577 bank_mask = 0;
3b46e624 3578
b7bcbe95
FB
3579 if (veclen > 0) {
3580 if (dp)
3581 bank_mask = 0xc;
3582 else
3583 bank_mask = 0x18;
3584
3585 /* Figure out what type of vector operation this is. */
3586 if ((rd & bank_mask) == 0) {
3587 /* scalar */
3588 veclen = 0;
3589 } else {
3590 if (dp)
69d1fc22 3591 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3592 else
69d1fc22 3593 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3594
3595 if ((rm & bank_mask) == 0) {
3596 /* mixed scalar/vector */
3597 delta_m = 0;
3598 } else {
3599 /* vector */
3600 delta_m = delta_d;
3601 }
3602 }
3603 }
3604
3605 /* Load the initial operands. */
3606 if (op == 15) {
3607 switch (rn) {
3608 case 16:
3609 case 17:
3610 /* Integer source */
3611 gen_mov_F0_vreg(0, rm);
3612 break;
3613 case 8:
3614 case 9:
3615 /* Compare */
3616 gen_mov_F0_vreg(dp, rd);
3617 gen_mov_F1_vreg(dp, rm);
3618 break;
3619 case 10:
3620 case 11:
3621 /* Compare with zero */
3622 gen_mov_F0_vreg(dp, rd);
3623 gen_vfp_F1_ld0(dp);
3624 break;
9ee6e8bb
PB
3625 case 20:
3626 case 21:
3627 case 22:
3628 case 23:
644ad806
PB
3629 case 28:
3630 case 29:
3631 case 30:
3632 case 31:
9ee6e8bb
PB
3633 /* Source and destination the same. */
3634 gen_mov_F0_vreg(dp, rd);
3635 break;
6e0c0ed1
PM
3636 case 4:
3637 case 5:
3638 case 6:
3639 case 7:
239c20c7
WN
3640 /* VCVTB, VCVTT: only present with the halfprec extension
3641 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3642 * (we choose to UNDEF)
6e0c0ed1 3643 */
d614a513
PM
3644 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3645 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3646 return 1;
3647 }
239c20c7
WN
3648 if (!extract32(rn, 1, 1)) {
3649 /* Half precision source. */
3650 gen_mov_F0_vreg(0, rm);
3651 break;
3652 }
6e0c0ed1 3653 /* Otherwise fall through */
b7bcbe95
FB
3654 default:
3655 /* One source operand. */
3656 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3657 break;
b7bcbe95
FB
3658 }
3659 } else {
3660 /* Two source operands. */
3661 gen_mov_F0_vreg(dp, rn);
3662 gen_mov_F1_vreg(dp, rm);
3663 }
3664
3665 for (;;) {
3666 /* Perform the calculation. */
3667 switch (op) {
605a6aed
PM
3668 case 0: /* VMLA: fd + (fn * fm) */
3669 /* Note that order of inputs to the add matters for NaNs */
3670 gen_vfp_F1_mul(dp);
3671 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3672 gen_vfp_add(dp);
3673 break;
605a6aed 3674 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3675 gen_vfp_mul(dp);
605a6aed
PM
3676 gen_vfp_F1_neg(dp);
3677 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3678 gen_vfp_add(dp);
3679 break;
605a6aed
PM
3680 case 2: /* VNMLS: -fd + (fn * fm) */
3681 /* Note that it isn't valid to replace (-A + B) with (B - A)
3682 * or similar plausible looking simplifications
3683 * because this will give wrong results for NaNs.
3684 */
3685 gen_vfp_F1_mul(dp);
3686 gen_mov_F0_vreg(dp, rd);
3687 gen_vfp_neg(dp);
3688 gen_vfp_add(dp);
b7bcbe95 3689 break;
605a6aed 3690 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3691 gen_vfp_mul(dp);
605a6aed
PM
3692 gen_vfp_F1_neg(dp);
3693 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3694 gen_vfp_neg(dp);
605a6aed 3695 gen_vfp_add(dp);
b7bcbe95
FB
3696 break;
3697 case 4: /* mul: fn * fm */
3698 gen_vfp_mul(dp);
3699 break;
3700 case 5: /* nmul: -(fn * fm) */
3701 gen_vfp_mul(dp);
3702 gen_vfp_neg(dp);
3703 break;
3704 case 6: /* add: fn + fm */
3705 gen_vfp_add(dp);
3706 break;
3707 case 7: /* sub: fn - fm */
3708 gen_vfp_sub(dp);
3709 break;
3710 case 8: /* div: fn / fm */
3711 gen_vfp_div(dp);
3712 break;
da97f52c
PM
3713 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3714 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3715 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3716 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3717 /* These are fused multiply-add, and must be done as one
3718 * floating point operation with no rounding between the
3719 * multiplication and addition steps.
3720 * NB that doing the negations here as separate steps is
3721 * correct : an input NaN should come out with its sign bit
3722 * flipped if it is a negated-input.
3723 */
d614a513 3724 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3725 return 1;
3726 }
3727 if (dp) {
3728 TCGv_ptr fpst;
3729 TCGv_i64 frd;
3730 if (op & 1) {
3731 /* VFNMS, VFMS */
3732 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3733 }
3734 frd = tcg_temp_new_i64();
3735 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3736 if (op & 2) {
3737 /* VFNMA, VFNMS */
3738 gen_helper_vfp_negd(frd, frd);
3739 }
3740 fpst = get_fpstatus_ptr(0);
3741 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3742 cpu_F1d, frd, fpst);
3743 tcg_temp_free_ptr(fpst);
3744 tcg_temp_free_i64(frd);
3745 } else {
3746 TCGv_ptr fpst;
3747 TCGv_i32 frd;
3748 if (op & 1) {
3749 /* VFNMS, VFMS */
3750 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3751 }
3752 frd = tcg_temp_new_i32();
3753 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3754 if (op & 2) {
3755 gen_helper_vfp_negs(frd, frd);
3756 }
3757 fpst = get_fpstatus_ptr(0);
3758 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3759 cpu_F1s, frd, fpst);
3760 tcg_temp_free_ptr(fpst);
3761 tcg_temp_free_i32(frd);
3762 }
3763 break;
9ee6e8bb 3764 case 14: /* fconst */
d614a513
PM
3765 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3766 return 1;
3767 }
9ee6e8bb
PB
3768
3769 n = (insn << 12) & 0x80000000;
3770 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3771 if (dp) {
3772 if (i & 0x40)
3773 i |= 0x3f80;
3774 else
3775 i |= 0x4000;
3776 n |= i << 16;
4373f3ce 3777 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3778 } else {
3779 if (i & 0x40)
3780 i |= 0x780;
3781 else
3782 i |= 0x800;
3783 n |= i << 19;
5b340b51 3784 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3785 }
9ee6e8bb 3786 break;
b7bcbe95
FB
3787 case 15: /* extension space */
3788 switch (rn) {
3789 case 0: /* cpy */
3790 /* no-op */
3791 break;
3792 case 1: /* abs */
3793 gen_vfp_abs(dp);
3794 break;
3795 case 2: /* neg */
3796 gen_vfp_neg(dp);
3797 break;
3798 case 3: /* sqrt */
3799 gen_vfp_sqrt(dp);
3800 break;
239c20c7 3801 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3802 tmp = gen_vfp_mrs();
3803 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3804 if (dp) {
3805 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3806 cpu_env);
3807 } else {
3808 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3809 cpu_env);
3810 }
7d1b0095 3811 tcg_temp_free_i32(tmp);
60011498 3812 break;
239c20c7 3813 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3814 tmp = gen_vfp_mrs();
3815 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3816 if (dp) {
3817 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3818 cpu_env);
3819 } else {
3820 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3821 cpu_env);
3822 }
7d1b0095 3823 tcg_temp_free_i32(tmp);
60011498 3824 break;
239c20c7 3825 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3826 tmp = tcg_temp_new_i32();
239c20c7
WN
3827 if (dp) {
3828 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3829 cpu_env);
3830 } else {
3831 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3832 cpu_env);
3833 }
60011498
PB
3834 gen_mov_F0_vreg(0, rd);
3835 tmp2 = gen_vfp_mrs();
3836 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3837 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3838 tcg_temp_free_i32(tmp2);
60011498
PB
3839 gen_vfp_msr(tmp);
3840 break;
239c20c7 3841 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3842 tmp = tcg_temp_new_i32();
239c20c7
WN
3843 if (dp) {
3844 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3845 cpu_env);
3846 } else {
3847 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3848 cpu_env);
3849 }
60011498
PB
3850 tcg_gen_shli_i32(tmp, tmp, 16);
3851 gen_mov_F0_vreg(0, rd);
3852 tmp2 = gen_vfp_mrs();
3853 tcg_gen_ext16u_i32(tmp2, tmp2);
3854 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3855 tcg_temp_free_i32(tmp2);
60011498
PB
3856 gen_vfp_msr(tmp);
3857 break;
b7bcbe95
FB
3858 case 8: /* cmp */
3859 gen_vfp_cmp(dp);
3860 break;
3861 case 9: /* cmpe */
3862 gen_vfp_cmpe(dp);
3863 break;
3864 case 10: /* cmpz */
3865 gen_vfp_cmp(dp);
3866 break;
3867 case 11: /* cmpez */
3868 gen_vfp_F1_ld0(dp);
3869 gen_vfp_cmpe(dp);
3870 break;
664c6733
WN
3871 case 12: /* vrintr */
3872 {
3873 TCGv_ptr fpst = get_fpstatus_ptr(0);
3874 if (dp) {
3875 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3876 } else {
3877 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3878 }
3879 tcg_temp_free_ptr(fpst);
3880 break;
3881 }
a290c62a
WN
3882 case 13: /* vrintz */
3883 {
3884 TCGv_ptr fpst = get_fpstatus_ptr(0);
3885 TCGv_i32 tcg_rmode;
3886 tcg_rmode = tcg_const_i32(float_round_to_zero);
3887 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3888 if (dp) {
3889 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3890 } else {
3891 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3892 }
3893 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3894 tcg_temp_free_i32(tcg_rmode);
3895 tcg_temp_free_ptr(fpst);
3896 break;
3897 }
4e82bc01
WN
3898 case 14: /* vrintx */
3899 {
3900 TCGv_ptr fpst = get_fpstatus_ptr(0);
3901 if (dp) {
3902 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3903 } else {
3904 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3905 }
3906 tcg_temp_free_ptr(fpst);
3907 break;
3908 }
b7bcbe95
FB
3909 case 15: /* single<->double conversion */
3910 if (dp)
4373f3ce 3911 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3912 else
4373f3ce 3913 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3914 break;
3915 case 16: /* fuito */
5500b06c 3916 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3917 break;
3918 case 17: /* fsito */
5500b06c 3919 gen_vfp_sito(dp, 0);
b7bcbe95 3920 break;
9ee6e8bb 3921 case 20: /* fshto */
d614a513
PM
3922 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3923 return 1;
3924 }
5500b06c 3925 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3926 break;
3927 case 21: /* fslto */
d614a513
PM
3928 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3929 return 1;
3930 }
5500b06c 3931 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3932 break;
3933 case 22: /* fuhto */
d614a513
PM
3934 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3935 return 1;
3936 }
5500b06c 3937 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3938 break;
3939 case 23: /* fulto */
d614a513
PM
3940 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3941 return 1;
3942 }
5500b06c 3943 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3944 break;
b7bcbe95 3945 case 24: /* ftoui */
5500b06c 3946 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3947 break;
3948 case 25: /* ftouiz */
5500b06c 3949 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3950 break;
3951 case 26: /* ftosi */
5500b06c 3952 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3953 break;
3954 case 27: /* ftosiz */
5500b06c 3955 gen_vfp_tosiz(dp, 0);
b7bcbe95 3956 break;
9ee6e8bb 3957 case 28: /* ftosh */
d614a513
PM
3958 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3959 return 1;
3960 }
5500b06c 3961 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3962 break;
3963 case 29: /* ftosl */
d614a513
PM
3964 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3965 return 1;
3966 }
5500b06c 3967 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3968 break;
3969 case 30: /* ftouh */
d614a513
PM
3970 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3971 return 1;
3972 }
5500b06c 3973 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3974 break;
3975 case 31: /* ftoul */
d614a513
PM
3976 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3977 return 1;
3978 }
5500b06c 3979 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3980 break;
b7bcbe95 3981 default: /* undefined */
b7bcbe95
FB
3982 return 1;
3983 }
3984 break;
3985 default: /* undefined */
b7bcbe95
FB
3986 return 1;
3987 }
3988
3989 /* Write back the result. */
239c20c7
WN
3990 if (op == 15 && (rn >= 8 && rn <= 11)) {
3991 /* Comparison, do nothing. */
3992 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3993 (rn & 0x1e) == 0x6)) {
3994 /* VCVT double to int: always integer result.
3995 * VCVT double to half precision is always a single
3996 * precision result.
3997 */
b7bcbe95 3998 gen_mov_vreg_F0(0, rd);
239c20c7 3999 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
4000 /* conversion */
4001 gen_mov_vreg_F0(!dp, rd);
239c20c7 4002 } else {
b7bcbe95 4003 gen_mov_vreg_F0(dp, rd);
239c20c7 4004 }
b7bcbe95
FB
4005
4006 /* break out of the loop if we have finished */
4007 if (veclen == 0)
4008 break;
4009
4010 if (op == 15 && delta_m == 0) {
4011 /* single source one-many */
4012 while (veclen--) {
4013 rd = ((rd + delta_d) & (bank_mask - 1))
4014 | (rd & bank_mask);
4015 gen_mov_vreg_F0(dp, rd);
4016 }
4017 break;
4018 }
4019 /* Setup the next operands. */
4020 veclen--;
4021 rd = ((rd + delta_d) & (bank_mask - 1))
4022 | (rd & bank_mask);
4023
4024 if (op == 15) {
4025 /* One source operand. */
4026 rm = ((rm + delta_m) & (bank_mask - 1))
4027 | (rm & bank_mask);
4028 gen_mov_F0_vreg(dp, rm);
4029 } else {
4030 /* Two source operands. */
4031 rn = ((rn + delta_d) & (bank_mask - 1))
4032 | (rn & bank_mask);
4033 gen_mov_F0_vreg(dp, rn);
4034 if (delta_m) {
4035 rm = ((rm + delta_m) & (bank_mask - 1))
4036 | (rm & bank_mask);
4037 gen_mov_F1_vreg(dp, rm);
4038 }
4039 }
4040 }
4041 }
4042 break;
4043 case 0xc:
4044 case 0xd:
8387da81 4045 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4046 /* two-register transfer */
4047 rn = (insn >> 16) & 0xf;
4048 rd = (insn >> 12) & 0xf;
4049 if (dp) {
9ee6e8bb
PB
4050 VFP_DREG_M(rm, insn);
4051 } else {
4052 rm = VFP_SREG_M(insn);
4053 }
b7bcbe95 4054
18c9b560 4055 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4056 /* vfp->arm */
4057 if (dp) {
4373f3ce
PB
4058 gen_mov_F0_vreg(0, rm * 2);
4059 tmp = gen_vfp_mrs();
4060 store_reg(s, rd, tmp);
4061 gen_mov_F0_vreg(0, rm * 2 + 1);
4062 tmp = gen_vfp_mrs();
4063 store_reg(s, rn, tmp);
b7bcbe95
FB
4064 } else {
4065 gen_mov_F0_vreg(0, rm);
4373f3ce 4066 tmp = gen_vfp_mrs();
8387da81 4067 store_reg(s, rd, tmp);
b7bcbe95 4068 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4069 tmp = gen_vfp_mrs();
8387da81 4070 store_reg(s, rn, tmp);
b7bcbe95
FB
4071 }
4072 } else {
4073 /* arm->vfp */
4074 if (dp) {
4373f3ce
PB
4075 tmp = load_reg(s, rd);
4076 gen_vfp_msr(tmp);
4077 gen_mov_vreg_F0(0, rm * 2);
4078 tmp = load_reg(s, rn);
4079 gen_vfp_msr(tmp);
4080 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4081 } else {
8387da81 4082 tmp = load_reg(s, rd);
4373f3ce 4083 gen_vfp_msr(tmp);
b7bcbe95 4084 gen_mov_vreg_F0(0, rm);
8387da81 4085 tmp = load_reg(s, rn);
4373f3ce 4086 gen_vfp_msr(tmp);
b7bcbe95
FB
4087 gen_mov_vreg_F0(0, rm + 1);
4088 }
4089 }
4090 } else {
4091 /* Load/store */
4092 rn = (insn >> 16) & 0xf;
4093 if (dp)
9ee6e8bb 4094 VFP_DREG_D(rd, insn);
b7bcbe95 4095 else
9ee6e8bb 4096 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4097 if ((insn & 0x01200000) == 0x01000000) {
4098 /* Single load/store */
4099 offset = (insn & 0xff) << 2;
4100 if ((insn & (1 << 23)) == 0)
4101 offset = -offset;
934814f1
PM
4102 if (s->thumb && rn == 15) {
4103 /* This is actually UNPREDICTABLE */
4104 addr = tcg_temp_new_i32();
4105 tcg_gen_movi_i32(addr, s->pc & ~2);
4106 } else {
4107 addr = load_reg(s, rn);
4108 }
312eea9f 4109 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4110 if (insn & (1 << 20)) {
312eea9f 4111 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4112 gen_mov_vreg_F0(dp, rd);
4113 } else {
4114 gen_mov_F0_vreg(dp, rd);
312eea9f 4115 gen_vfp_st(s, dp, addr);
b7bcbe95 4116 }
7d1b0095 4117 tcg_temp_free_i32(addr);
b7bcbe95
FB
4118 } else {
4119 /* load/store multiple */
934814f1 4120 int w = insn & (1 << 21);
b7bcbe95
FB
4121 if (dp)
4122 n = (insn >> 1) & 0x7f;
4123 else
4124 n = insn & 0xff;
4125
934814f1
PM
4126 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4127 /* P == U , W == 1 => UNDEF */
4128 return 1;
4129 }
4130 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4131 /* UNPREDICTABLE cases for bad immediates: we choose to
4132 * UNDEF to avoid generating huge numbers of TCG ops
4133 */
4134 return 1;
4135 }
4136 if (rn == 15 && w) {
4137 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4138 return 1;
4139 }
4140
4141 if (s->thumb && rn == 15) {
4142 /* This is actually UNPREDICTABLE */
4143 addr = tcg_temp_new_i32();
4144 tcg_gen_movi_i32(addr, s->pc & ~2);
4145 } else {
4146 addr = load_reg(s, rn);
4147 }
b7bcbe95 4148 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4149 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4150
4151 if (dp)
4152 offset = 8;
4153 else
4154 offset = 4;
4155 for (i = 0; i < n; i++) {
18c9b560 4156 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4157 /* load */
312eea9f 4158 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4159 gen_mov_vreg_F0(dp, rd + i);
4160 } else {
4161 /* store */
4162 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4163 gen_vfp_st(s, dp, addr);
b7bcbe95 4164 }
312eea9f 4165 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4166 }
934814f1 4167 if (w) {
b7bcbe95
FB
4168 /* writeback */
4169 if (insn & (1 << 24))
4170 offset = -offset * n;
4171 else if (dp && (insn & 1))
4172 offset = 4;
4173 else
4174 offset = 0;
4175
4176 if (offset != 0)
312eea9f
FN
4177 tcg_gen_addi_i32(addr, addr, offset);
4178 store_reg(s, rn, addr);
4179 } else {
7d1b0095 4180 tcg_temp_free_i32(addr);
b7bcbe95
FB
4181 }
4182 }
4183 }
4184 break;
4185 default:
4186 /* Should never happen. */
4187 return 1;
4188 }
4189 return 0;
4190}
4191
90aa39a1 4192static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4193{
90aa39a1 4194#ifndef CONFIG_USER_ONLY
dcba3a8d 4195 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4196 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4197#else
4198 return true;
4199#endif
4200}
6e256c93 4201
/* Emit an indirect TB exit: look up the next TB for the current CPU
 * state and jump to it if found, otherwise return to the main loop.
 */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
4206
/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chaining: patchable jump slot 'n', then set PC and
         * exit with the (tb, slot) cookie so the slot can be linked.
         */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
    } else {
        /* Indirect: set PC first, then do a lookup-and-jump exit. */
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
4223
8aaca4c0
FB
4224static inline void gen_jmp (DisasContext *s, uint32_t dest)
4225{
b636649f 4226 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4227 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4228 if (s->thumb)
d9ba4830
PB
4229 dest |= 1;
4230 gen_bx_im(s, dest);
8aaca4c0 4231 } else {
6e256c93 4232 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4233 }
4234}
4235
39d5492a 4236static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4237{
ee097184 4238 if (x)
d9ba4830 4239 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4240 else
d9ba4830 4241 gen_sxth(t0);
ee097184 4242 if (y)
d9ba4830 4243 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4244 else
d9ba4830
PB
4245 gen_sxth(t1);
4246 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4247}
4248
4249/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4250static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4251{
b5ff1b31
FB
4252 uint32_t mask;
4253
4254 mask = 0;
4255 if (flags & (1 << 0))
4256 mask |= 0xff;
4257 if (flags & (1 << 1))
4258 mask |= 0xff00;
4259 if (flags & (1 << 2))
4260 mask |= 0xff0000;
4261 if (flags & (1 << 3))
4262 mask |= 0xff000000;
9ee6e8bb 4263
2ae23e75 4264 /* Mask out undefined bits. */
9ee6e8bb 4265 mask &= ~CPSR_RESERVED;
d614a513 4266 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4267 mask &= ~CPSR_T;
d614a513
PM
4268 }
4269 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4270 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4271 }
4272 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4273 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4274 }
4275 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4276 mask &= ~CPSR_IT;
d614a513 4277 }
4051e12c
PM
4278 /* Mask out execution state and reserved bits. */
4279 if (!spsr) {
4280 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4281 }
b5ff1b31
FB
4282 /* Mask out privileged bits. */
4283 if (IS_USER(s))
9ee6e8bb 4284 mask &= CPSR_USER;
b5ff1b31
FB
4285 return mask;
4286}
4287
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s))
            /* NOTE(review): t0 is not freed on this failure path. */
            return 1;

        /* Read-modify-write the SPSR: keep bits outside 'mask',
         * merge in the masked new value from t0.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR write goes through the helper-backed path. */
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR changes can affect translation state (e.g. mode). */
    gen_lookup_tb(s);
    return 0;
}
4309
/* Returns nonzero if access to the PSR is not permitted.
 * Immediate-value convenience wrapper: materialise 'val' into a temp
 * and delegate to gen_set_psr(), which consumes the temp.
 */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
4318
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4463
/* Emit code for MSR (banked): copy general register 'rn' into the
 * banked register selected by (r, sysm), via the msr_banked helper.
 * Emits an UNDEF exception instead if the access decode fails.
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    /* The helper may have changed mode state; force a TB end. */
    s->base.is_jmp = DISAS_UPDATE;
}
4485
/* Emit code for MRS (banked): read the banked register selected by
 * (r, sysm) into general register 'rn', via the mrs_banked helper.
 * Emits an UNDEF exception instead if the access decode fails.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    /* The helper may have changed mode state; force a TB end. */
    s->base.is_jmp = DISAS_UPDATE;
}
4507
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    /* Marks 'pc' as dead: the temp is freed after the copy into R15. */
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
4517
/* Generate a v6 exception return.  Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 4531
/* Generate an old-style exception return. Marks pc as dead.
 * The CPSR is restored from the current mode's banked SPSR.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4537
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        /* WFI always ends the TB so the halt state is honoured. */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
        /* fall through */
    default: /* nop */
        break;
    }
}
99c475ab 4577
/* Shorthand operand list: destination cpu_V0, sources cpu_V0 and cpu_V1. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4579
39d5492a 4580static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4581{
4582 switch (size) {
dd8fbd78
FN
4583 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4584 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4585 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4586 default: abort();
9ee6e8bb 4587 }
9ee6e8bb
PB
4588}
4589
39d5492a 4590static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4591{
4592 switch (size) {
dd8fbd78
FN
4593 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4594 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4595 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4596 default: return;
4597 }
4598}
4599
/* 32-bit pairwise ops end up the same as the elementwise versions,
 * so alias the pmax/pmin helpers to the plain max/min helpers.
 */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
4605
/* Dispatch a two-operand Neon integer helper that takes cpu_env, picking
 * the signed/unsigned 8/16/32-bit variant from the surrounding 'size'
 * and 'u' variables; operates on 'tmp' and 'tmp2', result in 'tmp'.
 * An invalid (size, u) combination makes the enclosing function return 1.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env:
 * pick the signed/unsigned 8/16/32-bit variant from the surrounding
 * 'size' and 'u' variables; operates on 'tmp' and 'tmp2', result in 'tmp'.
 * An invalid (size, u) combination makes the enclosing function return 1.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4651
39d5492a 4652static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4653{
39d5492a 4654 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4655 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4656 return tmp;
9ee6e8bb
PB
4657}
4658
/* Store 'var' into VFP scratch slot 'scratch'. Marks var as dead. */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4664
39d5492a 4665static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4666{
39d5492a 4667 TCGv_i32 tmp;
9ee6e8bb 4668 if (size == 1) {
0fad6efc
PM
4669 tmp = neon_load_reg(reg & 7, reg >> 4);
4670 if (reg & 8) {
dd8fbd78 4671 gen_neon_dup_high16(tmp);
0fad6efc
PM
4672 } else {
4673 gen_neon_dup_low16(tmp);
dd8fbd78 4674 }
0fad6efc
PM
4675 } else {
4676 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4677 }
dd8fbd78 4678 return tmp;
9ee6e8bb
PB
4679}
4680
/* Emit a VUZP (unzip) of registers rd/rm via the Neon helpers.
 * 'q' selects the 128-bit form, 'size' the element width.
 * Returns nonzero for the invalid doubleword 32-bit case (UNDEF).
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        /* 64-bit VUZP.32 is not a valid encoding. */
        return 1;
    }
    /* Register numbers are passed to the helper as i32 constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4719
/* Emit a VZIP (zip/interleave) of registers rd/rm via the Neon helpers.
 * 'q' selects the 128-bit form, 'size' the element width.
 * Returns nonzero for the invalid doubleword 32-bit case (UNDEF).
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        /* 64-bit VZIP.32 is not a valid encoding. */
        return 1;
    }
    /* Register numbers are passed to the helper as i32 constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4758
39d5492a 4759static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4760{
39d5492a 4761 TCGv_i32 rd, tmp;
19457615 4762
7d1b0095
PM
4763 rd = tcg_temp_new_i32();
4764 tmp = tcg_temp_new_i32();
19457615
FN
4765
4766 tcg_gen_shli_i32(rd, t0, 8);
4767 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4768 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4769 tcg_gen_or_i32(rd, rd, tmp);
4770
4771 tcg_gen_shri_i32(t1, t1, 8);
4772 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4773 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4774 tcg_gen_or_i32(t1, t1, tmp);
4775 tcg_gen_mov_i32(t0, rd);
4776
7d1b0095
PM
4777 tcg_temp_free_i32(tmp);
4778 tcg_temp_free_i32(rd);
19457615
FN
4779}
4780
39d5492a 4781static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4782{
39d5492a 4783 TCGv_i32 rd, tmp;
19457615 4784
7d1b0095
PM
4785 rd = tcg_temp_new_i32();
4786 tmp = tcg_temp_new_i32();
19457615
FN
4787
4788 tcg_gen_shli_i32(rd, t0, 16);
4789 tcg_gen_andi_i32(tmp, t1, 0xffff);
4790 tcg_gen_or_i32(rd, rd, tmp);
4791 tcg_gen_shri_i32(t1, t1, 16);
4792 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4793 tcg_gen_or_i32(t1, t1, tmp);
4794 tcg_gen_mov_i32(t0, rd);
4795
7d1b0095
PM
4796 tcg_temp_free_i32(tmp);
4797 tcg_temp_free_i32(rd);
19457615
FN
4798}
4799
4800
/* Lookup table for the "load/store all elements" forms of the Neon
 * element/structure load/store instructions, indexed by op (insn[11:8],
 * valid values 0..10 — the decoder rejects op > 10).
 *   nregs:      number of D registers transferred
 *   interleave: element interleave factor between registers
 *   spacing:    register spacing between transferred registers
 * The table is only ever read, so declare it const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},  /* op == 0 */
    {4, 4, 2},  /* op == 1 */
    {4, 1, 1},  /* op == 2 */
    {4, 2, 1},  /* op == 3 */
    {3, 3, 1},  /* op == 4 */
    {3, 3, 2},  /* op == 5 */
    {3, 1, 1},  /* op == 6 */
    {1, 1, 1},  /* op == 7 */
    {2, 2, 1},  /* op == 8 */
    {2, 2, 2},  /* op == 9 */
    {2, 1, 1}   /* op == 10 */
};
4818
4819/* Translate a NEON load/store element instruction. Return nonzero if the
4820 instruction is invalid. */
7dcc1f89 4821static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4822{
4823 int rd, rn, rm;
4824 int op;
4825 int nregs;
4826 int interleave;
84496233 4827 int spacing;
9ee6e8bb
PB
4828 int stride;
4829 int size;
4830 int reg;
4831 int pass;
4832 int load;
4833 int shift;
9ee6e8bb 4834 int n;
39d5492a
PM
4835 TCGv_i32 addr;
4836 TCGv_i32 tmp;
4837 TCGv_i32 tmp2;
84496233 4838 TCGv_i64 tmp64;
9ee6e8bb 4839
2c7ffc41
PM
4840 /* FIXME: this access check should not take precedence over UNDEF
4841 * for invalid encodings; we will generate incorrect syndrome information
4842 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4843 */
9dbbc748 4844 if (s->fp_excp_el) {
2c7ffc41 4845 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4846 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4847 return 0;
4848 }
4849
5df8bac1 4850 if (!s->vfp_enabled)
9ee6e8bb
PB
4851 return 1;
4852 VFP_DREG_D(rd, insn);
4853 rn = (insn >> 16) & 0xf;
4854 rm = insn & 0xf;
4855 load = (insn & (1 << 21)) != 0;
4856 if ((insn & (1 << 23)) == 0) {
4857 /* Load store all elements. */
4858 op = (insn >> 8) & 0xf;
4859 size = (insn >> 6) & 3;
84496233 4860 if (op > 10)
9ee6e8bb 4861 return 1;
f2dd89d0
PM
4862 /* Catch UNDEF cases for bad values of align field */
4863 switch (op & 0xc) {
4864 case 4:
4865 if (((insn >> 5) & 1) == 1) {
4866 return 1;
4867 }
4868 break;
4869 case 8:
4870 if (((insn >> 4) & 3) == 3) {
4871 return 1;
4872 }
4873 break;
4874 default:
4875 break;
4876 }
9ee6e8bb
PB
4877 nregs = neon_ls_element_type[op].nregs;
4878 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4879 spacing = neon_ls_element_type[op].spacing;
4880 if (size == 3 && (interleave | spacing) != 1)
4881 return 1;
e318a60b 4882 addr = tcg_temp_new_i32();
dcc65026 4883 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4884 stride = (1 << size) * interleave;
4885 for (reg = 0; reg < nregs; reg++) {
4886 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4887 load_reg_var(s, addr, rn);
4888 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4889 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4890 load_reg_var(s, addr, rn);
4891 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4892 }
84496233 4893 if (size == 3) {
8ed1237d 4894 tmp64 = tcg_temp_new_i64();
84496233 4895 if (load) {
12dcc321 4896 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 4897 neon_store_reg64(tmp64, rd);
84496233 4898 } else {
84496233 4899 neon_load_reg64(tmp64, rd);
12dcc321 4900 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 4901 }
8ed1237d 4902 tcg_temp_free_i64(tmp64);
84496233
JR
4903 tcg_gen_addi_i32(addr, addr, stride);
4904 } else {
4905 for (pass = 0; pass < 2; pass++) {
4906 if (size == 2) {
4907 if (load) {
58ab8e96 4908 tmp = tcg_temp_new_i32();
12dcc321 4909 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
4910 neon_store_reg(rd, pass, tmp);
4911 } else {
4912 tmp = neon_load_reg(rd, pass);
12dcc321 4913 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 4914 tcg_temp_free_i32(tmp);
84496233 4915 }
1b2b1e54 4916 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4917 } else if (size == 1) {
4918 if (load) {
58ab8e96 4919 tmp = tcg_temp_new_i32();
12dcc321 4920 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 4921 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 4922 tmp2 = tcg_temp_new_i32();
12dcc321 4923 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 4924 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4925 tcg_gen_shli_i32(tmp2, tmp2, 16);
4926 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4927 tcg_temp_free_i32(tmp2);
84496233
JR
4928 neon_store_reg(rd, pass, tmp);
4929 } else {
4930 tmp = neon_load_reg(rd, pass);
7d1b0095 4931 tmp2 = tcg_temp_new_i32();
84496233 4932 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 4933 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 4934 tcg_temp_free_i32(tmp);
84496233 4935 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 4936 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 4937 tcg_temp_free_i32(tmp2);
1b2b1e54 4938 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4939 }
84496233
JR
4940 } else /* size == 0 */ {
4941 if (load) {
39d5492a 4942 TCGV_UNUSED_I32(tmp2);
84496233 4943 for (n = 0; n < 4; n++) {
58ab8e96 4944 tmp = tcg_temp_new_i32();
12dcc321 4945 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
4946 tcg_gen_addi_i32(addr, addr, stride);
4947 if (n == 0) {
4948 tmp2 = tmp;
4949 } else {
41ba8341
PB
4950 tcg_gen_shli_i32(tmp, tmp, n * 8);
4951 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4952 tcg_temp_free_i32(tmp);
84496233 4953 }
9ee6e8bb 4954 }
84496233
JR
4955 neon_store_reg(rd, pass, tmp2);
4956 } else {
4957 tmp2 = neon_load_reg(rd, pass);
4958 for (n = 0; n < 4; n++) {
7d1b0095 4959 tmp = tcg_temp_new_i32();
84496233
JR
4960 if (n == 0) {
4961 tcg_gen_mov_i32(tmp, tmp2);
4962 } else {
4963 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4964 }
12dcc321 4965 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 4966 tcg_temp_free_i32(tmp);
84496233
JR
4967 tcg_gen_addi_i32(addr, addr, stride);
4968 }
7d1b0095 4969 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4970 }
4971 }
4972 }
4973 }
84496233 4974 rd += spacing;
9ee6e8bb 4975 }
e318a60b 4976 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4977 stride = nregs * 8;
4978 } else {
4979 size = (insn >> 10) & 3;
4980 if (size == 3) {
4981 /* Load single element to all lanes. */
8e18cde3
PM
4982 int a = (insn >> 4) & 1;
4983 if (!load) {
9ee6e8bb 4984 return 1;
8e18cde3 4985 }
9ee6e8bb
PB
4986 size = (insn >> 6) & 3;
4987 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4988
4989 if (size == 3) {
4990 if (nregs != 4 || a == 0) {
9ee6e8bb 4991 return 1;
99c475ab 4992 }
8e18cde3
PM
4993 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4994 size = 2;
4995 }
4996 if (nregs == 1 && a == 1 && size == 0) {
4997 return 1;
4998 }
4999 if (nregs == 3 && a == 1) {
5000 return 1;
5001 }
e318a60b 5002 addr = tcg_temp_new_i32();
8e18cde3
PM
5003 load_reg_var(s, addr, rn);
5004 if (nregs == 1) {
5005 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5006 tmp = gen_load_and_replicate(s, addr, size);
5007 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5008 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5009 if (insn & (1 << 5)) {
5010 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5011 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5012 }
5013 tcg_temp_free_i32(tmp);
5014 } else {
5015 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5016 stride = (insn & (1 << 5)) ? 2 : 1;
5017 for (reg = 0; reg < nregs; reg++) {
5018 tmp = gen_load_and_replicate(s, addr, size);
5019 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5020 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5021 tcg_temp_free_i32(tmp);
5022 tcg_gen_addi_i32(addr, addr, 1 << size);
5023 rd += stride;
5024 }
9ee6e8bb 5025 }
e318a60b 5026 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5027 stride = (1 << size) * nregs;
5028 } else {
5029 /* Single element. */
93262b16 5030 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5031 pass = (insn >> 7) & 1;
5032 switch (size) {
5033 case 0:
5034 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5035 stride = 1;
5036 break;
5037 case 1:
5038 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5039 stride = (insn & (1 << 5)) ? 2 : 1;
5040 break;
5041 case 2:
5042 shift = 0;
9ee6e8bb
PB
5043 stride = (insn & (1 << 6)) ? 2 : 1;
5044 break;
5045 default:
5046 abort();
5047 }
5048 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5049 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5050 switch (nregs) {
5051 case 1:
5052 if (((idx & (1 << size)) != 0) ||
5053 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5054 return 1;
5055 }
5056 break;
5057 case 3:
5058 if ((idx & 1) != 0) {
5059 return 1;
5060 }
5061 /* fall through */
5062 case 2:
5063 if (size == 2 && (idx & 2) != 0) {
5064 return 1;
5065 }
5066 break;
5067 case 4:
5068 if ((size == 2) && ((idx & 3) == 3)) {
5069 return 1;
5070 }
5071 break;
5072 default:
5073 abort();
5074 }
5075 if ((rd + stride * (nregs - 1)) > 31) {
5076 /* Attempts to write off the end of the register file
5077 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5078 * the neon_load_reg() would write off the end of the array.
5079 */
5080 return 1;
5081 }
e318a60b 5082 addr = tcg_temp_new_i32();
dcc65026 5083 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5084 for (reg = 0; reg < nregs; reg++) {
5085 if (load) {
58ab8e96 5086 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5087 switch (size) {
5088 case 0:
12dcc321 5089 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5090 break;
5091 case 1:
12dcc321 5092 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5093 break;
5094 case 2:
12dcc321 5095 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5096 break;
a50f5b91
PB
5097 default: /* Avoid compiler warnings. */
5098 abort();
9ee6e8bb
PB
5099 }
5100 if (size != 2) {
8f8e3aa4 5101 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5102 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5103 shift, size ? 16 : 8);
7d1b0095 5104 tcg_temp_free_i32(tmp2);
9ee6e8bb 5105 }
8f8e3aa4 5106 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5107 } else { /* Store */
8f8e3aa4
PB
5108 tmp = neon_load_reg(rd, pass);
5109 if (shift)
5110 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5111 switch (size) {
5112 case 0:
12dcc321 5113 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5114 break;
5115 case 1:
12dcc321 5116 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5117 break;
5118 case 2:
12dcc321 5119 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5120 break;
99c475ab 5121 }
58ab8e96 5122 tcg_temp_free_i32(tmp);
99c475ab 5123 }
9ee6e8bb 5124 rd += stride;
1b2b1e54 5125 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5126 }
e318a60b 5127 tcg_temp_free_i32(addr);
9ee6e8bb 5128 stride = nregs * (1 << size);
99c475ab 5129 }
9ee6e8bb
PB
5130 }
5131 if (rm != 15) {
39d5492a 5132 TCGv_i32 base;
b26eefb6
PB
5133
5134 base = load_reg(s, rn);
9ee6e8bb 5135 if (rm == 13) {
b26eefb6 5136 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5137 } else {
39d5492a 5138 TCGv_i32 index;
b26eefb6
PB
5139 index = load_reg(s, rm);
5140 tcg_gen_add_i32(base, base, index);
7d1b0095 5141 tcg_temp_free_i32(index);
9ee6e8bb 5142 }
b26eefb6 5143 store_reg(s, rn, base);
9ee6e8bb
PB
5144 }
5145 return 0;
5146}
3b46e624 5147
8f8e3aa4 5148/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5149static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5150{
5151 tcg_gen_and_i32(t, t, c);
f669df27 5152 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5153 tcg_gen_or_i32(dest, t, f);
5154}
5155
39d5492a 5156static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5157{
5158 switch (size) {
5159 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5160 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5161 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5162 default: abort();
5163 }
5164}
5165
39d5492a 5166static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5167{
5168 switch (size) {
02da0b2d
PM
5169 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5170 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5171 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5172 default: abort();
5173 }
5174}
5175
39d5492a 5176static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5177{
5178 switch (size) {
02da0b2d
PM
5179 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5180 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5181 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5182 default: abort();
5183 }
5184}
5185
39d5492a 5186static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5187{
5188 switch (size) {
02da0b2d
PM
5189 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5190 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5191 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5192 default: abort();
5193 }
5194}
5195
39d5492a 5196static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5197 int q, int u)
5198{
5199 if (q) {
5200 if (u) {
5201 switch (size) {
5202 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5203 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5204 default: abort();
5205 }
5206 } else {
5207 switch (size) {
5208 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5209 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5210 default: abort();
5211 }
5212 }
5213 } else {
5214 if (u) {
5215 switch (size) {
b408a9b0
CL
5216 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5217 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5218 default: abort();
5219 }
5220 } else {
5221 switch (size) {
5222 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5223 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5224 default: abort();
5225 }
5226 }
5227 }
5228}
5229
39d5492a 5230static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5231{
5232 if (u) {
5233 switch (size) {
5234 case 0: gen_helper_neon_widen_u8(dest, src); break;
5235 case 1: gen_helper_neon_widen_u16(dest, src); break;
5236 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5237 default: abort();
5238 }
5239 } else {
5240 switch (size) {
5241 case 0: gen_helper_neon_widen_s8(dest, src); break;
5242 case 1: gen_helper_neon_widen_s16(dest, src); break;
5243 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5244 default: abort();
5245 }
5246 }
7d1b0095 5247 tcg_temp_free_i32(src);
ad69471c
PB
5248}
5249
5250static inline void gen_neon_addl(int size)
5251{
5252 switch (size) {
5253 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5254 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5255 case 2: tcg_gen_add_i64(CPU_V001); break;
5256 default: abort();
5257 }
5258}
5259
5260static inline void gen_neon_subl(int size)
5261{
5262 switch (size) {
5263 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5264 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5265 case 2: tcg_gen_sub_i64(CPU_V001); break;
5266 default: abort();
5267 }
5268}
5269
a7812ae4 5270static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5271{
5272 switch (size) {
5273 case 0: gen_helper_neon_negl_u16(var, var); break;
5274 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5275 case 2:
5276 tcg_gen_neg_i64(var, var);
5277 break;
ad69471c
PB
5278 default: abort();
5279 }
5280}
5281
a7812ae4 5282static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5283{
5284 switch (size) {
02da0b2d
PM
5285 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5286 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5287 default: abort();
5288 }
5289}
5290
39d5492a
PM
5291static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5292 int size, int u)
ad69471c 5293{
a7812ae4 5294 TCGv_i64 tmp;
ad69471c
PB
5295
5296 switch ((size << 1) | u) {
5297 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5298 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5299 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5300 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5301 case 4:
5302 tmp = gen_muls_i64_i32(a, b);
5303 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5304 tcg_temp_free_i64(tmp);
ad69471c
PB
5305 break;
5306 case 5:
5307 tmp = gen_mulu_i64_i32(a, b);
5308 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5309 tcg_temp_free_i64(tmp);
ad69471c
PB
5310 break;
5311 default: abort();
5312 }
c6067f04
CL
5313
5314 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5315 Don't forget to clean them now. */
5316 if (size < 2) {
7d1b0095
PM
5317 tcg_temp_free_i32(a);
5318 tcg_temp_free_i32(b);
c6067f04 5319 }
ad69471c
PB
5320}
5321
39d5492a
PM
5322static void gen_neon_narrow_op(int op, int u, int size,
5323 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5324{
5325 if (op) {
5326 if (u) {
5327 gen_neon_unarrow_sats(size, dest, src);
5328 } else {
5329 gen_neon_narrow(size, dest, src);
5330 }
5331 } else {
5332 if (u) {
5333 gen_neon_narrow_satu(size, dest, src);
5334 } else {
5335 gen_neon_narrow_sats(size, dest, src);
5336 }
5337 }
5338}
5339
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* For each 3-reg-same op, bit n is set if element size n is allowed
 * (otherwise that size UNDEFs). Unallocated op values have no bits set
 * and therefore always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5411
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

/* Return true if this neon 2reg-misc op is float-to-float. */
static int neon_2rm_is_float_op(int op)
{
    if (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) {
        return 1;
    }
    if (op == NEON_2RM_VRINTM) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) {
        return 1;
    }
    return op >= NEON_2RM_VRECPE_F;
}

/* Return true if this neon 2reg-misc op is ARMv8 and up.
 * These are the round-to-integral (VRINT*) and directed-rounding
 * convert (VCVT{A,N,P,M}{U,S}) ops, i.e. every op in the range
 * [VRINTN, VCVTMS] except the two pre-v8 half-precision conversions
 * that share that part of the encoding space.
 */
static bool neon_2rm_is_v8_op(int op)
{
    if (op < NEON_2RM_VRINTN || op > NEON_2RM_VCVTMS) {
        return false;
    }
    return op != NEON_2RM_VCVT_F16_F32 && op != NEON_2RM_VCVT_F32_F16;
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5581
9ee6e8bb
PB
5582/* Translate a NEON data processing instruction. Return nonzero if the
5583 instruction is invalid.
ad69471c
PB
5584 We process data in a mixture of 32-bit and 64-bit chunks.
5585 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5586
7dcc1f89 5587static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5588{
5589 int op;
5590 int q;
5591 int rd, rn, rm;
5592 int size;
5593 int shift;
5594 int pass;
5595 int count;
5596 int pairwise;
5597 int u;
ca9a32e4 5598 uint32_t imm, mask;
39d5492a 5599 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5600 TCGv_i64 tmp64;
9ee6e8bb 5601
2c7ffc41
PM
5602 /* FIXME: this access check should not take precedence over UNDEF
5603 * for invalid encodings; we will generate incorrect syndrome information
5604 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5605 */
9dbbc748 5606 if (s->fp_excp_el) {
2c7ffc41 5607 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5608 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5609 return 0;
5610 }
5611
5df8bac1 5612 if (!s->vfp_enabled)
9ee6e8bb
PB
5613 return 1;
5614 q = (insn & (1 << 6)) != 0;
5615 u = (insn >> 24) & 1;
5616 VFP_DREG_D(rd, insn);
5617 VFP_DREG_N(rn, insn);
5618 VFP_DREG_M(rm, insn);
5619 size = (insn >> 20) & 3;
5620 if ((insn & (1 << 23)) == 0) {
5621 /* Three register same length. */
5622 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5623 /* Catch invalid op and bad size combinations: UNDEF */
5624 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5625 return 1;
5626 }
25f84f79
PM
5627 /* All insns of this form UNDEF for either this condition or the
5628 * superset of cases "Q==1"; we catch the latter later.
5629 */
5630 if (q && ((rd | rn | rm) & 1)) {
5631 return 1;
5632 }
f1ecb913
AB
5633 /*
5634 * The SHA-1/SHA-256 3-register instructions require special treatment
5635 * here, as their size field is overloaded as an op type selector, and
5636 * they all consume their input in a single pass.
5637 */
5638 if (op == NEON_3R_SHA) {
5639 if (!q) {
5640 return 1;
5641 }
5642 if (!u) { /* SHA-1 */
d614a513 5643 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5644 return 1;
5645 }
5646 tmp = tcg_const_i32(rd);
5647 tmp2 = tcg_const_i32(rn);
5648 tmp3 = tcg_const_i32(rm);
5649 tmp4 = tcg_const_i32(size);
5650 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5651 tcg_temp_free_i32(tmp4);
5652 } else { /* SHA-256 */
d614a513 5653 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5654 return 1;
5655 }
5656 tmp = tcg_const_i32(rd);
5657 tmp2 = tcg_const_i32(rn);
5658 tmp3 = tcg_const_i32(rm);
5659 switch (size) {
5660 case 0:
5661 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5662 break;
5663 case 1:
5664 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5665 break;
5666 case 2:
5667 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5668 break;
5669 }
5670 }
5671 tcg_temp_free_i32(tmp);
5672 tcg_temp_free_i32(tmp2);
5673 tcg_temp_free_i32(tmp3);
5674 return 0;
5675 }
62698be3
PM
5676 if (size == 3 && op != NEON_3R_LOGIC) {
5677 /* 64-bit element instructions. */
9ee6e8bb 5678 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5679 neon_load_reg64(cpu_V0, rn + pass);
5680 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5681 switch (op) {
62698be3 5682 case NEON_3R_VQADD:
9ee6e8bb 5683 if (u) {
02da0b2d
PM
5684 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5685 cpu_V0, cpu_V1);
2c0262af 5686 } else {
02da0b2d
PM
5687 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5688 cpu_V0, cpu_V1);
2c0262af 5689 }
9ee6e8bb 5690 break;
62698be3 5691 case NEON_3R_VQSUB:
9ee6e8bb 5692 if (u) {
02da0b2d
PM
5693 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5694 cpu_V0, cpu_V1);
ad69471c 5695 } else {
02da0b2d
PM
5696 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5697 cpu_V0, cpu_V1);
ad69471c
PB
5698 }
5699 break;
62698be3 5700 case NEON_3R_VSHL:
ad69471c
PB
5701 if (u) {
5702 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5703 } else {
5704 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5705 }
5706 break;
62698be3 5707 case NEON_3R_VQSHL:
ad69471c 5708 if (u) {
02da0b2d
PM
5709 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5710 cpu_V1, cpu_V0);
ad69471c 5711 } else {
02da0b2d
PM
5712 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5713 cpu_V1, cpu_V0);
ad69471c
PB
5714 }
5715 break;
62698be3 5716 case NEON_3R_VRSHL:
ad69471c
PB
5717 if (u) {
5718 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5719 } else {
ad69471c
PB
5720 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5721 }
5722 break;
62698be3 5723 case NEON_3R_VQRSHL:
ad69471c 5724 if (u) {
02da0b2d
PM
5725 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5726 cpu_V1, cpu_V0);
ad69471c 5727 } else {
02da0b2d
PM
5728 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5729 cpu_V1, cpu_V0);
1e8d4eec 5730 }
9ee6e8bb 5731 break;
62698be3 5732 case NEON_3R_VADD_VSUB:
9ee6e8bb 5733 if (u) {
ad69471c 5734 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5735 } else {
ad69471c 5736 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5737 }
5738 break;
5739 default:
5740 abort();
2c0262af 5741 }
ad69471c 5742 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5743 }
9ee6e8bb 5744 return 0;
2c0262af 5745 }
25f84f79 5746 pairwise = 0;
9ee6e8bb 5747 switch (op) {
62698be3
PM
5748 case NEON_3R_VSHL:
5749 case NEON_3R_VQSHL:
5750 case NEON_3R_VRSHL:
5751 case NEON_3R_VQRSHL:
9ee6e8bb 5752 {
ad69471c
PB
5753 int rtmp;
5754 /* Shift instruction operands are reversed. */
5755 rtmp = rn;
9ee6e8bb 5756 rn = rm;
ad69471c 5757 rm = rtmp;
9ee6e8bb 5758 }
2c0262af 5759 break;
25f84f79
PM
5760 case NEON_3R_VPADD:
5761 if (u) {
5762 return 1;
5763 }
5764 /* Fall through */
62698be3
PM
5765 case NEON_3R_VPMAX:
5766 case NEON_3R_VPMIN:
9ee6e8bb 5767 pairwise = 1;
2c0262af 5768 break;
25f84f79
PM
5769 case NEON_3R_FLOAT_ARITH:
5770 pairwise = (u && size < 2); /* if VPADD (float) */
5771 break;
5772 case NEON_3R_FLOAT_MINMAX:
5773 pairwise = u; /* if VPMIN/VPMAX (float) */
5774 break;
5775 case NEON_3R_FLOAT_CMP:
5776 if (!u && size) {
5777 /* no encoding for U=0 C=1x */
5778 return 1;
5779 }
5780 break;
5781 case NEON_3R_FLOAT_ACMP:
5782 if (!u) {
5783 return 1;
5784 }
5785 break;
505935fc
WN
5786 case NEON_3R_FLOAT_MISC:
5787 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5788 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5789 return 1;
5790 }
2c0262af 5791 break;
25f84f79
PM
5792 case NEON_3R_VMUL:
5793 if (u && (size != 0)) {
5794 /* UNDEF on invalid size for polynomial subcase */
5795 return 1;
5796 }
2c0262af 5797 break;
da97f52c 5798 case NEON_3R_VFM:
d614a513 5799 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5800 return 1;
5801 }
5802 break;
9ee6e8bb 5803 default:
2c0262af 5804 break;
9ee6e8bb 5805 }
dd8fbd78 5806
25f84f79
PM
5807 if (pairwise && q) {
5808 /* All the pairwise insns UNDEF if Q is set */
5809 return 1;
5810 }
5811
9ee6e8bb
PB
5812 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5813
5814 if (pairwise) {
5815 /* Pairwise. */
a5a14945
JR
5816 if (pass < 1) {
5817 tmp = neon_load_reg(rn, 0);
5818 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5819 } else {
a5a14945
JR
5820 tmp = neon_load_reg(rm, 0);
5821 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5822 }
5823 } else {
5824 /* Elementwise. */
dd8fbd78
FN
5825 tmp = neon_load_reg(rn, pass);
5826 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5827 }
5828 switch (op) {
62698be3 5829 case NEON_3R_VHADD:
9ee6e8bb
PB
5830 GEN_NEON_INTEGER_OP(hadd);
5831 break;
62698be3 5832 case NEON_3R_VQADD:
02da0b2d 5833 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5834 break;
62698be3 5835 case NEON_3R_VRHADD:
9ee6e8bb 5836 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5837 break;
62698be3 5838 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5839 switch ((u << 2) | size) {
5840 case 0: /* VAND */
dd8fbd78 5841 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5842 break;
5843 case 1: /* BIC */
f669df27 5844 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5845 break;
5846 case 2: /* VORR */
dd8fbd78 5847 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5848 break;
5849 case 3: /* VORN */
f669df27 5850 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5851 break;
5852 case 4: /* VEOR */
dd8fbd78 5853 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5854 break;
5855 case 5: /* VBSL */
dd8fbd78
FN
5856 tmp3 = neon_load_reg(rd, pass);
5857 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5858 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5859 break;
5860 case 6: /* VBIT */
dd8fbd78
FN
5861 tmp3 = neon_load_reg(rd, pass);
5862 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5863 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5864 break;
5865 case 7: /* VBIF */
dd8fbd78
FN
5866 tmp3 = neon_load_reg(rd, pass);
5867 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5868 tcg_temp_free_i32(tmp3);
9ee6e8bb 5869 break;
2c0262af
FB
5870 }
5871 break;
62698be3 5872 case NEON_3R_VHSUB:
9ee6e8bb
PB
5873 GEN_NEON_INTEGER_OP(hsub);
5874 break;
62698be3 5875 case NEON_3R_VQSUB:
02da0b2d 5876 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5877 break;
62698be3 5878 case NEON_3R_VCGT:
9ee6e8bb
PB
5879 GEN_NEON_INTEGER_OP(cgt);
5880 break;
62698be3 5881 case NEON_3R_VCGE:
9ee6e8bb
PB
5882 GEN_NEON_INTEGER_OP(cge);
5883 break;
62698be3 5884 case NEON_3R_VSHL:
ad69471c 5885 GEN_NEON_INTEGER_OP(shl);
2c0262af 5886 break;
62698be3 5887 case NEON_3R_VQSHL:
02da0b2d 5888 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5889 break;
62698be3 5890 case NEON_3R_VRSHL:
ad69471c 5891 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5892 break;
62698be3 5893 case NEON_3R_VQRSHL:
02da0b2d 5894 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5895 break;
62698be3 5896 case NEON_3R_VMAX:
9ee6e8bb
PB
5897 GEN_NEON_INTEGER_OP(max);
5898 break;
62698be3 5899 case NEON_3R_VMIN:
9ee6e8bb
PB
5900 GEN_NEON_INTEGER_OP(min);
5901 break;
62698be3 5902 case NEON_3R_VABD:
9ee6e8bb
PB
5903 GEN_NEON_INTEGER_OP(abd);
5904 break;
62698be3 5905 case NEON_3R_VABA:
9ee6e8bb 5906 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5907 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5908 tmp2 = neon_load_reg(rd, pass);
5909 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5910 break;
62698be3 5911 case NEON_3R_VADD_VSUB:
9ee6e8bb 5912 if (!u) { /* VADD */
62698be3 5913 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5914 } else { /* VSUB */
5915 switch (size) {
dd8fbd78
FN
5916 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5917 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5918 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5919 default: abort();
9ee6e8bb
PB
5920 }
5921 }
5922 break;
62698be3 5923 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5924 if (!u) { /* VTST */
5925 switch (size) {
dd8fbd78
FN
5926 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5927 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5928 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5929 default: abort();
9ee6e8bb
PB
5930 }
5931 } else { /* VCEQ */
5932 switch (size) {
dd8fbd78
FN
5933 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5934 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5935 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5936 default: abort();
9ee6e8bb
PB
5937 }
5938 }
5939 break;
62698be3 5940 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5941 switch (size) {
dd8fbd78
FN
5942 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5943 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5944 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5945 default: abort();
9ee6e8bb 5946 }
7d1b0095 5947 tcg_temp_free_i32(tmp2);
dd8fbd78 5948 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5949 if (u) { /* VMLS */
dd8fbd78 5950 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5951 } else { /* VMLA */
dd8fbd78 5952 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5953 }
5954 break;
62698be3 5955 case NEON_3R_VMUL:
9ee6e8bb 5956 if (u) { /* polynomial */
dd8fbd78 5957 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5958 } else { /* Integer */
5959 switch (size) {
dd8fbd78
FN
5960 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5961 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5962 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5963 default: abort();
9ee6e8bb
PB
5964 }
5965 }
5966 break;
62698be3 5967 case NEON_3R_VPMAX:
9ee6e8bb
PB
5968 GEN_NEON_INTEGER_OP(pmax);
5969 break;
62698be3 5970 case NEON_3R_VPMIN:
9ee6e8bb
PB
5971 GEN_NEON_INTEGER_OP(pmin);
5972 break;
62698be3 5973 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5974 if (!u) { /* VQDMULH */
5975 switch (size) {
02da0b2d
PM
5976 case 1:
5977 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5978 break;
5979 case 2:
5980 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5981 break;
62698be3 5982 default: abort();
9ee6e8bb 5983 }
62698be3 5984 } else { /* VQRDMULH */
9ee6e8bb 5985 switch (size) {
02da0b2d
PM
5986 case 1:
5987 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5988 break;
5989 case 2:
5990 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5991 break;
62698be3 5992 default: abort();
9ee6e8bb
PB
5993 }
5994 }
5995 break;
62698be3 5996 case NEON_3R_VPADD:
9ee6e8bb 5997 switch (size) {
dd8fbd78
FN
5998 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5999 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6000 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 6001 default: abort();
9ee6e8bb
PB
6002 }
6003 break;
62698be3 6004 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6005 {
6006 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6007 switch ((u << 2) | size) {
6008 case 0: /* VADD */
aa47cfdd
PM
6009 case 4: /* VPADD */
6010 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6011 break;
6012 case 2: /* VSUB */
aa47cfdd 6013 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6014 break;
6015 case 6: /* VABD */
aa47cfdd 6016 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6017 break;
6018 default:
62698be3 6019 abort();
9ee6e8bb 6020 }
aa47cfdd 6021 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6022 break;
aa47cfdd 6023 }
62698be3 6024 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6025 {
6026 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6027 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6028 if (!u) {
7d1b0095 6029 tcg_temp_free_i32(tmp2);
dd8fbd78 6030 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6031 if (size == 0) {
aa47cfdd 6032 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6033 } else {
aa47cfdd 6034 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6035 }
6036 }
aa47cfdd 6037 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6038 break;
aa47cfdd 6039 }
62698be3 6040 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6041 {
6042 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6043 if (!u) {
aa47cfdd 6044 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6045 } else {
aa47cfdd
PM
6046 if (size == 0) {
6047 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6048 } else {
6049 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6050 }
b5ff1b31 6051 }
aa47cfdd 6052 tcg_temp_free_ptr(fpstatus);
2c0262af 6053 break;
aa47cfdd 6054 }
62698be3 6055 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6056 {
6057 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6058 if (size == 0) {
6059 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6060 } else {
6061 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6062 }
6063 tcg_temp_free_ptr(fpstatus);
2c0262af 6064 break;
aa47cfdd 6065 }
62698be3 6066 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6067 {
6068 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6069 if (size == 0) {
f71a2ae5 6070 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6071 } else {
f71a2ae5 6072 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6073 }
6074 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6075 break;
aa47cfdd 6076 }
505935fc
WN
6077 case NEON_3R_FLOAT_MISC:
6078 if (u) {
6079 /* VMAXNM/VMINNM */
6080 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6081 if (size == 0) {
f71a2ae5 6082 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6083 } else {
f71a2ae5 6084 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6085 }
6086 tcg_temp_free_ptr(fpstatus);
6087 } else {
6088 if (size == 0) {
6089 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6090 } else {
6091 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6092 }
6093 }
2c0262af 6094 break;
da97f52c
PM
6095 case NEON_3R_VFM:
6096 {
6097 /* VFMA, VFMS: fused multiply-add */
6098 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6099 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6100 if (size) {
6101 /* VFMS */
6102 gen_helper_vfp_negs(tmp, tmp);
6103 }
6104 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6105 tcg_temp_free_i32(tmp3);
6106 tcg_temp_free_ptr(fpstatus);
6107 break;
6108 }
9ee6e8bb
PB
6109 default:
6110 abort();
2c0262af 6111 }
7d1b0095 6112 tcg_temp_free_i32(tmp2);
dd8fbd78 6113
9ee6e8bb
PB
6114 /* Save the result. For elementwise operations we can put it
6115 straight into the destination register. For pairwise operations
6116 we have to be careful to avoid clobbering the source operands. */
6117 if (pairwise && rd == rm) {
dd8fbd78 6118 neon_store_scratch(pass, tmp);
9ee6e8bb 6119 } else {
dd8fbd78 6120 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6121 }
6122
6123 } /* for pass */
6124 if (pairwise && rd == rm) {
6125 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6126 tmp = neon_load_scratch(pass);
6127 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6128 }
6129 }
ad69471c 6130 /* End of 3 register same size operations. */
9ee6e8bb
PB
6131 } else if (insn & (1 << 4)) {
6132 if ((insn & 0x00380080) != 0) {
6133 /* Two registers and shift. */
6134 op = (insn >> 8) & 0xf;
6135 if (insn & (1 << 7)) {
cc13115b
PM
6136 /* 64-bit shift. */
6137 if (op > 7) {
6138 return 1;
6139 }
9ee6e8bb
PB
6140 size = 3;
6141 } else {
6142 size = 2;
6143 while ((insn & (1 << (size + 19))) == 0)
6144 size--;
6145 }
6146 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6147 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6148 by immediate using the variable shift operations. */
6149 if (op < 8) {
6150 /* Shift by immediate:
6151 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6152 if (q && ((rd | rm) & 1)) {
6153 return 1;
6154 }
6155 if (!u && (op == 4 || op == 6)) {
6156 return 1;
6157 }
9ee6e8bb
PB
6158 /* Right shifts are encoded as N - shift, where N is the
6159 element size in bits. */
6160 if (op <= 4)
6161 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6162 if (size == 3) {
6163 count = q + 1;
6164 } else {
6165 count = q ? 4: 2;
6166 }
6167 switch (size) {
6168 case 0:
6169 imm = (uint8_t) shift;
6170 imm |= imm << 8;
6171 imm |= imm << 16;
6172 break;
6173 case 1:
6174 imm = (uint16_t) shift;
6175 imm |= imm << 16;
6176 break;
6177 case 2:
6178 case 3:
6179 imm = shift;
6180 break;
6181 default:
6182 abort();
6183 }
6184
6185 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6186 if (size == 3) {
6187 neon_load_reg64(cpu_V0, rm + pass);
6188 tcg_gen_movi_i64(cpu_V1, imm);
6189 switch (op) {
6190 case 0: /* VSHR */
6191 case 1: /* VSRA */
6192 if (u)
6193 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6194 else
ad69471c 6195 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6196 break;
ad69471c
PB
6197 case 2: /* VRSHR */
6198 case 3: /* VRSRA */
6199 if (u)
6200 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6201 else
ad69471c 6202 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6203 break;
ad69471c 6204 case 4: /* VSRI */
ad69471c
PB
6205 case 5: /* VSHL, VSLI */
6206 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6207 break;
0322b26e 6208 case 6: /* VQSHLU */
02da0b2d
PM
6209 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6210 cpu_V0, cpu_V1);
ad69471c 6211 break;
0322b26e
PM
6212 case 7: /* VQSHL */
6213 if (u) {
02da0b2d 6214 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6215 cpu_V0, cpu_V1);
6216 } else {
02da0b2d 6217 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6218 cpu_V0, cpu_V1);
6219 }
9ee6e8bb 6220 break;
9ee6e8bb 6221 }
ad69471c
PB
6222 if (op == 1 || op == 3) {
6223 /* Accumulate. */
5371cb81 6224 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6225 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6226 } else if (op == 4 || (op == 5 && u)) {
6227 /* Insert */
923e6509
CL
6228 neon_load_reg64(cpu_V1, rd + pass);
6229 uint64_t mask;
6230 if (shift < -63 || shift > 63) {
6231 mask = 0;
6232 } else {
6233 if (op == 4) {
6234 mask = 0xffffffffffffffffull >> -shift;
6235 } else {
6236 mask = 0xffffffffffffffffull << shift;
6237 }
6238 }
6239 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6240 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6241 }
6242 neon_store_reg64(cpu_V0, rd + pass);
6243 } else { /* size < 3 */
6244 /* Operands in T0 and T1. */
dd8fbd78 6245 tmp = neon_load_reg(rm, pass);
7d1b0095 6246 tmp2 = tcg_temp_new_i32();
dd8fbd78 6247 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6248 switch (op) {
6249 case 0: /* VSHR */
6250 case 1: /* VSRA */
6251 GEN_NEON_INTEGER_OP(shl);
6252 break;
6253 case 2: /* VRSHR */
6254 case 3: /* VRSRA */
6255 GEN_NEON_INTEGER_OP(rshl);
6256 break;
6257 case 4: /* VSRI */
ad69471c
PB
6258 case 5: /* VSHL, VSLI */
6259 switch (size) {
dd8fbd78
FN
6260 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6261 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6262 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6263 default: abort();
ad69471c
PB
6264 }
6265 break;
0322b26e 6266 case 6: /* VQSHLU */
ad69471c 6267 switch (size) {
0322b26e 6268 case 0:
02da0b2d
PM
6269 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6270 tmp, tmp2);
0322b26e
PM
6271 break;
6272 case 1:
02da0b2d
PM
6273 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6274 tmp, tmp2);
0322b26e
PM
6275 break;
6276 case 2:
02da0b2d
PM
6277 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6278 tmp, tmp2);
0322b26e
PM
6279 break;
6280 default:
cc13115b 6281 abort();
ad69471c
PB
6282 }
6283 break;
0322b26e 6284 case 7: /* VQSHL */
02da0b2d 6285 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6286 break;
ad69471c 6287 }
7d1b0095 6288 tcg_temp_free_i32(tmp2);
ad69471c
PB
6289
6290 if (op == 1 || op == 3) {
6291 /* Accumulate. */
dd8fbd78 6292 tmp2 = neon_load_reg(rd, pass);
5371cb81 6293 gen_neon_add(size, tmp, tmp2);
7d1b0095 6294 tcg_temp_free_i32(tmp2);
ad69471c
PB
6295 } else if (op == 4 || (op == 5 && u)) {
6296 /* Insert */
6297 switch (size) {
6298 case 0:
6299 if (op == 4)
ca9a32e4 6300 mask = 0xff >> -shift;
ad69471c 6301 else
ca9a32e4
JR
6302 mask = (uint8_t)(0xff << shift);
6303 mask |= mask << 8;
6304 mask |= mask << 16;
ad69471c
PB
6305 break;
6306 case 1:
6307 if (op == 4)
ca9a32e4 6308 mask = 0xffff >> -shift;
ad69471c 6309 else
ca9a32e4
JR
6310 mask = (uint16_t)(0xffff << shift);
6311 mask |= mask << 16;
ad69471c
PB
6312 break;
6313 case 2:
ca9a32e4
JR
6314 if (shift < -31 || shift > 31) {
6315 mask = 0;
6316 } else {
6317 if (op == 4)
6318 mask = 0xffffffffu >> -shift;
6319 else
6320 mask = 0xffffffffu << shift;
6321 }
ad69471c
PB
6322 break;
6323 default:
6324 abort();
6325 }
dd8fbd78 6326 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6327 tcg_gen_andi_i32(tmp, tmp, mask);
6328 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6329 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6330 tcg_temp_free_i32(tmp2);
ad69471c 6331 }
dd8fbd78 6332 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6333 }
6334 } /* for pass */
6335 } else if (op < 10) {
ad69471c 6336 /* Shift by immediate and narrow:
9ee6e8bb 6337 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6338 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6339 if (rm & 1) {
6340 return 1;
6341 }
9ee6e8bb
PB
6342 shift = shift - (1 << (size + 3));
6343 size++;
92cdfaeb 6344 if (size == 3) {
a7812ae4 6345 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6346 neon_load_reg64(cpu_V0, rm);
6347 neon_load_reg64(cpu_V1, rm + 1);
6348 for (pass = 0; pass < 2; pass++) {
6349 TCGv_i64 in;
6350 if (pass == 0) {
6351 in = cpu_V0;
6352 } else {
6353 in = cpu_V1;
6354 }
ad69471c 6355 if (q) {
0b36f4cd 6356 if (input_unsigned) {
92cdfaeb 6357 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6358 } else {
92cdfaeb 6359 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6360 }
ad69471c 6361 } else {
0b36f4cd 6362 if (input_unsigned) {
92cdfaeb 6363 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6364 } else {
92cdfaeb 6365 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6366 }
ad69471c 6367 }
7d1b0095 6368 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6369 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6370 neon_store_reg(rd, pass, tmp);
6371 } /* for pass */
6372 tcg_temp_free_i64(tmp64);
6373 } else {
6374 if (size == 1) {
6375 imm = (uint16_t)shift;
6376 imm |= imm << 16;
2c0262af 6377 } else {
92cdfaeb
PM
6378 /* size == 2 */
6379 imm = (uint32_t)shift;
6380 }
6381 tmp2 = tcg_const_i32(imm);
6382 tmp4 = neon_load_reg(rm + 1, 0);
6383 tmp5 = neon_load_reg(rm + 1, 1);
6384 for (pass = 0; pass < 2; pass++) {
6385 if (pass == 0) {
6386 tmp = neon_load_reg(rm, 0);
6387 } else {
6388 tmp = tmp4;
6389 }
0b36f4cd
CL
6390 gen_neon_shift_narrow(size, tmp, tmp2, q,
6391 input_unsigned);
92cdfaeb
PM
6392 if (pass == 0) {
6393 tmp3 = neon_load_reg(rm, 1);
6394 } else {
6395 tmp3 = tmp5;
6396 }
0b36f4cd
CL
6397 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6398 input_unsigned);
36aa55dc 6399 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6400 tcg_temp_free_i32(tmp);
6401 tcg_temp_free_i32(tmp3);
6402 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6403 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6404 neon_store_reg(rd, pass, tmp);
6405 } /* for pass */
c6067f04 6406 tcg_temp_free_i32(tmp2);
b75263d6 6407 }
9ee6e8bb 6408 } else if (op == 10) {
cc13115b
PM
6409 /* VSHLL, VMOVL */
6410 if (q || (rd & 1)) {
9ee6e8bb 6411 return 1;
cc13115b 6412 }
ad69471c
PB
6413 tmp = neon_load_reg(rm, 0);
6414 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6415 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6416 if (pass == 1)
6417 tmp = tmp2;
6418
6419 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6420
9ee6e8bb
PB
6421 if (shift != 0) {
6422 /* The shift is less than the width of the source
ad69471c
PB
6423 type, so we can just shift the whole register. */
6424 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6425 /* Widen the result of shift: we need to clear
6426 * the potential overflow bits resulting from
6427 * left bits of the narrow input appearing as
6428 * right bits of left the neighbour narrow
6429 * input. */
ad69471c
PB
6430 if (size < 2 || !u) {
6431 uint64_t imm64;
6432 if (size == 0) {
6433 imm = (0xffu >> (8 - shift));
6434 imm |= imm << 16;
acdf01ef 6435 } else if (size == 1) {
ad69471c 6436 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6437 } else {
6438 /* size == 2 */
6439 imm = 0xffffffff >> (32 - shift);
6440 }
6441 if (size < 2) {
6442 imm64 = imm | (((uint64_t)imm) << 32);
6443 } else {
6444 imm64 = imm;
9ee6e8bb 6445 }
acdf01ef 6446 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6447 }
6448 }
ad69471c 6449 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6450 }
f73534a5 6451 } else if (op >= 14) {
9ee6e8bb 6452 /* VCVT fixed-point. */
cc13115b
PM
6453 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6454 return 1;
6455 }
f73534a5
PM
6456 /* We have already masked out the must-be-1 top bit of imm6,
6457 * hence this 32-shift where the ARM ARM has 64-imm6.
6458 */
6459 shift = 32 - shift;
9ee6e8bb 6460 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6461 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6462 if (!(op & 1)) {
9ee6e8bb 6463 if (u)
5500b06c 6464 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6465 else
5500b06c 6466 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6467 } else {
6468 if (u)
5500b06c 6469 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6470 else
5500b06c 6471 gen_vfp_tosl(0, shift, 1);
2c0262af 6472 }
4373f3ce 6473 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6474 }
6475 } else {
9ee6e8bb
PB
6476 return 1;
6477 }
6478 } else { /* (insn & 0x00380080) == 0 */
6479 int invert;
7d80fee5
PM
6480 if (q && (rd & 1)) {
6481 return 1;
6482 }
9ee6e8bb
PB
6483
6484 op = (insn >> 8) & 0xf;
6485 /* One register and immediate. */
6486 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6487 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6488 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6489 * We choose to not special-case this and will behave as if a
6490 * valid constant encoding of 0 had been given.
6491 */
9ee6e8bb
PB
6492 switch (op) {
6493 case 0: case 1:
6494 /* no-op */
6495 break;
6496 case 2: case 3:
6497 imm <<= 8;
6498 break;
6499 case 4: case 5:
6500 imm <<= 16;
6501 break;
6502 case 6: case 7:
6503 imm <<= 24;
6504 break;
6505 case 8: case 9:
6506 imm |= imm << 16;
6507 break;
6508 case 10: case 11:
6509 imm = (imm << 8) | (imm << 24);
6510 break;
6511 case 12:
8e31209e 6512 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6513 break;
6514 case 13:
6515 imm = (imm << 16) | 0xffff;
6516 break;
6517 case 14:
6518 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6519 if (invert)
6520 imm = ~imm;
6521 break;
6522 case 15:
7d80fee5
PM
6523 if (invert) {
6524 return 1;
6525 }
9ee6e8bb
PB
6526 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6527 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6528 break;
6529 }
6530 if (invert)
6531 imm = ~imm;
6532
9ee6e8bb
PB
6533 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6534 if (op & 1 && op < 12) {
ad69471c 6535 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6536 if (invert) {
6537 /* The immediate value has already been inverted, so
6538 BIC becomes AND. */
ad69471c 6539 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6540 } else {
ad69471c 6541 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6542 }
9ee6e8bb 6543 } else {
ad69471c 6544 /* VMOV, VMVN. */
7d1b0095 6545 tmp = tcg_temp_new_i32();
9ee6e8bb 6546 if (op == 14 && invert) {
a5a14945 6547 int n;
ad69471c
PB
6548 uint32_t val;
6549 val = 0;
9ee6e8bb
PB
6550 for (n = 0; n < 4; n++) {
6551 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6552 val |= 0xff << (n * 8);
9ee6e8bb 6553 }
ad69471c
PB
6554 tcg_gen_movi_i32(tmp, val);
6555 } else {
6556 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6557 }
9ee6e8bb 6558 }
ad69471c 6559 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6560 }
6561 }
e4b3861d 6562 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6563 if (size != 3) {
6564 op = (insn >> 8) & 0xf;
6565 if ((insn & (1 << 6)) == 0) {
6566 /* Three registers of different lengths. */
6567 int src1_wide;
6568 int src2_wide;
6569 int prewiden;
526d0096
PM
6570 /* undefreq: bit 0 : UNDEF if size == 0
6571 * bit 1 : UNDEF if size == 1
6572 * bit 2 : UNDEF if size == 2
6573 * bit 3 : UNDEF if U == 1
6574 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6575 */
6576 int undefreq;
6577 /* prewiden, src1_wide, src2_wide, undefreq */
6578 static const int neon_3reg_wide[16][4] = {
6579 {1, 0, 0, 0}, /* VADDL */
6580 {1, 1, 0, 0}, /* VADDW */
6581 {1, 0, 0, 0}, /* VSUBL */
6582 {1, 1, 0, 0}, /* VSUBW */
6583 {0, 1, 1, 0}, /* VADDHN */
6584 {0, 0, 0, 0}, /* VABAL */
6585 {0, 1, 1, 0}, /* VSUBHN */
6586 {0, 0, 0, 0}, /* VABDL */
6587 {0, 0, 0, 0}, /* VMLAL */
526d0096 6588 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6589 {0, 0, 0, 0}, /* VMLSL */
526d0096 6590 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6591 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6592 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6593 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6594 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6595 };
6596
6597 prewiden = neon_3reg_wide[op][0];
6598 src1_wide = neon_3reg_wide[op][1];
6599 src2_wide = neon_3reg_wide[op][2];
695272dc 6600 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6601
526d0096
PM
6602 if ((undefreq & (1 << size)) ||
6603 ((undefreq & 8) && u)) {
695272dc
PM
6604 return 1;
6605 }
6606 if ((src1_wide && (rn & 1)) ||
6607 (src2_wide && (rm & 1)) ||
6608 (!src2_wide && (rd & 1))) {
ad69471c 6609 return 1;
695272dc 6610 }
ad69471c 6611
4e624eda
PM
6612 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6613 * outside the loop below as it only performs a single pass.
6614 */
6615 if (op == 14 && size == 2) {
6616 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6617
d614a513 6618 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6619 return 1;
6620 }
6621 tcg_rn = tcg_temp_new_i64();
6622 tcg_rm = tcg_temp_new_i64();
6623 tcg_rd = tcg_temp_new_i64();
6624 neon_load_reg64(tcg_rn, rn);
6625 neon_load_reg64(tcg_rm, rm);
6626 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6627 neon_store_reg64(tcg_rd, rd);
6628 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6629 neon_store_reg64(tcg_rd, rd + 1);
6630 tcg_temp_free_i64(tcg_rn);
6631 tcg_temp_free_i64(tcg_rm);
6632 tcg_temp_free_i64(tcg_rd);
6633 return 0;
6634 }
6635
9ee6e8bb
PB
6636 /* Avoid overlapping operands. Wide source operands are
6637 always aligned so will never overlap with wide
6638 destinations in problematic ways. */
8f8e3aa4 6639 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6640 tmp = neon_load_reg(rm, 1);
6641 neon_store_scratch(2, tmp);
8f8e3aa4 6642 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6643 tmp = neon_load_reg(rn, 1);
6644 neon_store_scratch(2, tmp);
9ee6e8bb 6645 }
39d5492a 6646 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6647 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6648 if (src1_wide) {
6649 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6650 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6651 } else {
ad69471c 6652 if (pass == 1 && rd == rn) {
dd8fbd78 6653 tmp = neon_load_scratch(2);
9ee6e8bb 6654 } else {
ad69471c
PB
6655 tmp = neon_load_reg(rn, pass);
6656 }
6657 if (prewiden) {
6658 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6659 }
6660 }
ad69471c
PB
6661 if (src2_wide) {
6662 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6663 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6664 } else {
ad69471c 6665 if (pass == 1 && rd == rm) {
dd8fbd78 6666 tmp2 = neon_load_scratch(2);
9ee6e8bb 6667 } else {
ad69471c
PB
6668 tmp2 = neon_load_reg(rm, pass);
6669 }
6670 if (prewiden) {
6671 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6672 }
9ee6e8bb
PB
6673 }
6674 switch (op) {
6675 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6676 gen_neon_addl(size);
9ee6e8bb 6677 break;
79b0e534 6678 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6679 gen_neon_subl(size);
9ee6e8bb
PB
6680 break;
6681 case 5: case 7: /* VABAL, VABDL */
6682 switch ((size << 1) | u) {
ad69471c
PB
6683 case 0:
6684 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6685 break;
6686 case 1:
6687 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6688 break;
6689 case 2:
6690 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6691 break;
6692 case 3:
6693 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6694 break;
6695 case 4:
6696 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6697 break;
6698 case 5:
6699 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6700 break;
9ee6e8bb
PB
6701 default: abort();
6702 }
7d1b0095
PM
6703 tcg_temp_free_i32(tmp2);
6704 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6705 break;
6706 case 8: case 9: case 10: case 11: case 12: case 13:
6707 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6708 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6709 break;
6710 case 14: /* Polynomial VMULL */
e5ca24cb 6711 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6712 tcg_temp_free_i32(tmp2);
6713 tcg_temp_free_i32(tmp);
e5ca24cb 6714 break;
695272dc
PM
6715 default: /* 15 is RESERVED: caught earlier */
6716 abort();
9ee6e8bb 6717 }
ebcd88ce
PM
6718 if (op == 13) {
6719 /* VQDMULL */
6720 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6721 neon_store_reg64(cpu_V0, rd + pass);
6722 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6723 /* Accumulate. */
ebcd88ce 6724 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6725 switch (op) {
4dc064e6
PM
6726 case 10: /* VMLSL */
6727 gen_neon_negl(cpu_V0, size);
6728 /* Fall through */
6729 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6730 gen_neon_addl(size);
9ee6e8bb
PB
6731 break;
6732 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6733 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6734 if (op == 11) {
6735 gen_neon_negl(cpu_V0, size);
6736 }
ad69471c
PB
6737 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6738 break;
9ee6e8bb
PB
6739 default:
6740 abort();
6741 }
ad69471c 6742 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6743 } else if (op == 4 || op == 6) {
6744 /* Narrowing operation. */
7d1b0095 6745 tmp = tcg_temp_new_i32();
79b0e534 6746 if (!u) {
9ee6e8bb 6747 switch (size) {
ad69471c
PB
6748 case 0:
6749 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6750 break;
6751 case 1:
6752 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6753 break;
6754 case 2:
6755 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6756 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6757 break;
9ee6e8bb
PB
6758 default: abort();
6759 }
6760 } else {
6761 switch (size) {
ad69471c
PB
6762 case 0:
6763 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6764 break;
6765 case 1:
6766 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6767 break;
6768 case 2:
6769 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6770 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6771 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6772 break;
9ee6e8bb
PB
6773 default: abort();
6774 }
6775 }
ad69471c
PB
6776 if (pass == 0) {
6777 tmp3 = tmp;
6778 } else {
6779 neon_store_reg(rd, 0, tmp3);
6780 neon_store_reg(rd, 1, tmp);
6781 }
9ee6e8bb
PB
6782 } else {
6783 /* Write back the result. */
ad69471c 6784 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6785 }
6786 }
6787 } else {
3e3326df
PM
6788 /* Two registers and a scalar. NB that for ops of this form
6789 * the ARM ARM labels bit 24 as Q, but it is in our variable
6790 * 'u', not 'q'.
6791 */
6792 if (size == 0) {
6793 return 1;
6794 }
9ee6e8bb 6795 switch (op) {
9ee6e8bb 6796 case 1: /* Float VMLA scalar */
9ee6e8bb 6797 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6798 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6799 if (size == 1) {
6800 return 1;
6801 }
6802 /* fall through */
6803 case 0: /* Integer VMLA scalar */
6804 case 4: /* Integer VMLS scalar */
6805 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6806 case 12: /* VQDMULH scalar */
6807 case 13: /* VQRDMULH scalar */
3e3326df
PM
6808 if (u && ((rd | rn) & 1)) {
6809 return 1;
6810 }
dd8fbd78
FN
6811 tmp = neon_get_scalar(size, rm);
6812 neon_store_scratch(0, tmp);
9ee6e8bb 6813 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6814 tmp = neon_load_scratch(0);
6815 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6816 if (op == 12) {
6817 if (size == 1) {
02da0b2d 6818 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6819 } else {
02da0b2d 6820 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6821 }
6822 } else if (op == 13) {
6823 if (size == 1) {
02da0b2d 6824 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6825 } else {
02da0b2d 6826 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6827 }
6828 } else if (op & 1) {
aa47cfdd
PM
6829 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6830 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6831 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6832 } else {
6833 switch (size) {
dd8fbd78
FN
6834 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6835 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6836 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6837 default: abort();
9ee6e8bb
PB
6838 }
6839 }
7d1b0095 6840 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6841 if (op < 8) {
6842 /* Accumulate. */
dd8fbd78 6843 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6844 switch (op) {
6845 case 0:
dd8fbd78 6846 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6847 break;
6848 case 1:
aa47cfdd
PM
6849 {
6850 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6851 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6852 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6853 break;
aa47cfdd 6854 }
9ee6e8bb 6855 case 4:
dd8fbd78 6856 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6857 break;
6858 case 5:
aa47cfdd
PM
6859 {
6860 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6861 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6862 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6863 break;
aa47cfdd 6864 }
9ee6e8bb
PB
6865 default:
6866 abort();
6867 }
7d1b0095 6868 tcg_temp_free_i32(tmp2);
9ee6e8bb 6869 }
dd8fbd78 6870 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6871 }
6872 break;
9ee6e8bb 6873 case 3: /* VQDMLAL scalar */
9ee6e8bb 6874 case 7: /* VQDMLSL scalar */
9ee6e8bb 6875 case 11: /* VQDMULL scalar */
3e3326df 6876 if (u == 1) {
ad69471c 6877 return 1;
3e3326df
PM
6878 }
6879 /* fall through */
6880 case 2: /* VMLAL sclar */
6881 case 6: /* VMLSL scalar */
6882 case 10: /* VMULL scalar */
6883 if (rd & 1) {
6884 return 1;
6885 }
dd8fbd78 6886 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6887 /* We need a copy of tmp2 because gen_neon_mull
6888 * deletes it during pass 0. */
7d1b0095 6889 tmp4 = tcg_temp_new_i32();
c6067f04 6890 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6891 tmp3 = neon_load_reg(rn, 1);
ad69471c 6892
9ee6e8bb 6893 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6894 if (pass == 0) {
6895 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6896 } else {
dd8fbd78 6897 tmp = tmp3;
c6067f04 6898 tmp2 = tmp4;
9ee6e8bb 6899 }
ad69471c 6900 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6901 if (op != 11) {
6902 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6903 }
9ee6e8bb 6904 switch (op) {
4dc064e6
PM
6905 case 6:
6906 gen_neon_negl(cpu_V0, size);
6907 /* Fall through */
6908 case 2:
ad69471c 6909 gen_neon_addl(size);
9ee6e8bb
PB
6910 break;
6911 case 3: case 7:
ad69471c 6912 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6913 if (op == 7) {
6914 gen_neon_negl(cpu_V0, size);
6915 }
ad69471c 6916 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6917 break;
6918 case 10:
6919 /* no-op */
6920 break;
6921 case 11:
ad69471c 6922 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6923 break;
6924 default:
6925 abort();
6926 }
ad69471c 6927 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6928 }
dd8fbd78 6929
dd8fbd78 6930
9ee6e8bb
PB
6931 break;
6932 default: /* 14 and 15 are RESERVED */
6933 return 1;
6934 }
6935 }
6936 } else { /* size == 3 */
6937 if (!u) {
6938 /* Extract. */
9ee6e8bb 6939 imm = (insn >> 8) & 0xf;
ad69471c
PB
6940
6941 if (imm > 7 && !q)
6942 return 1;
6943
52579ea1
PM
6944 if (q && ((rd | rn | rm) & 1)) {
6945 return 1;
6946 }
6947
ad69471c
PB
6948 if (imm == 0) {
6949 neon_load_reg64(cpu_V0, rn);
6950 if (q) {
6951 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6952 }
ad69471c
PB
6953 } else if (imm == 8) {
6954 neon_load_reg64(cpu_V0, rn + 1);
6955 if (q) {
6956 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6957 }
ad69471c 6958 } else if (q) {
a7812ae4 6959 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6960 if (imm < 8) {
6961 neon_load_reg64(cpu_V0, rn);
a7812ae4 6962 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6963 } else {
6964 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6965 neon_load_reg64(tmp64, rm);
ad69471c
PB
6966 }
6967 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6968 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6969 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6970 if (imm < 8) {
6971 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6972 } else {
ad69471c
PB
6973 neon_load_reg64(cpu_V1, rm + 1);
6974 imm -= 8;
9ee6e8bb 6975 }
ad69471c 6976 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6977 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6978 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6979 tcg_temp_free_i64(tmp64);
ad69471c 6980 } else {
a7812ae4 6981 /* BUGFIX */
ad69471c 6982 neon_load_reg64(cpu_V0, rn);
a7812ae4 6983 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6984 neon_load_reg64(cpu_V1, rm);
a7812ae4 6985 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6986 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6987 }
6988 neon_store_reg64(cpu_V0, rd);
6989 if (q) {
6990 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6991 }
6992 } else if ((insn & (1 << 11)) == 0) {
6993 /* Two register misc. */
6994 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6995 size = (insn >> 18) & 3;
600b828c
PM
6996 /* UNDEF for unknown op values and bad op-size combinations */
6997 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6998 return 1;
6999 }
fe8fcf3d
PM
7000 if (neon_2rm_is_v8_op(op) &&
7001 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7002 return 1;
7003 }
fc2a9b37
PM
7004 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7005 q && ((rm | rd) & 1)) {
7006 return 1;
7007 }
9ee6e8bb 7008 switch (op) {
600b828c 7009 case NEON_2RM_VREV64:
9ee6e8bb 7010 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7011 tmp = neon_load_reg(rm, pass * 2);
7012 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7013 switch (size) {
dd8fbd78
FN
7014 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7015 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7016 case 2: /* no-op */ break;
7017 default: abort();
7018 }
dd8fbd78 7019 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7020 if (size == 2) {
dd8fbd78 7021 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7022 } else {
9ee6e8bb 7023 switch (size) {
dd8fbd78
FN
7024 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7025 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7026 default: abort();
7027 }
dd8fbd78 7028 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7029 }
7030 }
7031 break;
600b828c
PM
7032 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7033 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7034 for (pass = 0; pass < q + 1; pass++) {
7035 tmp = neon_load_reg(rm, pass * 2);
7036 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7037 tmp = neon_load_reg(rm, pass * 2 + 1);
7038 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7039 switch (size) {
7040 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7041 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7042 case 2: tcg_gen_add_i64(CPU_V001); break;
7043 default: abort();
7044 }
600b828c 7045 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7046 /* Accumulate. */
ad69471c
PB
7047 neon_load_reg64(cpu_V1, rd + pass);
7048 gen_neon_addl(size);
9ee6e8bb 7049 }
ad69471c 7050 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7051 }
7052 break;
600b828c 7053 case NEON_2RM_VTRN:
9ee6e8bb 7054 if (size == 2) {
a5a14945 7055 int n;
9ee6e8bb 7056 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7057 tmp = neon_load_reg(rm, n);
7058 tmp2 = neon_load_reg(rd, n + 1);
7059 neon_store_reg(rm, n, tmp2);
7060 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7061 }
7062 } else {
7063 goto elementwise;
7064 }
7065 break;
600b828c 7066 case NEON_2RM_VUZP:
02acedf9 7067 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7068 return 1;
9ee6e8bb
PB
7069 }
7070 break;
600b828c 7071 case NEON_2RM_VZIP:
d68a6f3a 7072 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7073 return 1;
9ee6e8bb
PB
7074 }
7075 break;
600b828c
PM
7076 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7077 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7078 if (rm & 1) {
7079 return 1;
7080 }
39d5492a 7081 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 7082 for (pass = 0; pass < 2; pass++) {
ad69471c 7083 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7084 tmp = tcg_temp_new_i32();
600b828c
PM
7085 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7086 tmp, cpu_V0);
ad69471c
PB
7087 if (pass == 0) {
7088 tmp2 = tmp;
7089 } else {
7090 neon_store_reg(rd, 0, tmp2);
7091 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7092 }
9ee6e8bb
PB
7093 }
7094 break;
600b828c 7095 case NEON_2RM_VSHLL:
fc2a9b37 7096 if (q || (rd & 1)) {
9ee6e8bb 7097 return 1;
600b828c 7098 }
ad69471c
PB
7099 tmp = neon_load_reg(rm, 0);
7100 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7101 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7102 if (pass == 1)
7103 tmp = tmp2;
7104 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7105 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7106 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7107 }
7108 break;
600b828c 7109 case NEON_2RM_VCVT_F16_F32:
d614a513 7110 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7111 q || (rm & 1)) {
7112 return 1;
7113 }
7d1b0095
PM
7114 tmp = tcg_temp_new_i32();
7115 tmp2 = tcg_temp_new_i32();
60011498 7116 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7117 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7118 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7119 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7120 tcg_gen_shli_i32(tmp2, tmp2, 16);
7121 tcg_gen_or_i32(tmp2, tmp2, tmp);
7122 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7123 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7124 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7125 neon_store_reg(rd, 0, tmp2);
7d1b0095 7126 tmp2 = tcg_temp_new_i32();
2d981da7 7127 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7128 tcg_gen_shli_i32(tmp2, tmp2, 16);
7129 tcg_gen_or_i32(tmp2, tmp2, tmp);
7130 neon_store_reg(rd, 1, tmp2);
7d1b0095 7131 tcg_temp_free_i32(tmp);
60011498 7132 break;
600b828c 7133 case NEON_2RM_VCVT_F32_F16:
d614a513 7134 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7135 q || (rd & 1)) {
7136 return 1;
7137 }
7d1b0095 7138 tmp3 = tcg_temp_new_i32();
60011498
PB
7139 tmp = neon_load_reg(rm, 0);
7140 tmp2 = neon_load_reg(rm, 1);
7141 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7142 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7143 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7144 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7145 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7146 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7147 tcg_temp_free_i32(tmp);
60011498 7148 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7149 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7150 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7151 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7152 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7153 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7154 tcg_temp_free_i32(tmp2);
7155 tcg_temp_free_i32(tmp3);
60011498 7156 break;
9d935509 7157 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7158 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7159 || ((rm | rd) & 1)) {
7160 return 1;
7161 }
7162 tmp = tcg_const_i32(rd);
7163 tmp2 = tcg_const_i32(rm);
7164
7165 /* Bit 6 is the lowest opcode bit; it distinguishes between
7166 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7167 */
7168 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7169
7170 if (op == NEON_2RM_AESE) {
7171 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7172 } else {
7173 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7174 }
7175 tcg_temp_free_i32(tmp);
7176 tcg_temp_free_i32(tmp2);
7177 tcg_temp_free_i32(tmp3);
7178 break;
f1ecb913 7179 case NEON_2RM_SHA1H:
d614a513 7180 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7181 || ((rm | rd) & 1)) {
7182 return 1;
7183 }
7184 tmp = tcg_const_i32(rd);
7185 tmp2 = tcg_const_i32(rm);
7186
7187 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7188
7189 tcg_temp_free_i32(tmp);
7190 tcg_temp_free_i32(tmp2);
7191 break;
7192 case NEON_2RM_SHA1SU1:
7193 if ((rm | rd) & 1) {
7194 return 1;
7195 }
7196 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7197 if (q) {
d614a513 7198 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7199 return 1;
7200 }
d614a513 7201 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7202 return 1;
7203 }
7204 tmp = tcg_const_i32(rd);
7205 tmp2 = tcg_const_i32(rm);
7206 if (q) {
7207 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7208 } else {
7209 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7210 }
7211 tcg_temp_free_i32(tmp);
7212 tcg_temp_free_i32(tmp2);
7213 break;
9ee6e8bb
PB
7214 default:
7215 elementwise:
7216 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7217 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7218 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7219 neon_reg_offset(rm, pass));
39d5492a 7220 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7221 } else {
dd8fbd78 7222 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7223 }
7224 switch (op) {
600b828c 7225 case NEON_2RM_VREV32:
9ee6e8bb 7226 switch (size) {
dd8fbd78
FN
7227 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7228 case 1: gen_swap_half(tmp); break;
600b828c 7229 default: abort();
9ee6e8bb
PB
7230 }
7231 break;
600b828c 7232 case NEON_2RM_VREV16:
dd8fbd78 7233 gen_rev16(tmp);
9ee6e8bb 7234 break;
600b828c 7235 case NEON_2RM_VCLS:
9ee6e8bb 7236 switch (size) {
dd8fbd78
FN
7237 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7238 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7239 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7240 default: abort();
9ee6e8bb
PB
7241 }
7242 break;
600b828c 7243 case NEON_2RM_VCLZ:
9ee6e8bb 7244 switch (size) {
dd8fbd78
FN
7245 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7246 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7247 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7248 default: abort();
9ee6e8bb
PB
7249 }
7250 break;
600b828c 7251 case NEON_2RM_VCNT:
dd8fbd78 7252 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7253 break;
600b828c 7254 case NEON_2RM_VMVN:
dd8fbd78 7255 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7256 break;
600b828c 7257 case NEON_2RM_VQABS:
9ee6e8bb 7258 switch (size) {
02da0b2d
PM
7259 case 0:
7260 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7261 break;
7262 case 1:
7263 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7264 break;
7265 case 2:
7266 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7267 break;
600b828c 7268 default: abort();
9ee6e8bb
PB
7269 }
7270 break;
600b828c 7271 case NEON_2RM_VQNEG:
9ee6e8bb 7272 switch (size) {
02da0b2d
PM
7273 case 0:
7274 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7275 break;
7276 case 1:
7277 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7278 break;
7279 case 2:
7280 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7281 break;
600b828c 7282 default: abort();
9ee6e8bb
PB
7283 }
7284 break;
600b828c 7285 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7286 tmp2 = tcg_const_i32(0);
9ee6e8bb 7287 switch(size) {
dd8fbd78
FN
7288 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7289 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7290 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7291 default: abort();
9ee6e8bb 7292 }
39d5492a 7293 tcg_temp_free_i32(tmp2);
600b828c 7294 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7295 tcg_gen_not_i32(tmp, tmp);
600b828c 7296 }
9ee6e8bb 7297 break;
600b828c 7298 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7299 tmp2 = tcg_const_i32(0);
9ee6e8bb 7300 switch(size) {
dd8fbd78
FN
7301 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7302 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7303 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7304 default: abort();
9ee6e8bb 7305 }
39d5492a 7306 tcg_temp_free_i32(tmp2);
600b828c 7307 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7308 tcg_gen_not_i32(tmp, tmp);
600b828c 7309 }
9ee6e8bb 7310 break;
600b828c 7311 case NEON_2RM_VCEQ0:
dd8fbd78 7312 tmp2 = tcg_const_i32(0);
9ee6e8bb 7313 switch(size) {
dd8fbd78
FN
7314 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7315 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7316 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7317 default: abort();
9ee6e8bb 7318 }
39d5492a 7319 tcg_temp_free_i32(tmp2);
9ee6e8bb 7320 break;
600b828c 7321 case NEON_2RM_VABS:
9ee6e8bb 7322 switch(size) {
dd8fbd78
FN
7323 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7324 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7325 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7326 default: abort();
9ee6e8bb
PB
7327 }
7328 break;
600b828c 7329 case NEON_2RM_VNEG:
dd8fbd78
FN
7330 tmp2 = tcg_const_i32(0);
7331 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7332 tcg_temp_free_i32(tmp2);
9ee6e8bb 7333 break;
600b828c 7334 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7335 {
7336 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7337 tmp2 = tcg_const_i32(0);
aa47cfdd 7338 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7339 tcg_temp_free_i32(tmp2);
aa47cfdd 7340 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7341 break;
aa47cfdd 7342 }
600b828c 7343 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7344 {
7345 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7346 tmp2 = tcg_const_i32(0);
aa47cfdd 7347 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7348 tcg_temp_free_i32(tmp2);
aa47cfdd 7349 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7350 break;
aa47cfdd 7351 }
600b828c 7352 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7353 {
7354 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7355 tmp2 = tcg_const_i32(0);
aa47cfdd 7356 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7357 tcg_temp_free_i32(tmp2);
aa47cfdd 7358 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7359 break;
aa47cfdd 7360 }
600b828c 7361 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7362 {
7363 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7364 tmp2 = tcg_const_i32(0);
aa47cfdd 7365 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7366 tcg_temp_free_i32(tmp2);
aa47cfdd 7367 tcg_temp_free_ptr(fpstatus);
0e326109 7368 break;
aa47cfdd 7369 }
600b828c 7370 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7371 {
7372 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7373 tmp2 = tcg_const_i32(0);
aa47cfdd 7374 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7375 tcg_temp_free_i32(tmp2);
aa47cfdd 7376 tcg_temp_free_ptr(fpstatus);
0e326109 7377 break;
aa47cfdd 7378 }
600b828c 7379 case NEON_2RM_VABS_F:
4373f3ce 7380 gen_vfp_abs(0);
9ee6e8bb 7381 break;
600b828c 7382 case NEON_2RM_VNEG_F:
4373f3ce 7383 gen_vfp_neg(0);
9ee6e8bb 7384 break;
600b828c 7385 case NEON_2RM_VSWP:
dd8fbd78
FN
7386 tmp2 = neon_load_reg(rd, pass);
7387 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7388 break;
600b828c 7389 case NEON_2RM_VTRN:
dd8fbd78 7390 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7391 switch (size) {
dd8fbd78
FN
7392 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7393 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7394 default: abort();
9ee6e8bb 7395 }
dd8fbd78 7396 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7397 break;
34f7b0a2
WN
7398 case NEON_2RM_VRINTN:
7399 case NEON_2RM_VRINTA:
7400 case NEON_2RM_VRINTM:
7401 case NEON_2RM_VRINTP:
7402 case NEON_2RM_VRINTZ:
7403 {
7404 TCGv_i32 tcg_rmode;
7405 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7406 int rmode;
7407
7408 if (op == NEON_2RM_VRINTZ) {
7409 rmode = FPROUNDING_ZERO;
7410 } else {
7411 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7412 }
7413
7414 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7415 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7416 cpu_env);
7417 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7418 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7419 cpu_env);
7420 tcg_temp_free_ptr(fpstatus);
7421 tcg_temp_free_i32(tcg_rmode);
7422 break;
7423 }
2ce70625
WN
7424 case NEON_2RM_VRINTX:
7425 {
7426 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7427 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7428 tcg_temp_free_ptr(fpstatus);
7429 break;
7430 }
901ad525
WN
7431 case NEON_2RM_VCVTAU:
7432 case NEON_2RM_VCVTAS:
7433 case NEON_2RM_VCVTNU:
7434 case NEON_2RM_VCVTNS:
7435 case NEON_2RM_VCVTPU:
7436 case NEON_2RM_VCVTPS:
7437 case NEON_2RM_VCVTMU:
7438 case NEON_2RM_VCVTMS:
7439 {
7440 bool is_signed = !extract32(insn, 7, 1);
7441 TCGv_ptr fpst = get_fpstatus_ptr(1);
7442 TCGv_i32 tcg_rmode, tcg_shift;
7443 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7444
7445 tcg_shift = tcg_const_i32(0);
7446 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7447 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7448 cpu_env);
7449
7450 if (is_signed) {
7451 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7452 tcg_shift, fpst);
7453 } else {
7454 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7455 tcg_shift, fpst);
7456 }
7457
7458 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7459 cpu_env);
7460 tcg_temp_free_i32(tcg_rmode);
7461 tcg_temp_free_i32(tcg_shift);
7462 tcg_temp_free_ptr(fpst);
7463 break;
7464 }
600b828c 7465 case NEON_2RM_VRECPE:
b6d4443a
AB
7466 {
7467 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7468 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7469 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7470 break;
b6d4443a 7471 }
600b828c 7472 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7473 {
7474 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7475 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7476 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7477 break;
c2fb418e 7478 }
600b828c 7479 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7480 {
7481 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7482 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7483 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7484 break;
b6d4443a 7485 }
600b828c 7486 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7487 {
7488 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7489 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7490 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7491 break;
c2fb418e 7492 }
600b828c 7493 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7494 gen_vfp_sito(0, 1);
9ee6e8bb 7495 break;
600b828c 7496 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7497 gen_vfp_uito(0, 1);
9ee6e8bb 7498 break;
600b828c 7499 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7500 gen_vfp_tosiz(0, 1);
9ee6e8bb 7501 break;
600b828c 7502 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7503 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7504 break;
7505 default:
600b828c
PM
7506 /* Reserved op values were caught by the
7507 * neon_2rm_sizes[] check earlier.
7508 */
7509 abort();
9ee6e8bb 7510 }
600b828c 7511 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7512 tcg_gen_st_f32(cpu_F0s, cpu_env,
7513 neon_reg_offset(rd, pass));
9ee6e8bb 7514 } else {
dd8fbd78 7515 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7516 }
7517 }
7518 break;
7519 }
7520 } else if ((insn & (1 << 10)) == 0) {
7521 /* VTBL, VTBX. */
56907d77
PM
7522 int n = ((insn >> 8) & 3) + 1;
7523 if ((rn + n) > 32) {
7524 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7525 * helper function running off the end of the register file.
7526 */
7527 return 1;
7528 }
7529 n <<= 3;
9ee6e8bb 7530 if (insn & (1 << 6)) {
8f8e3aa4 7531 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7532 } else {
7d1b0095 7533 tmp = tcg_temp_new_i32();
8f8e3aa4 7534 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7535 }
8f8e3aa4 7536 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7537 tmp4 = tcg_const_i32(rn);
7538 tmp5 = tcg_const_i32(n);
9ef39277 7539 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7540 tcg_temp_free_i32(tmp);
9ee6e8bb 7541 if (insn & (1 << 6)) {
8f8e3aa4 7542 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7543 } else {
7d1b0095 7544 tmp = tcg_temp_new_i32();
8f8e3aa4 7545 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7546 }
8f8e3aa4 7547 tmp3 = neon_load_reg(rm, 1);
9ef39277 7548 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7549 tcg_temp_free_i32(tmp5);
7550 tcg_temp_free_i32(tmp4);
8f8e3aa4 7551 neon_store_reg(rd, 0, tmp2);
3018f259 7552 neon_store_reg(rd, 1, tmp3);
7d1b0095 7553 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7554 } else if ((insn & 0x380) == 0) {
7555 /* VDUP */
133da6aa
JR
7556 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7557 return 1;
7558 }
9ee6e8bb 7559 if (insn & (1 << 19)) {
dd8fbd78 7560 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7561 } else {
dd8fbd78 7562 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7563 }
7564 if (insn & (1 << 16)) {
dd8fbd78 7565 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7566 } else if (insn & (1 << 17)) {
7567 if ((insn >> 18) & 1)
dd8fbd78 7568 gen_neon_dup_high16(tmp);
9ee6e8bb 7569 else
dd8fbd78 7570 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7571 }
7572 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7573 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7574 tcg_gen_mov_i32(tmp2, tmp);
7575 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7576 }
7d1b0095 7577 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7578 } else {
7579 return 1;
7580 }
7581 }
7582 }
7583 return 0;
7584}
7585
7dcc1f89 7586static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7587{
4b6a83fb
PM
7588 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7589 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7590
7591 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7592
7593 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7594 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7595 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7596 return 1;
7597 }
d614a513 7598 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7599 return disas_iwmmxt_insn(s, insn);
d614a513 7600 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7601 return disas_dsp_insn(s, insn);
c0f4af17
PM
7602 }
7603 return 1;
4b6a83fb
PM
7604 }
7605
7606 /* Otherwise treat as a generic register access */
7607 is64 = (insn & (1 << 25)) == 0;
7608 if (!is64 && ((insn & (1 << 4)) == 0)) {
7609 /* cdp */
7610 return 1;
7611 }
7612
7613 crm = insn & 0xf;
7614 if (is64) {
7615 crn = 0;
7616 opc1 = (insn >> 4) & 0xf;
7617 opc2 = 0;
7618 rt2 = (insn >> 16) & 0xf;
7619 } else {
7620 crn = (insn >> 16) & 0xf;
7621 opc1 = (insn >> 21) & 7;
7622 opc2 = (insn >> 5) & 7;
7623 rt2 = 0;
7624 }
7625 isread = (insn >> 20) & 1;
7626 rt = (insn >> 12) & 0xf;
7627
60322b39 7628 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7629 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7630 if (ri) {
7631 /* Check access permissions */
dcbff19b 7632 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7633 return 1;
7634 }
7635
c0f4af17 7636 if (ri->accessfn ||
d614a513 7637 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7638 /* Emit code to perform further access permissions checks at
7639 * runtime; this may result in an exception.
c0f4af17
PM
7640 * Note that on XScale all cp0..c13 registers do an access check
7641 * call in order to handle c15_cpar.
f59df3f2
PM
7642 */
7643 TCGv_ptr tmpptr;
3f208fd7 7644 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7645 uint32_t syndrome;
7646
7647 /* Note that since we are an implementation which takes an
7648 * exception on a trapped conditional instruction only if the
7649 * instruction passes its condition code check, we can take
7650 * advantage of the clause in the ARM ARM that allows us to set
7651 * the COND field in the instruction to 0xE in all cases.
7652 * We could fish the actual condition out of the insn (ARM)
7653 * or the condexec bits (Thumb) but it isn't necessary.
7654 */
7655 switch (cpnum) {
7656 case 14:
7657 if (is64) {
7658 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7659 isread, false);
8bcbf37c
PM
7660 } else {
7661 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7662 rt, isread, false);
8bcbf37c
PM
7663 }
7664 break;
7665 case 15:
7666 if (is64) {
7667 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7668 isread, false);
8bcbf37c
PM
7669 } else {
7670 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7671 rt, isread, false);
8bcbf37c
PM
7672 }
7673 break;
7674 default:
7675 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7676 * so this can only happen if this is an ARMv7 or earlier CPU,
7677 * in which case the syndrome information won't actually be
7678 * guest visible.
7679 */
d614a513 7680 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7681 syndrome = syn_uncategorized();
7682 break;
7683 }
7684
43bfa4a1 7685 gen_set_condexec(s);
3977ee5d 7686 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7687 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7688 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7689 tcg_isread = tcg_const_i32(isread);
7690 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7691 tcg_isread);
f59df3f2 7692 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7693 tcg_temp_free_i32(tcg_syn);
3f208fd7 7694 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7695 }
7696
4b6a83fb
PM
7697 /* Handle special cases first */
7698 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7699 case ARM_CP_NOP:
7700 return 0;
7701 case ARM_CP_WFI:
7702 if (isread) {
7703 return 1;
7704 }
eaed129d 7705 gen_set_pc_im(s, s->pc);
dcba3a8d 7706 s->base.is_jmp = DISAS_WFI;
2bee5105 7707 return 0;
4b6a83fb
PM
7708 default:
7709 break;
7710 }
7711
c5a49c63 7712 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7713 gen_io_start();
7714 }
7715
4b6a83fb
PM
7716 if (isread) {
7717 /* Read */
7718 if (is64) {
7719 TCGv_i64 tmp64;
7720 TCGv_i32 tmp;
7721 if (ri->type & ARM_CP_CONST) {
7722 tmp64 = tcg_const_i64(ri->resetvalue);
7723 } else if (ri->readfn) {
7724 TCGv_ptr tmpptr;
4b6a83fb
PM
7725 tmp64 = tcg_temp_new_i64();
7726 tmpptr = tcg_const_ptr(ri);
7727 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7728 tcg_temp_free_ptr(tmpptr);
7729 } else {
7730 tmp64 = tcg_temp_new_i64();
7731 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7732 }
7733 tmp = tcg_temp_new_i32();
ecc7b3aa 7734 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7735 store_reg(s, rt, tmp);
7736 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7737 tmp = tcg_temp_new_i32();
ecc7b3aa 7738 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7739 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7740 store_reg(s, rt2, tmp);
7741 } else {
39d5492a 7742 TCGv_i32 tmp;
4b6a83fb
PM
7743 if (ri->type & ARM_CP_CONST) {
7744 tmp = tcg_const_i32(ri->resetvalue);
7745 } else if (ri->readfn) {
7746 TCGv_ptr tmpptr;
4b6a83fb
PM
7747 tmp = tcg_temp_new_i32();
7748 tmpptr = tcg_const_ptr(ri);
7749 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7750 tcg_temp_free_ptr(tmpptr);
7751 } else {
7752 tmp = load_cpu_offset(ri->fieldoffset);
7753 }
7754 if (rt == 15) {
7755 /* Destination register of r15 for 32 bit loads sets
7756 * the condition codes from the high 4 bits of the value
7757 */
7758 gen_set_nzcv(tmp);
7759 tcg_temp_free_i32(tmp);
7760 } else {
7761 store_reg(s, rt, tmp);
7762 }
7763 }
7764 } else {
7765 /* Write */
7766 if (ri->type & ARM_CP_CONST) {
7767 /* If not forbidden by access permissions, treat as WI */
7768 return 0;
7769 }
7770
7771 if (is64) {
39d5492a 7772 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7773 TCGv_i64 tmp64 = tcg_temp_new_i64();
7774 tmplo = load_reg(s, rt);
7775 tmphi = load_reg(s, rt2);
7776 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7777 tcg_temp_free_i32(tmplo);
7778 tcg_temp_free_i32(tmphi);
7779 if (ri->writefn) {
7780 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7781 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7782 tcg_temp_free_ptr(tmpptr);
7783 } else {
7784 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7785 }
7786 tcg_temp_free_i64(tmp64);
7787 } else {
7788 if (ri->writefn) {
39d5492a 7789 TCGv_i32 tmp;
4b6a83fb 7790 TCGv_ptr tmpptr;
4b6a83fb
PM
7791 tmp = load_reg(s, rt);
7792 tmpptr = tcg_const_ptr(ri);
7793 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7794 tcg_temp_free_ptr(tmpptr);
7795 tcg_temp_free_i32(tmp);
7796 } else {
39d5492a 7797 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7798 store_cpu_offset(tmp, ri->fieldoffset);
7799 }
7800 }
2452731c
PM
7801 }
7802
c5a49c63 7803 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7804 /* I/O operations must end the TB here (whether read or write) */
7805 gen_io_end();
7806 gen_lookup_tb(s);
7807 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7808 /* We default to ending the TB on a coprocessor register write,
7809 * but allow this to be suppressed by the register definition
7810 * (usually only necessary to work around guest bugs).
7811 */
2452731c 7812 gen_lookup_tb(s);
4b6a83fb 7813 }
2452731c 7814
4b6a83fb
PM
7815 return 0;
7816 }
7817
626187d8
PM
7818 /* Unknown register; this might be a guest error or a QEMU
7819 * unimplemented feature.
7820 */
7821 if (is64) {
7822 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7823 "64 bit system register cp:%d opc1: %d crm:%d "
7824 "(%s)\n",
7825 isread ? "read" : "write", cpnum, opc1, crm,
7826 s->ns ? "non-secure" : "secure");
626187d8
PM
7827 } else {
7828 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7829 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7830 "(%s)\n",
7831 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7832 s->ns ? "non-secure" : "secure");
626187d8
PM
7833 }
7834
4a9a539f 7835 return 1;
9ee6e8bb
PB
7836}
7837
5e3f878a
PB
7838
7839/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7840static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7841{
39d5492a 7842 TCGv_i32 tmp;
7d1b0095 7843 tmp = tcg_temp_new_i32();
ecc7b3aa 7844 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7845 store_reg(s, rlow, tmp);
7d1b0095 7846 tmp = tcg_temp_new_i32();
5e3f878a 7847 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7848 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7849 store_reg(s, rhigh, tmp);
7850}
7851
7852/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7853static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7854{
a7812ae4 7855 TCGv_i64 tmp;
39d5492a 7856 TCGv_i32 tmp2;
5e3f878a 7857
36aa55dc 7858 /* Load value and extend to 64 bits. */
a7812ae4 7859 tmp = tcg_temp_new_i64();
5e3f878a
PB
7860 tmp2 = load_reg(s, rlow);
7861 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7862 tcg_temp_free_i32(tmp2);
5e3f878a 7863 tcg_gen_add_i64(val, val, tmp);
b75263d6 7864 tcg_temp_free_i64(tmp);
5e3f878a
PB
7865}
7866
7867/* load and add a 64-bit value from a register pair. */
a7812ae4 7868static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7869{
a7812ae4 7870 TCGv_i64 tmp;
39d5492a
PM
7871 TCGv_i32 tmpl;
7872 TCGv_i32 tmph;
5e3f878a
PB
7873
7874 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7875 tmpl = load_reg(s, rlow);
7876 tmph = load_reg(s, rhigh);
a7812ae4 7877 tmp = tcg_temp_new_i64();
36aa55dc 7878 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7879 tcg_temp_free_i32(tmpl);
7880 tcg_temp_free_i32(tmph);
5e3f878a 7881 tcg_gen_add_i64(val, val, tmp);
b75263d6 7882 tcg_temp_free_i64(tmp);
5e3f878a
PB
7883}
7884
c9f10124 7885/* Set N and Z flags from hi|lo. */
39d5492a 7886static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7887{
c9f10124
RH
7888 tcg_gen_mov_i32(cpu_NF, hi);
7889 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7890}
7891
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* size is log2 of the access width; MO_ALIGN requests an alignment
     * check, and s->be_data supplies the guest endianness.
     */
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        /* Doubleword form: one 64-bit load split across the rt/rt2 pair */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Record the (zero-extended) address for the matching store-exclusive */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7924
static void gen_clrex(DisasContext *s)
{
    /* Invalidate the exclusive monitor: -1 can never equal a real
     * exclusive address, which gen_load_exclusive() records as a
     * zero-extended 32-bit value.
     */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7929
426f5abc 7930static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7931 TCGv_i32 addr, int size)
426f5abc 7932{
354161b3
EC
7933 TCGv_i32 t0, t1, t2;
7934 TCGv_i64 extaddr;
7935 TCGv taddr;
42a268c2
RH
7936 TCGLabel *done_label;
7937 TCGLabel *fail_label;
354161b3 7938 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
7939
7940 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7941 [addr] = {Rt};
7942 {Rd} = 0;
7943 } else {
7944 {Rd} = 1;
7945 } */
7946 fail_label = gen_new_label();
7947 done_label = gen_new_label();
03d05e2d
PM
7948 extaddr = tcg_temp_new_i64();
7949 tcg_gen_extu_i32_i64(extaddr, addr);
7950 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7951 tcg_temp_free_i64(extaddr);
7952
354161b3
EC
7953 taddr = gen_aa32_addr(s, addr, opc);
7954 t0 = tcg_temp_new_i32();
7955 t1 = load_reg(s, rt);
426f5abc 7956 if (size == 3) {
354161b3
EC
7957 TCGv_i64 o64 = tcg_temp_new_i64();
7958 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 7959
354161b3
EC
7960 t2 = load_reg(s, rt2);
7961 tcg_gen_concat_i32_i64(n64, t1, t2);
7962 tcg_temp_free_i32(t2);
7963 gen_aa32_frob64(s, n64);
03d05e2d 7964
354161b3
EC
7965 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7966 get_mem_index(s), opc);
7967 tcg_temp_free_i64(n64);
7968
7969 gen_aa32_frob64(s, o64);
7970 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7971 tcg_gen_extrl_i64_i32(t0, o64);
7972
7973 tcg_temp_free_i64(o64);
7974 } else {
7975 t2 = tcg_temp_new_i32();
7976 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7977 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7978 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7979 tcg_temp_free_i32(t2);
426f5abc 7980 }
354161b3
EC
7981 tcg_temp_free_i32(t1);
7982 tcg_temp_free(taddr);
7983 tcg_gen_mov_i32(cpu_R[rd], t0);
7984 tcg_temp_free_i32(t0);
426f5abc 7985 tcg_gen_br(done_label);
354161b3 7986
426f5abc
PB
7987 gen_set_label(fail_label);
7988 tcg_gen_movi_i32(cpu_R[rd], 1);
7989 gen_set_label(done_label);
03d05e2d 7990 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 7991}
426f5abc 7992
81465888
PM
7993/* gen_srs:
7994 * @env: CPUARMState
7995 * @s: DisasContext
7996 * @mode: mode field from insn (which stack to store to)
7997 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7998 * @writeback: true if writeback bit set
7999 *
8000 * Generate code for the SRS (Store Return State) insn.
8001 */
8002static void gen_srs(DisasContext *s,
8003 uint32_t mode, uint32_t amode, bool writeback)
8004{
8005 int32_t offset;
cbc0326b
PM
8006 TCGv_i32 addr, tmp;
8007 bool undef = false;
8008
8009 /* SRS is:
8010 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 8011 * and specified mode is monitor mode
cbc0326b
PM
8012 * - UNDEFINED in Hyp mode
8013 * - UNPREDICTABLE in User or System mode
8014 * - UNPREDICTABLE if the specified mode is:
8015 * -- not implemented
8016 * -- not a valid mode number
8017 * -- a mode that's at a higher exception level
8018 * -- Monitor, if we are Non-secure
f01377f5 8019 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 8020 */
ba63cf47 8021 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
8022 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8023 return;
8024 }
8025
8026 if (s->current_el == 0 || s->current_el == 2) {
8027 undef = true;
8028 }
8029
8030 switch (mode) {
8031 case ARM_CPU_MODE_USR:
8032 case ARM_CPU_MODE_FIQ:
8033 case ARM_CPU_MODE_IRQ:
8034 case ARM_CPU_MODE_SVC:
8035 case ARM_CPU_MODE_ABT:
8036 case ARM_CPU_MODE_UND:
8037 case ARM_CPU_MODE_SYS:
8038 break;
8039 case ARM_CPU_MODE_HYP:
8040 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8041 undef = true;
8042 }
8043 break;
8044 case ARM_CPU_MODE_MON:
8045 /* No need to check specifically for "are we non-secure" because
8046 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8047 * so if this isn't EL3 then we must be non-secure.
8048 */
8049 if (s->current_el != 3) {
8050 undef = true;
8051 }
8052 break;
8053 default:
8054 undef = true;
8055 }
8056
8057 if (undef) {
8058 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8059 default_exception_el(s));
8060 return;
8061 }
8062
8063 addr = tcg_temp_new_i32();
8064 tmp = tcg_const_i32(mode);
f01377f5
PM
8065 /* get_r13_banked() will raise an exception if called from System mode */
8066 gen_set_condexec(s);
8067 gen_set_pc_im(s, s->pc - 4);
81465888
PM
8068 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8069 tcg_temp_free_i32(tmp);
8070 switch (amode) {
8071 case 0: /* DA */
8072 offset = -4;
8073 break;
8074 case 1: /* IA */
8075 offset = 0;
8076 break;
8077 case 2: /* DB */
8078 offset = -8;
8079 break;
8080 case 3: /* IB */
8081 offset = 4;
8082 break;
8083 default:
8084 abort();
8085 }
8086 tcg_gen_addi_i32(addr, addr, offset);
8087 tmp = load_reg(s, 14);
12dcc321 8088 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8089 tcg_temp_free_i32(tmp);
81465888
PM
8090 tmp = load_cpu_field(spsr);
8091 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 8092 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8093 tcg_temp_free_i32(tmp);
81465888
PM
8094 if (writeback) {
8095 switch (amode) {
8096 case 0:
8097 offset = -8;
8098 break;
8099 case 1:
8100 offset = 4;
8101 break;
8102 case 2:
8103 offset = -4;
8104 break;
8105 case 3:
8106 offset = 0;
8107 break;
8108 default:
8109 abort();
8110 }
8111 tcg_gen_addi_i32(addr, addr, offset);
8112 tmp = tcg_const_i32(mode);
8113 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8114 tcg_temp_free_i32(tmp);
8115 }
8116 tcg_temp_free_i32(addr);
dcba3a8d 8117 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
8118}
8119
f4df2210 8120static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8121{
f4df2210 8122 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8123 TCGv_i32 tmp;
8124 TCGv_i32 tmp2;
8125 TCGv_i32 tmp3;
8126 TCGv_i32 addr;
a7812ae4 8127 TCGv_i64 tmp64;
9ee6e8bb 8128
e13886e3
PM
8129 /* M variants do not implement ARM mode; this must raise the INVSTATE
8130 * UsageFault exception.
8131 */
b53d8923 8132 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8133 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8134 default_exception_el(s));
8135 return;
b53d8923 8136 }
9ee6e8bb
PB
8137 cond = insn >> 28;
8138 if (cond == 0xf){
be5e7a76
DES
8139 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8140 * choose to UNDEF. In ARMv5 and above the space is used
8141 * for miscellaneous unconditional instructions.
8142 */
8143 ARCH(5);
8144
9ee6e8bb
PB
8145 /* Unconditional instructions. */
8146 if (((insn >> 25) & 7) == 1) {
8147 /* NEON Data processing. */
d614a513 8148 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8149 goto illegal_op;
d614a513 8150 }
9ee6e8bb 8151
7dcc1f89 8152 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8153 goto illegal_op;
7dcc1f89 8154 }
9ee6e8bb
PB
8155 return;
8156 }
8157 if ((insn & 0x0f100000) == 0x04000000) {
8158 /* NEON load/store. */
d614a513 8159 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8160 goto illegal_op;
d614a513 8161 }
9ee6e8bb 8162
7dcc1f89 8163 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8164 goto illegal_op;
7dcc1f89 8165 }
9ee6e8bb
PB
8166 return;
8167 }
6a57f3eb
WN
8168 if ((insn & 0x0f000e10) == 0x0e000a00) {
8169 /* VFP. */
7dcc1f89 8170 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8171 goto illegal_op;
8172 }
8173 return;
8174 }
3d185e5d
PM
8175 if (((insn & 0x0f30f000) == 0x0510f000) ||
8176 ((insn & 0x0f30f010) == 0x0710f000)) {
8177 if ((insn & (1 << 22)) == 0) {
8178 /* PLDW; v7MP */
d614a513 8179 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8180 goto illegal_op;
8181 }
8182 }
8183 /* Otherwise PLD; v5TE+ */
be5e7a76 8184 ARCH(5TE);
3d185e5d
PM
8185 return;
8186 }
8187 if (((insn & 0x0f70f000) == 0x0450f000) ||
8188 ((insn & 0x0f70f010) == 0x0650f000)) {
8189 ARCH(7);
8190 return; /* PLI; V7 */
8191 }
8192 if (((insn & 0x0f700000) == 0x04100000) ||
8193 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8194 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8195 goto illegal_op;
8196 }
8197 return; /* v7MP: Unallocated memory hint: must NOP */
8198 }
8199
8200 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8201 ARCH(6);
8202 /* setend */
9886ecdf
PB
8203 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8204 gen_helper_setend(cpu_env);
dcba3a8d 8205 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8206 }
8207 return;
8208 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8209 switch ((insn >> 4) & 0xf) {
8210 case 1: /* clrex */
8211 ARCH(6K);
426f5abc 8212 gen_clrex(s);
9ee6e8bb
PB
8213 return;
8214 case 4: /* dsb */
8215 case 5: /* dmb */
9ee6e8bb 8216 ARCH(7);
61e4c432 8217 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8218 return;
6df99dec
SS
8219 case 6: /* isb */
8220 /* We need to break the TB after this insn to execute
8221 * self-modifying code correctly and also to take
8222 * any pending interrupts immediately.
8223 */
0b609cc1 8224 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8225 return;
9ee6e8bb
PB
8226 default:
8227 goto illegal_op;
8228 }
8229 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8230 /* srs */
81465888
PM
8231 ARCH(6);
8232 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8233 return;
ea825eee 8234 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8235 /* rfe */
c67b6b71 8236 int32_t offset;
9ee6e8bb
PB
8237 if (IS_USER(s))
8238 goto illegal_op;
8239 ARCH(6);
8240 rn = (insn >> 16) & 0xf;
b0109805 8241 addr = load_reg(s, rn);
9ee6e8bb
PB
8242 i = (insn >> 23) & 3;
8243 switch (i) {
b0109805 8244 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8245 case 1: offset = 0; break; /* IA */
8246 case 2: offset = -8; break; /* DB */
b0109805 8247 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8248 default: abort();
8249 }
8250 if (offset)
b0109805
PB
8251 tcg_gen_addi_i32(addr, addr, offset);
8252 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8253 tmp = tcg_temp_new_i32();
12dcc321 8254 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8255 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8256 tmp2 = tcg_temp_new_i32();
12dcc321 8257 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8258 if (insn & (1 << 21)) {
8259 /* Base writeback. */
8260 switch (i) {
b0109805 8261 case 0: offset = -8; break;
c67b6b71
FN
8262 case 1: offset = 4; break;
8263 case 2: offset = -4; break;
b0109805 8264 case 3: offset = 0; break;
9ee6e8bb
PB
8265 default: abort();
8266 }
8267 if (offset)
b0109805
PB
8268 tcg_gen_addi_i32(addr, addr, offset);
8269 store_reg(s, rn, addr);
8270 } else {
7d1b0095 8271 tcg_temp_free_i32(addr);
9ee6e8bb 8272 }
b0109805 8273 gen_rfe(s, tmp, tmp2);
c67b6b71 8274 return;
9ee6e8bb
PB
8275 } else if ((insn & 0x0e000000) == 0x0a000000) {
8276 /* branch link and change to thumb (blx <offset>) */
8277 int32_t offset;
8278
8279 val = (uint32_t)s->pc;
7d1b0095 8280 tmp = tcg_temp_new_i32();
d9ba4830
PB
8281 tcg_gen_movi_i32(tmp, val);
8282 store_reg(s, 14, tmp);
9ee6e8bb
PB
8283 /* Sign-extend the 24-bit offset */
8284 offset = (((int32_t)insn) << 8) >> 8;
8285 /* offset * 4 + bit24 * 2 + (thumb bit) */
8286 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8287 /* pipeline offset */
8288 val += 4;
be5e7a76 8289 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8290 gen_bx_im(s, val);
9ee6e8bb
PB
8291 return;
8292 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8293 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8294 /* iWMMXt register transfer. */
c0f4af17 8295 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8296 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8297 return;
c0f4af17
PM
8298 }
8299 }
9ee6e8bb
PB
8300 }
8301 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8302 /* Coprocessor double register transfer. */
be5e7a76 8303 ARCH(5TE);
9ee6e8bb
PB
8304 } else if ((insn & 0x0f000010) == 0x0e000010) {
8305 /* Additional coprocessor register transfer. */
7997d92f 8306 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8307 uint32_t mask;
8308 uint32_t val;
8309 /* cps (privileged) */
8310 if (IS_USER(s))
8311 return;
8312 mask = val = 0;
8313 if (insn & (1 << 19)) {
8314 if (insn & (1 << 8))
8315 mask |= CPSR_A;
8316 if (insn & (1 << 7))
8317 mask |= CPSR_I;
8318 if (insn & (1 << 6))
8319 mask |= CPSR_F;
8320 if (insn & (1 << 18))
8321 val |= mask;
8322 }
7997d92f 8323 if (insn & (1 << 17)) {
9ee6e8bb
PB
8324 mask |= CPSR_M;
8325 val |= (insn & 0x1f);
8326 }
8327 if (mask) {
2fbac54b 8328 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8329 }
8330 return;
8331 }
8332 goto illegal_op;
8333 }
8334 if (cond != 0xe) {
8335 /* if not always execute, we generate a conditional jump to
8336 next instruction */
8337 s->condlabel = gen_new_label();
39fb730a 8338 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8339 s->condjmp = 1;
8340 }
8341 if ((insn & 0x0f900000) == 0x03000000) {
8342 if ((insn & (1 << 21)) == 0) {
8343 ARCH(6T2);
8344 rd = (insn >> 12) & 0xf;
8345 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8346 if ((insn & (1 << 22)) == 0) {
8347 /* MOVW */
7d1b0095 8348 tmp = tcg_temp_new_i32();
5e3f878a 8349 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8350 } else {
8351 /* MOVT */
5e3f878a 8352 tmp = load_reg(s, rd);
86831435 8353 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8354 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8355 }
5e3f878a 8356 store_reg(s, rd, tmp);
9ee6e8bb
PB
8357 } else {
8358 if (((insn >> 12) & 0xf) != 0xf)
8359 goto illegal_op;
8360 if (((insn >> 16) & 0xf) == 0) {
8361 gen_nop_hint(s, insn & 0xff);
8362 } else {
8363 /* CPSR = immediate */
8364 val = insn & 0xff;
8365 shift = ((insn >> 8) & 0xf) * 2;
8366 if (shift)
8367 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8368 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8369 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8370 i, val)) {
9ee6e8bb 8371 goto illegal_op;
7dcc1f89 8372 }
9ee6e8bb
PB
8373 }
8374 }
8375 } else if ((insn & 0x0f900000) == 0x01000000
8376 && (insn & 0x00000090) != 0x00000090) {
8377 /* miscellaneous instructions */
8378 op1 = (insn >> 21) & 3;
8379 sh = (insn >> 4) & 0xf;
8380 rm = insn & 0xf;
8381 switch (sh) {
8bfd0550
PM
8382 case 0x0: /* MSR, MRS */
8383 if (insn & (1 << 9)) {
8384 /* MSR (banked) and MRS (banked) */
8385 int sysm = extract32(insn, 16, 4) |
8386 (extract32(insn, 8, 1) << 4);
8387 int r = extract32(insn, 22, 1);
8388
8389 if (op1 & 1) {
8390 /* MSR (banked) */
8391 gen_msr_banked(s, r, sysm, rm);
8392 } else {
8393 /* MRS (banked) */
8394 int rd = extract32(insn, 12, 4);
8395
8396 gen_mrs_banked(s, r, sysm, rd);
8397 }
8398 break;
8399 }
8400
8401 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8402 if (op1 & 1) {
8403 /* PSR = reg */
2fbac54b 8404 tmp = load_reg(s, rm);
9ee6e8bb 8405 i = ((op1 & 2) != 0);
7dcc1f89 8406 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8407 goto illegal_op;
8408 } else {
8409 /* reg = PSR */
8410 rd = (insn >> 12) & 0xf;
8411 if (op1 & 2) {
8412 if (IS_USER(s))
8413 goto illegal_op;
d9ba4830 8414 tmp = load_cpu_field(spsr);
9ee6e8bb 8415 } else {
7d1b0095 8416 tmp = tcg_temp_new_i32();
9ef39277 8417 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8418 }
d9ba4830 8419 store_reg(s, rd, tmp);
9ee6e8bb
PB
8420 }
8421 break;
8422 case 0x1:
8423 if (op1 == 1) {
8424 /* branch/exchange thumb (bx). */
be5e7a76 8425 ARCH(4T);
d9ba4830
PB
8426 tmp = load_reg(s, rm);
8427 gen_bx(s, tmp);
9ee6e8bb
PB
8428 } else if (op1 == 3) {
8429 /* clz */
be5e7a76 8430 ARCH(5);
9ee6e8bb 8431 rd = (insn >> 12) & 0xf;
1497c961 8432 tmp = load_reg(s, rm);
7539a012 8433 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8434 store_reg(s, rd, tmp);
9ee6e8bb
PB
8435 } else {
8436 goto illegal_op;
8437 }
8438 break;
8439 case 0x2:
8440 if (op1 == 1) {
8441 ARCH(5J); /* bxj */
8442 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8443 tmp = load_reg(s, rm);
8444 gen_bx(s, tmp);
9ee6e8bb
PB
8445 } else {
8446 goto illegal_op;
8447 }
8448 break;
8449 case 0x3:
8450 if (op1 != 1)
8451 goto illegal_op;
8452
be5e7a76 8453 ARCH(5);
9ee6e8bb 8454 /* branch link/exchange thumb (blx) */
d9ba4830 8455 tmp = load_reg(s, rm);
7d1b0095 8456 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8457 tcg_gen_movi_i32(tmp2, s->pc);
8458 store_reg(s, 14, tmp2);
8459 gen_bx(s, tmp);
9ee6e8bb 8460 break;
eb0ecd5a
WN
8461 case 0x4:
8462 {
8463 /* crc32/crc32c */
8464 uint32_t c = extract32(insn, 8, 4);
8465
8466 /* Check this CPU supports ARMv8 CRC instructions.
8467 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8468 * Bits 8, 10 and 11 should be zero.
8469 */
d614a513 8470 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8471 (c & 0xd) != 0) {
8472 goto illegal_op;
8473 }
8474
8475 rn = extract32(insn, 16, 4);
8476 rd = extract32(insn, 12, 4);
8477
8478 tmp = load_reg(s, rn);
8479 tmp2 = load_reg(s, rm);
aa633469
PM
8480 if (op1 == 0) {
8481 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8482 } else if (op1 == 1) {
8483 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8484 }
eb0ecd5a
WN
8485 tmp3 = tcg_const_i32(1 << op1);
8486 if (c & 0x2) {
8487 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8488 } else {
8489 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8490 }
8491 tcg_temp_free_i32(tmp2);
8492 tcg_temp_free_i32(tmp3);
8493 store_reg(s, rd, tmp);
8494 break;
8495 }
9ee6e8bb 8496 case 0x5: /* saturating add/subtract */
be5e7a76 8497 ARCH(5TE);
9ee6e8bb
PB
8498 rd = (insn >> 12) & 0xf;
8499 rn = (insn >> 16) & 0xf;
b40d0353 8500 tmp = load_reg(s, rm);
5e3f878a 8501 tmp2 = load_reg(s, rn);
9ee6e8bb 8502 if (op1 & 2)
9ef39277 8503 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8504 if (op1 & 1)
9ef39277 8505 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8506 else
9ef39277 8507 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8508 tcg_temp_free_i32(tmp2);
5e3f878a 8509 store_reg(s, rd, tmp);
9ee6e8bb 8510 break;
49e14940 8511 case 7:
d4a2dc67
PM
8512 {
8513 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8514 switch (op1) {
19a6e31c
PM
8515 case 0:
8516 /* HLT */
8517 gen_hlt(s, imm16);
8518 break;
37e6456e
PM
8519 case 1:
8520 /* bkpt */
8521 ARCH(5);
8522 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8523 syn_aa32_bkpt(imm16, false),
8524 default_exception_el(s));
37e6456e
PM
8525 break;
8526 case 2:
8527 /* Hypervisor call (v7) */
8528 ARCH(7);
8529 if (IS_USER(s)) {
8530 goto illegal_op;
8531 }
8532 gen_hvc(s, imm16);
8533 break;
8534 case 3:
8535 /* Secure monitor call (v6+) */
8536 ARCH(6K);
8537 if (IS_USER(s)) {
8538 goto illegal_op;
8539 }
8540 gen_smc(s);
8541 break;
8542 default:
19a6e31c 8543 g_assert_not_reached();
49e14940 8544 }
9ee6e8bb 8545 break;
d4a2dc67 8546 }
9ee6e8bb
PB
8547 case 0x8: /* signed multiply */
8548 case 0xa:
8549 case 0xc:
8550 case 0xe:
be5e7a76 8551 ARCH(5TE);
9ee6e8bb
PB
8552 rs = (insn >> 8) & 0xf;
8553 rn = (insn >> 12) & 0xf;
8554 rd = (insn >> 16) & 0xf;
8555 if (op1 == 1) {
8556 /* (32 * 16) >> 16 */
5e3f878a
PB
8557 tmp = load_reg(s, rm);
8558 tmp2 = load_reg(s, rs);
9ee6e8bb 8559 if (sh & 4)
5e3f878a 8560 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8561 else
5e3f878a 8562 gen_sxth(tmp2);
a7812ae4
PB
8563 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8564 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8565 tmp = tcg_temp_new_i32();
ecc7b3aa 8566 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8567 tcg_temp_free_i64(tmp64);
9ee6e8bb 8568 if ((sh & 2) == 0) {
5e3f878a 8569 tmp2 = load_reg(s, rn);
9ef39277 8570 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8571 tcg_temp_free_i32(tmp2);
9ee6e8bb 8572 }
5e3f878a 8573 store_reg(s, rd, tmp);
9ee6e8bb
PB
8574 } else {
8575 /* 16 * 16 */
5e3f878a
PB
8576 tmp = load_reg(s, rm);
8577 tmp2 = load_reg(s, rs);
8578 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8579 tcg_temp_free_i32(tmp2);
9ee6e8bb 8580 if (op1 == 2) {
a7812ae4
PB
8581 tmp64 = tcg_temp_new_i64();
8582 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8583 tcg_temp_free_i32(tmp);
a7812ae4
PB
8584 gen_addq(s, tmp64, rn, rd);
8585 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8586 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8587 } else {
8588 if (op1 == 0) {
5e3f878a 8589 tmp2 = load_reg(s, rn);
9ef39277 8590 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8591 tcg_temp_free_i32(tmp2);
9ee6e8bb 8592 }
5e3f878a 8593 store_reg(s, rd, tmp);
9ee6e8bb
PB
8594 }
8595 }
8596 break;
8597 default:
8598 goto illegal_op;
8599 }
8600 } else if (((insn & 0x0e000000) == 0 &&
8601 (insn & 0x00000090) != 0x90) ||
8602 ((insn & 0x0e000000) == (1 << 25))) {
8603 int set_cc, logic_cc, shiftop;
8604
8605 op1 = (insn >> 21) & 0xf;
8606 set_cc = (insn >> 20) & 1;
8607 logic_cc = table_logic_cc[op1] & set_cc;
8608
8609 /* data processing instruction */
8610 if (insn & (1 << 25)) {
8611 /* immediate operand */
8612 val = insn & 0xff;
8613 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8614 if (shift) {
9ee6e8bb 8615 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8616 }
7d1b0095 8617 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8618 tcg_gen_movi_i32(tmp2, val);
8619 if (logic_cc && shift) {
8620 gen_set_CF_bit31(tmp2);
8621 }
9ee6e8bb
PB
8622 } else {
8623 /* register */
8624 rm = (insn) & 0xf;
e9bb4aa9 8625 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8626 shiftop = (insn >> 5) & 3;
8627 if (!(insn & (1 << 4))) {
8628 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8629 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8630 } else {
8631 rs = (insn >> 8) & 0xf;
8984bd2e 8632 tmp = load_reg(s, rs);
e9bb4aa9 8633 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8634 }
8635 }
8636 if (op1 != 0x0f && op1 != 0x0d) {
8637 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8638 tmp = load_reg(s, rn);
8639 } else {
39d5492a 8640 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8641 }
8642 rd = (insn >> 12) & 0xf;
8643 switch(op1) {
8644 case 0x00:
e9bb4aa9
JR
8645 tcg_gen_and_i32(tmp, tmp, tmp2);
8646 if (logic_cc) {
8647 gen_logic_CC(tmp);
8648 }
7dcc1f89 8649 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8650 break;
8651 case 0x01:
e9bb4aa9
JR
8652 tcg_gen_xor_i32(tmp, tmp, tmp2);
8653 if (logic_cc) {
8654 gen_logic_CC(tmp);
8655 }
7dcc1f89 8656 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8657 break;
8658 case 0x02:
8659 if (set_cc && rd == 15) {
8660 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8661 if (IS_USER(s)) {
9ee6e8bb 8662 goto illegal_op;
e9bb4aa9 8663 }
72485ec4 8664 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8665 gen_exception_return(s, tmp);
9ee6e8bb 8666 } else {
e9bb4aa9 8667 if (set_cc) {
72485ec4 8668 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8669 } else {
8670 tcg_gen_sub_i32(tmp, tmp, tmp2);
8671 }
7dcc1f89 8672 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8673 }
8674 break;
8675 case 0x03:
e9bb4aa9 8676 if (set_cc) {
72485ec4 8677 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8678 } else {
8679 tcg_gen_sub_i32(tmp, tmp2, tmp);
8680 }
7dcc1f89 8681 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8682 break;
8683 case 0x04:
e9bb4aa9 8684 if (set_cc) {
72485ec4 8685 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8686 } else {
8687 tcg_gen_add_i32(tmp, tmp, tmp2);
8688 }
7dcc1f89 8689 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8690 break;
8691 case 0x05:
e9bb4aa9 8692 if (set_cc) {
49b4c31e 8693 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8694 } else {
8695 gen_add_carry(tmp, tmp, tmp2);
8696 }
7dcc1f89 8697 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8698 break;
8699 case 0x06:
e9bb4aa9 8700 if (set_cc) {
2de68a49 8701 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8702 } else {
8703 gen_sub_carry(tmp, tmp, tmp2);
8704 }
7dcc1f89 8705 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8706 break;
8707 case 0x07:
e9bb4aa9 8708 if (set_cc) {
2de68a49 8709 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8710 } else {
8711 gen_sub_carry(tmp, tmp2, tmp);
8712 }
7dcc1f89 8713 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8714 break;
8715 case 0x08:
8716 if (set_cc) {
e9bb4aa9
JR
8717 tcg_gen_and_i32(tmp, tmp, tmp2);
8718 gen_logic_CC(tmp);
9ee6e8bb 8719 }
7d1b0095 8720 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8721 break;
8722 case 0x09:
8723 if (set_cc) {
e9bb4aa9
JR
8724 tcg_gen_xor_i32(tmp, tmp, tmp2);
8725 gen_logic_CC(tmp);
9ee6e8bb 8726 }
7d1b0095 8727 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8728 break;
8729 case 0x0a:
8730 if (set_cc) {
72485ec4 8731 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8732 }
7d1b0095 8733 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8734 break;
8735 case 0x0b:
8736 if (set_cc) {
72485ec4 8737 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8738 }
7d1b0095 8739 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8740 break;
8741 case 0x0c:
e9bb4aa9
JR
8742 tcg_gen_or_i32(tmp, tmp, tmp2);
8743 if (logic_cc) {
8744 gen_logic_CC(tmp);
8745 }
7dcc1f89 8746 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8747 break;
8748 case 0x0d:
8749 if (logic_cc && rd == 15) {
8750 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8751 if (IS_USER(s)) {
9ee6e8bb 8752 goto illegal_op;
e9bb4aa9
JR
8753 }
8754 gen_exception_return(s, tmp2);
9ee6e8bb 8755 } else {
e9bb4aa9
JR
8756 if (logic_cc) {
8757 gen_logic_CC(tmp2);
8758 }
7dcc1f89 8759 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8760 }
8761 break;
8762 case 0x0e:
f669df27 8763 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8764 if (logic_cc) {
8765 gen_logic_CC(tmp);
8766 }
7dcc1f89 8767 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8768 break;
8769 default:
8770 case 0x0f:
e9bb4aa9
JR
8771 tcg_gen_not_i32(tmp2, tmp2);
8772 if (logic_cc) {
8773 gen_logic_CC(tmp2);
8774 }
7dcc1f89 8775 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8776 break;
8777 }
e9bb4aa9 8778 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8779 tcg_temp_free_i32(tmp2);
e9bb4aa9 8780 }
9ee6e8bb
PB
8781 } else {
8782 /* other instructions */
8783 op1 = (insn >> 24) & 0xf;
8784 switch(op1) {
8785 case 0x0:
8786 case 0x1:
8787 /* multiplies, extra load/stores */
8788 sh = (insn >> 5) & 3;
8789 if (sh == 0) {
8790 if (op1 == 0x0) {
8791 rd = (insn >> 16) & 0xf;
8792 rn = (insn >> 12) & 0xf;
8793 rs = (insn >> 8) & 0xf;
8794 rm = (insn) & 0xf;
8795 op1 = (insn >> 20) & 0xf;
8796 switch (op1) {
8797 case 0: case 1: case 2: case 3: case 6:
8798 /* 32 bit mul */
5e3f878a
PB
8799 tmp = load_reg(s, rs);
8800 tmp2 = load_reg(s, rm);
8801 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8802 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8803 if (insn & (1 << 22)) {
8804 /* Subtract (mls) */
8805 ARCH(6T2);
5e3f878a
PB
8806 tmp2 = load_reg(s, rn);
8807 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8808 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8809 } else if (insn & (1 << 21)) {
8810 /* Add */
5e3f878a
PB
8811 tmp2 = load_reg(s, rn);
8812 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8813 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8814 }
8815 if (insn & (1 << 20))
5e3f878a
PB
8816 gen_logic_CC(tmp);
8817 store_reg(s, rd, tmp);
9ee6e8bb 8818 break;
8aac08b1
AJ
8819 case 4:
8820 /* 64 bit mul double accumulate (UMAAL) */
8821 ARCH(6);
8822 tmp = load_reg(s, rs);
8823 tmp2 = load_reg(s, rm);
8824 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8825 gen_addq_lo(s, tmp64, rn);
8826 gen_addq_lo(s, tmp64, rd);
8827 gen_storeq_reg(s, rn, rd, tmp64);
8828 tcg_temp_free_i64(tmp64);
8829 break;
8830 case 8: case 9: case 10: case 11:
8831 case 12: case 13: case 14: case 15:
8832 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8833 tmp = load_reg(s, rs);
8834 tmp2 = load_reg(s, rm);
8aac08b1 8835 if (insn & (1 << 22)) {
c9f10124 8836 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8837 } else {
c9f10124 8838 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8839 }
8840 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8841 TCGv_i32 al = load_reg(s, rn);
8842 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8843 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8844 tcg_temp_free_i32(al);
8845 tcg_temp_free_i32(ah);
9ee6e8bb 8846 }
8aac08b1 8847 if (insn & (1 << 20)) {
c9f10124 8848 gen_logicq_cc(tmp, tmp2);
8aac08b1 8849 }
c9f10124
RH
8850 store_reg(s, rn, tmp);
8851 store_reg(s, rd, tmp2);
9ee6e8bb 8852 break;
8aac08b1
AJ
8853 default:
8854 goto illegal_op;
9ee6e8bb
PB
8855 }
8856 } else {
8857 rn = (insn >> 16) & 0xf;
8858 rd = (insn >> 12) & 0xf;
8859 if (insn & (1 << 23)) {
8860 /* load/store exclusive */
2359bf80 8861 int op2 = (insn >> 8) & 3;
86753403 8862 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8863
8864 switch (op2) {
8865 case 0: /* lda/stl */
8866 if (op1 == 1) {
8867 goto illegal_op;
8868 }
8869 ARCH(8);
8870 break;
8871 case 1: /* reserved */
8872 goto illegal_op;
8873 case 2: /* ldaex/stlex */
8874 ARCH(8);
8875 break;
8876 case 3: /* ldrex/strex */
8877 if (op1) {
8878 ARCH(6K);
8879 } else {
8880 ARCH(6);
8881 }
8882 break;
8883 }
8884
3174f8e9 8885 addr = tcg_temp_local_new_i32();
98a46317 8886 load_reg_var(s, addr, rn);
2359bf80
MR
8887
8888 /* Since the emulation does not have barriers,
8889 the acquire/release semantics need no special
8890 handling */
8891 if (op2 == 0) {
8892 if (insn & (1 << 20)) {
8893 tmp = tcg_temp_new_i32();
8894 switch (op1) {
8895 case 0: /* lda */
9bb6558a
PM
8896 gen_aa32_ld32u_iss(s, tmp, addr,
8897 get_mem_index(s),
8898 rd | ISSIsAcqRel);
2359bf80
MR
8899 break;
8900 case 2: /* ldab */
9bb6558a
PM
8901 gen_aa32_ld8u_iss(s, tmp, addr,
8902 get_mem_index(s),
8903 rd | ISSIsAcqRel);
2359bf80
MR
8904 break;
8905 case 3: /* ldah */
9bb6558a
PM
8906 gen_aa32_ld16u_iss(s, tmp, addr,
8907 get_mem_index(s),
8908 rd | ISSIsAcqRel);
2359bf80
MR
8909 break;
8910 default:
8911 abort();
8912 }
8913 store_reg(s, rd, tmp);
8914 } else {
8915 rm = insn & 0xf;
8916 tmp = load_reg(s, rm);
8917 switch (op1) {
8918 case 0: /* stl */
9bb6558a
PM
8919 gen_aa32_st32_iss(s, tmp, addr,
8920 get_mem_index(s),
8921 rm | ISSIsAcqRel);
2359bf80
MR
8922 break;
8923 case 2: /* stlb */
9bb6558a
PM
8924 gen_aa32_st8_iss(s, tmp, addr,
8925 get_mem_index(s),
8926 rm | ISSIsAcqRel);
2359bf80
MR
8927 break;
8928 case 3: /* stlh */
9bb6558a
PM
8929 gen_aa32_st16_iss(s, tmp, addr,
8930 get_mem_index(s),
8931 rm | ISSIsAcqRel);
2359bf80
MR
8932 break;
8933 default:
8934 abort();
8935 }
8936 tcg_temp_free_i32(tmp);
8937 }
8938 } else if (insn & (1 << 20)) {
86753403
PB
8939 switch (op1) {
8940 case 0: /* ldrex */
426f5abc 8941 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8942 break;
8943 case 1: /* ldrexd */
426f5abc 8944 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8945 break;
8946 case 2: /* ldrexb */
426f5abc 8947 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8948 break;
8949 case 3: /* ldrexh */
426f5abc 8950 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8951 break;
8952 default:
8953 abort();
8954 }
9ee6e8bb
PB
8955 } else {
8956 rm = insn & 0xf;
86753403
PB
8957 switch (op1) {
8958 case 0: /* strex */
426f5abc 8959 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8960 break;
8961 case 1: /* strexd */
502e64fe 8962 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8963 break;
8964 case 2: /* strexb */
426f5abc 8965 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8966 break;
8967 case 3: /* strexh */
426f5abc 8968 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8969 break;
8970 default:
8971 abort();
8972 }
9ee6e8bb 8973 }
39d5492a 8974 tcg_temp_free_i32(addr);
9ee6e8bb 8975 } else {
cf12bce0
EC
8976 TCGv taddr;
8977 TCGMemOp opc = s->be_data;
8978
9ee6e8bb
PB
8979 /* SWP instruction */
8980 rm = (insn) & 0xf;
8981
9ee6e8bb 8982 if (insn & (1 << 22)) {
cf12bce0 8983 opc |= MO_UB;
9ee6e8bb 8984 } else {
cf12bce0 8985 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8986 }
cf12bce0
EC
8987
8988 addr = load_reg(s, rn);
8989 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8990 tcg_temp_free_i32(addr);
cf12bce0
EC
8991
8992 tmp = load_reg(s, rm);
8993 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8994 get_mem_index(s), opc);
8995 tcg_temp_free(taddr);
8996 store_reg(s, rd, tmp);
9ee6e8bb
PB
8997 }
8998 }
8999 } else {
9000 int address_offset;
3960c336 9001 bool load = insn & (1 << 20);
63f26fcf
PM
9002 bool wbit = insn & (1 << 21);
9003 bool pbit = insn & (1 << 24);
3960c336 9004 bool doubleword = false;
9bb6558a
PM
9005 ISSInfo issinfo;
9006
9ee6e8bb
PB
9007 /* Misc load/store */
9008 rn = (insn >> 16) & 0xf;
9009 rd = (insn >> 12) & 0xf;
3960c336 9010
9bb6558a
PM
9011 /* ISS not valid if writeback */
9012 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9013
3960c336
PM
9014 if (!load && (sh & 2)) {
9015 /* doubleword */
9016 ARCH(5TE);
9017 if (rd & 1) {
9018 /* UNPREDICTABLE; we choose to UNDEF */
9019 goto illegal_op;
9020 }
9021 load = (sh & 1) == 0;
9022 doubleword = true;
9023 }
9024
b0109805 9025 addr = load_reg(s, rn);
63f26fcf 9026 if (pbit) {
b0109805 9027 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9028 }
9ee6e8bb 9029 address_offset = 0;
3960c336
PM
9030
9031 if (doubleword) {
9032 if (!load) {
9ee6e8bb 9033 /* store */
b0109805 9034 tmp = load_reg(s, rd);
12dcc321 9035 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9036 tcg_temp_free_i32(tmp);
b0109805
PB
9037 tcg_gen_addi_i32(addr, addr, 4);
9038 tmp = load_reg(s, rd + 1);
12dcc321 9039 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9040 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9041 } else {
9042 /* load */
5a839c0d 9043 tmp = tcg_temp_new_i32();
12dcc321 9044 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9045 store_reg(s, rd, tmp);
9046 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9047 tmp = tcg_temp_new_i32();
12dcc321 9048 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9049 rd++;
9ee6e8bb
PB
9050 }
9051 address_offset = -4;
3960c336
PM
9052 } else if (load) {
9053 /* load */
9054 tmp = tcg_temp_new_i32();
9055 switch (sh) {
9056 case 1:
9bb6558a
PM
9057 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9058 issinfo);
3960c336
PM
9059 break;
9060 case 2:
9bb6558a
PM
9061 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9062 issinfo);
3960c336
PM
9063 break;
9064 default:
9065 case 3:
9bb6558a
PM
9066 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9067 issinfo);
3960c336
PM
9068 break;
9069 }
9ee6e8bb
PB
9070 } else {
9071 /* store */
b0109805 9072 tmp = load_reg(s, rd);
9bb6558a 9073 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9074 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9075 }
9076 /* Perform base writeback before the loaded value to
9077 ensure correct behavior with overlapping index registers.
b6af0975 9078 ldrd with base writeback is undefined if the
9ee6e8bb 9079 destination and index registers overlap. */
63f26fcf 9080 if (!pbit) {
b0109805
PB
9081 gen_add_datah_offset(s, insn, address_offset, addr);
9082 store_reg(s, rn, addr);
63f26fcf 9083 } else if (wbit) {
9ee6e8bb 9084 if (address_offset)
b0109805
PB
9085 tcg_gen_addi_i32(addr, addr, address_offset);
9086 store_reg(s, rn, addr);
9087 } else {
7d1b0095 9088 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9089 }
9090 if (load) {
9091 /* Complete the load. */
b0109805 9092 store_reg(s, rd, tmp);
9ee6e8bb
PB
9093 }
9094 }
9095 break;
9096 case 0x4:
9097 case 0x5:
9098 goto do_ldst;
9099 case 0x6:
9100 case 0x7:
9101 if (insn & (1 << 4)) {
9102 ARCH(6);
9103 /* Armv6 Media instructions. */
9104 rm = insn & 0xf;
9105 rn = (insn >> 16) & 0xf;
2c0262af 9106 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9107 rs = (insn >> 8) & 0xf;
9108 switch ((insn >> 23) & 3) {
9109 case 0: /* Parallel add/subtract. */
9110 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9111 tmp = load_reg(s, rn);
9112 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9113 sh = (insn >> 5) & 7;
9114 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9115 goto illegal_op;
6ddbc6e4 9116 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9117 tcg_temp_free_i32(tmp2);
6ddbc6e4 9118 store_reg(s, rd, tmp);
9ee6e8bb
PB
9119 break;
9120 case 1:
9121 if ((insn & 0x00700020) == 0) {
6c95676b 9122 /* Halfword pack. */
3670669c
PB
9123 tmp = load_reg(s, rn);
9124 tmp2 = load_reg(s, rm);
9ee6e8bb 9125 shift = (insn >> 7) & 0x1f;
3670669c
PB
9126 if (insn & (1 << 6)) {
9127 /* pkhtb */
22478e79
AZ
9128 if (shift == 0)
9129 shift = 31;
9130 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9131 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9132 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9133 } else {
9134 /* pkhbt */
22478e79
AZ
9135 if (shift)
9136 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9137 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9138 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9139 }
9140 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9141 tcg_temp_free_i32(tmp2);
3670669c 9142 store_reg(s, rd, tmp);
9ee6e8bb
PB
9143 } else if ((insn & 0x00200020) == 0x00200000) {
9144 /* [us]sat */
6ddbc6e4 9145 tmp = load_reg(s, rm);
9ee6e8bb
PB
9146 shift = (insn >> 7) & 0x1f;
9147 if (insn & (1 << 6)) {
9148 if (shift == 0)
9149 shift = 31;
6ddbc6e4 9150 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9151 } else {
6ddbc6e4 9152 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9153 }
9154 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9155 tmp2 = tcg_const_i32(sh);
9156 if (insn & (1 << 22))
9ef39277 9157 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9158 else
9ef39277 9159 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9160 tcg_temp_free_i32(tmp2);
6ddbc6e4 9161 store_reg(s, rd, tmp);
9ee6e8bb
PB
9162 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9163 /* [us]sat16 */
6ddbc6e4 9164 tmp = load_reg(s, rm);
9ee6e8bb 9165 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9166 tmp2 = tcg_const_i32(sh);
9167 if (insn & (1 << 22))
9ef39277 9168 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9169 else
9ef39277 9170 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9171 tcg_temp_free_i32(tmp2);
6ddbc6e4 9172 store_reg(s, rd, tmp);
9ee6e8bb
PB
9173 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9174 /* Select bytes. */
6ddbc6e4
PB
9175 tmp = load_reg(s, rn);
9176 tmp2 = load_reg(s, rm);
7d1b0095 9177 tmp3 = tcg_temp_new_i32();
0ecb72a5 9178 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9179 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9180 tcg_temp_free_i32(tmp3);
9181 tcg_temp_free_i32(tmp2);
6ddbc6e4 9182 store_reg(s, rd, tmp);
9ee6e8bb 9183 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9184 tmp = load_reg(s, rm);
9ee6e8bb 9185 shift = (insn >> 10) & 3;
1301f322 9186 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9187 rotate, a shift is sufficient. */
9188 if (shift != 0)
f669df27 9189 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9190 op1 = (insn >> 20) & 7;
9191 switch (op1) {
5e3f878a
PB
9192 case 0: gen_sxtb16(tmp); break;
9193 case 2: gen_sxtb(tmp); break;
9194 case 3: gen_sxth(tmp); break;
9195 case 4: gen_uxtb16(tmp); break;
9196 case 6: gen_uxtb(tmp); break;
9197 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9198 default: goto illegal_op;
9199 }
9200 if (rn != 15) {
5e3f878a 9201 tmp2 = load_reg(s, rn);
9ee6e8bb 9202 if ((op1 & 3) == 0) {
5e3f878a 9203 gen_add16(tmp, tmp2);
9ee6e8bb 9204 } else {
5e3f878a 9205 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9206 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9207 }
9208 }
6c95676b 9209 store_reg(s, rd, tmp);
9ee6e8bb
PB
9210 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9211 /* rev */
b0109805 9212 tmp = load_reg(s, rm);
9ee6e8bb
PB
9213 if (insn & (1 << 22)) {
9214 if (insn & (1 << 7)) {
b0109805 9215 gen_revsh(tmp);
9ee6e8bb
PB
9216 } else {
9217 ARCH(6T2);
b0109805 9218 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9219 }
9220 } else {
9221 if (insn & (1 << 7))
b0109805 9222 gen_rev16(tmp);
9ee6e8bb 9223 else
66896cb8 9224 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9225 }
b0109805 9226 store_reg(s, rd, tmp);
9ee6e8bb
PB
9227 } else {
9228 goto illegal_op;
9229 }
9230 break;
9231 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9232 switch ((insn >> 20) & 0x7) {
9233 case 5:
9234 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9235 /* op2 not 00x or 11x : UNDEF */
9236 goto illegal_op;
9237 }
838fa72d
AJ
9238 /* Signed multiply most significant [accumulate].
9239 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9240 tmp = load_reg(s, rm);
9241 tmp2 = load_reg(s, rs);
a7812ae4 9242 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9243
955a7dd5 9244 if (rd != 15) {
838fa72d 9245 tmp = load_reg(s, rd);
9ee6e8bb 9246 if (insn & (1 << 6)) {
838fa72d 9247 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9248 } else {
838fa72d 9249 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9250 }
9251 }
838fa72d
AJ
9252 if (insn & (1 << 5)) {
9253 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9254 }
9255 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9256 tmp = tcg_temp_new_i32();
ecc7b3aa 9257 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9258 tcg_temp_free_i64(tmp64);
955a7dd5 9259 store_reg(s, rn, tmp);
41e9564d
PM
9260 break;
9261 case 0:
9262 case 4:
9263 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9264 if (insn & (1 << 7)) {
9265 goto illegal_op;
9266 }
9267 tmp = load_reg(s, rm);
9268 tmp2 = load_reg(s, rs);
9ee6e8bb 9269 if (insn & (1 << 5))
5e3f878a
PB
9270 gen_swap_half(tmp2);
9271 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9272 if (insn & (1 << 22)) {
5e3f878a 9273 /* smlald, smlsld */
33bbd75a
PC
9274 TCGv_i64 tmp64_2;
9275
a7812ae4 9276 tmp64 = tcg_temp_new_i64();
33bbd75a 9277 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9278 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9279 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9280 tcg_temp_free_i32(tmp);
33bbd75a
PC
9281 tcg_temp_free_i32(tmp2);
9282 if (insn & (1 << 6)) {
9283 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9284 } else {
9285 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9286 }
9287 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9288 gen_addq(s, tmp64, rd, rn);
9289 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9290 tcg_temp_free_i64(tmp64);
9ee6e8bb 9291 } else {
5e3f878a 9292 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9293 if (insn & (1 << 6)) {
9294 /* This subtraction cannot overflow. */
9295 tcg_gen_sub_i32(tmp, tmp, tmp2);
9296 } else {
9297 /* This addition cannot overflow 32 bits;
9298 * however it may overflow considered as a
9299 * signed operation, in which case we must set
9300 * the Q flag.
9301 */
9302 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9303 }
9304 tcg_temp_free_i32(tmp2);
22478e79 9305 if (rd != 15)
9ee6e8bb 9306 {
22478e79 9307 tmp2 = load_reg(s, rd);
9ef39277 9308 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9309 tcg_temp_free_i32(tmp2);
9ee6e8bb 9310 }
22478e79 9311 store_reg(s, rn, tmp);
9ee6e8bb 9312 }
41e9564d 9313 break;
b8b8ea05
PM
9314 case 1:
9315 case 3:
9316 /* SDIV, UDIV */
d614a513 9317 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9318 goto illegal_op;
9319 }
9320 if (((insn >> 5) & 7) || (rd != 15)) {
9321 goto illegal_op;
9322 }
9323 tmp = load_reg(s, rm);
9324 tmp2 = load_reg(s, rs);
9325 if (insn & (1 << 21)) {
9326 gen_helper_udiv(tmp, tmp, tmp2);
9327 } else {
9328 gen_helper_sdiv(tmp, tmp, tmp2);
9329 }
9330 tcg_temp_free_i32(tmp2);
9331 store_reg(s, rn, tmp);
9332 break;
41e9564d
PM
9333 default:
9334 goto illegal_op;
9ee6e8bb
PB
9335 }
9336 break;
9337 case 3:
9338 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9339 switch (op1) {
9340 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9341 ARCH(6);
9342 tmp = load_reg(s, rm);
9343 tmp2 = load_reg(s, rs);
9344 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9345 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9346 if (rd != 15) {
9347 tmp2 = load_reg(s, rd);
6ddbc6e4 9348 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9349 tcg_temp_free_i32(tmp2);
9ee6e8bb 9350 }
ded9d295 9351 store_reg(s, rn, tmp);
9ee6e8bb
PB
9352 break;
9353 case 0x20: case 0x24: case 0x28: case 0x2c:
9354 /* Bitfield insert/clear. */
9355 ARCH(6T2);
9356 shift = (insn >> 7) & 0x1f;
9357 i = (insn >> 16) & 0x1f;
45140a57
KB
9358 if (i < shift) {
9359 /* UNPREDICTABLE; we choose to UNDEF */
9360 goto illegal_op;
9361 }
9ee6e8bb
PB
9362 i = i + 1 - shift;
9363 if (rm == 15) {
7d1b0095 9364 tmp = tcg_temp_new_i32();
5e3f878a 9365 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9366 } else {
5e3f878a 9367 tmp = load_reg(s, rm);
9ee6e8bb
PB
9368 }
9369 if (i != 32) {
5e3f878a 9370 tmp2 = load_reg(s, rd);
d593c48e 9371 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9372 tcg_temp_free_i32(tmp2);
9ee6e8bb 9373 }
5e3f878a 9374 store_reg(s, rd, tmp);
9ee6e8bb
PB
9375 break;
9376 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9377 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9378 ARCH(6T2);
5e3f878a 9379 tmp = load_reg(s, rm);
9ee6e8bb
PB
9380 shift = (insn >> 7) & 0x1f;
9381 i = ((insn >> 16) & 0x1f) + 1;
9382 if (shift + i > 32)
9383 goto illegal_op;
9384 if (i < 32) {
9385 if (op1 & 0x20) {
59a71b4c 9386 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9387 } else {
59a71b4c 9388 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9389 }
9390 }
5e3f878a 9391 store_reg(s, rd, tmp);
9ee6e8bb
PB
9392 break;
9393 default:
9394 goto illegal_op;
9395 }
9396 break;
9397 }
9398 break;
9399 }
9400 do_ldst:
9401 /* Check for undefined extension instructions
9402 * per the ARM Bible IE:
9403 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9404 */
9405 sh = (0xf << 20) | (0xf << 4);
9406 if (op1 == 0x7 && ((insn & sh) == sh))
9407 {
9408 goto illegal_op;
9409 }
9410 /* load/store byte/word */
9411 rn = (insn >> 16) & 0xf;
9412 rd = (insn >> 12) & 0xf;
b0109805 9413 tmp2 = load_reg(s, rn);
a99caa48
PM
9414 if ((insn & 0x01200000) == 0x00200000) {
9415 /* ldrt/strt */
579d21cc 9416 i = get_a32_user_mem_index(s);
a99caa48
PM
9417 } else {
9418 i = get_mem_index(s);
9419 }
9ee6e8bb 9420 if (insn & (1 << 24))
b0109805 9421 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9422 if (insn & (1 << 20)) {
9423 /* load */
5a839c0d 9424 tmp = tcg_temp_new_i32();
9ee6e8bb 9425 if (insn & (1 << 22)) {
9bb6558a 9426 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9427 } else {
9bb6558a 9428 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9429 }
9ee6e8bb
PB
9430 } else {
9431 /* store */
b0109805 9432 tmp = load_reg(s, rd);
5a839c0d 9433 if (insn & (1 << 22)) {
9bb6558a 9434 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9435 } else {
9bb6558a 9436 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9437 }
9438 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9439 }
9440 if (!(insn & (1 << 24))) {
b0109805
PB
9441 gen_add_data_offset(s, insn, tmp2);
9442 store_reg(s, rn, tmp2);
9443 } else if (insn & (1 << 21)) {
9444 store_reg(s, rn, tmp2);
9445 } else {
7d1b0095 9446 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9447 }
9448 if (insn & (1 << 20)) {
9449 /* Complete the load. */
7dcc1f89 9450 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9451 }
9452 break;
9453 case 0x08:
9454 case 0x09:
9455 {
da3e53dd
PM
9456 int j, n, loaded_base;
9457 bool exc_return = false;
9458 bool is_load = extract32(insn, 20, 1);
9459 bool user = false;
39d5492a 9460 TCGv_i32 loaded_var;
9ee6e8bb
PB
9461 /* load/store multiple words */
9462 /* XXX: store correct base if write back */
9ee6e8bb 9463 if (insn & (1 << 22)) {
da3e53dd 9464 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9465 if (IS_USER(s))
9466 goto illegal_op; /* only usable in supervisor mode */
9467
da3e53dd
PM
9468 if (is_load && extract32(insn, 15, 1)) {
9469 exc_return = true;
9470 } else {
9471 user = true;
9472 }
9ee6e8bb
PB
9473 }
9474 rn = (insn >> 16) & 0xf;
b0109805 9475 addr = load_reg(s, rn);
9ee6e8bb
PB
9476
9477 /* compute total size */
9478 loaded_base = 0;
39d5492a 9479 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9480 n = 0;
9481 for(i=0;i<16;i++) {
9482 if (insn & (1 << i))
9483 n++;
9484 }
9485 /* XXX: test invalid n == 0 case ? */
9486 if (insn & (1 << 23)) {
9487 if (insn & (1 << 24)) {
9488 /* pre increment */
b0109805 9489 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9490 } else {
9491 /* post increment */
9492 }
9493 } else {
9494 if (insn & (1 << 24)) {
9495 /* pre decrement */
b0109805 9496 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9497 } else {
9498 /* post decrement */
9499 if (n != 1)
b0109805 9500 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9501 }
9502 }
9503 j = 0;
9504 for(i=0;i<16;i++) {
9505 if (insn & (1 << i)) {
da3e53dd 9506 if (is_load) {
9ee6e8bb 9507 /* load */
5a839c0d 9508 tmp = tcg_temp_new_i32();
12dcc321 9509 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9510 if (user) {
b75263d6 9511 tmp2 = tcg_const_i32(i);
1ce94f81 9512 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9513 tcg_temp_free_i32(tmp2);
7d1b0095 9514 tcg_temp_free_i32(tmp);
9ee6e8bb 9515 } else if (i == rn) {
b0109805 9516 loaded_var = tmp;
9ee6e8bb 9517 loaded_base = 1;
fb0e8e79
PM
9518 } else if (rn == 15 && exc_return) {
9519 store_pc_exc_ret(s, tmp);
9ee6e8bb 9520 } else {
7dcc1f89 9521 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9522 }
9523 } else {
9524 /* store */
9525 if (i == 15) {
9526 /* special case: r15 = PC + 8 */
9527 val = (long)s->pc + 4;
7d1b0095 9528 tmp = tcg_temp_new_i32();
b0109805 9529 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9530 } else if (user) {
7d1b0095 9531 tmp = tcg_temp_new_i32();
b75263d6 9532 tmp2 = tcg_const_i32(i);
9ef39277 9533 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9534 tcg_temp_free_i32(tmp2);
9ee6e8bb 9535 } else {
b0109805 9536 tmp = load_reg(s, i);
9ee6e8bb 9537 }
12dcc321 9538 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9539 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9540 }
9541 j++;
9542 /* no need to add after the last transfer */
9543 if (j != n)
b0109805 9544 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9545 }
9546 }
9547 if (insn & (1 << 21)) {
9548 /* write back */
9549 if (insn & (1 << 23)) {
9550 if (insn & (1 << 24)) {
9551 /* pre increment */
9552 } else {
9553 /* post increment */
b0109805 9554 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9555 }
9556 } else {
9557 if (insn & (1 << 24)) {
9558 /* pre decrement */
9559 if (n != 1)
b0109805 9560 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9561 } else {
9562 /* post decrement */
b0109805 9563 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9564 }
9565 }
b0109805
PB
9566 store_reg(s, rn, addr);
9567 } else {
7d1b0095 9568 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9569 }
9570 if (loaded_base) {
b0109805 9571 store_reg(s, rn, loaded_var);
9ee6e8bb 9572 }
da3e53dd 9573 if (exc_return) {
9ee6e8bb 9574 /* Restore CPSR from SPSR. */
d9ba4830 9575 tmp = load_cpu_field(spsr);
235ea1f5 9576 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9577 tcg_temp_free_i32(tmp);
b29fd33d 9578 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9579 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9580 }
9581 }
9582 break;
9583 case 0xa:
9584 case 0xb:
9585 {
9586 int32_t offset;
9587
9588 /* branch (and link) */
9589 val = (int32_t)s->pc;
9590 if (insn & (1 << 24)) {
7d1b0095 9591 tmp = tcg_temp_new_i32();
5e3f878a
PB
9592 tcg_gen_movi_i32(tmp, val);
9593 store_reg(s, 14, tmp);
9ee6e8bb 9594 }
534df156
PM
9595 offset = sextract32(insn << 2, 0, 26);
9596 val += offset + 4;
9ee6e8bb
PB
9597 gen_jmp(s, val);
9598 }
9599 break;
9600 case 0xc:
9601 case 0xd:
9602 case 0xe:
6a57f3eb
WN
9603 if (((insn >> 8) & 0xe) == 10) {
9604 /* VFP. */
7dcc1f89 9605 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9606 goto illegal_op;
9607 }
7dcc1f89 9608 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9609 /* Coprocessor. */
9ee6e8bb 9610 goto illegal_op;
6a57f3eb 9611 }
9ee6e8bb
PB
9612 break;
9613 case 0xf:
9614 /* swi */
eaed129d 9615 gen_set_pc_im(s, s->pc);
d4a2dc67 9616 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9617 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9618 break;
9619 default:
9620 illegal_op:
73710361
GB
9621 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9622 default_exception_el(s));
9ee6e8bb
PB
9623 break;
9624 }
9625 }
9626}
9627
296e5a0a
PM
9628static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9629{
9630 /* Return true if this is a 16 bit instruction. We must be precise
9631 * about this (matching the decode). We assume that s->pc still
9632 * points to the first 16 bits of the insn.
9633 */
9634 if ((insn >> 11) < 0x1d) {
9635 /* Definitely a 16-bit instruction */
9636 return true;
9637 }
9638
9639 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9640 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9641 * end up actually treating this as two 16-bit insns, though,
9642 * if it's half of a bl/blx pair that might span a page boundary.
9643 */
9644 if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
9645 /* Thumb2 cores (including all M profile ones) always treat
9646 * 32-bit insns as 32-bit.
9647 */
9648 return false;
9649 }
9650
9651 if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
9652 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9653 * is not on the next page; we merge this into a 32-bit
9654 * insn.
9655 */
9656 return false;
9657 }
9658 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9659 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9660 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9661 * -- handle as single 16 bit insn
9662 */
9663 return true;
9664}
9665
9ee6e8bb
PB
9666/* Return true if this is a Thumb-2 logical op. */
/* Return nonzero if opcode OP (bits [24:21] of a T32 data-processing
 * insn) is one of the logical operations, i.e. an op below 8.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
9672
9673/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9674 then set condition code flags based on the result of the operation.
9675 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9676 to the high bit of T1.
9677 Returns zero if the opcode is valid. */
9678
9679static int
39d5492a
PM
9680gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9681 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9682{
9683 int logic_cc;
9684
9685 logic_cc = 0;
9686 switch (op) {
9687 case 0: /* and */
396e467c 9688 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9689 logic_cc = conds;
9690 break;
9691 case 1: /* bic */
f669df27 9692 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9693 logic_cc = conds;
9694 break;
9695 case 2: /* orr */
396e467c 9696 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9697 logic_cc = conds;
9698 break;
9699 case 3: /* orn */
29501f1b 9700 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9701 logic_cc = conds;
9702 break;
9703 case 4: /* eor */
396e467c 9704 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9705 logic_cc = conds;
9706 break;
9707 case 8: /* add */
9708 if (conds)
72485ec4 9709 gen_add_CC(t0, t0, t1);
9ee6e8bb 9710 else
396e467c 9711 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9712 break;
9713 case 10: /* adc */
9714 if (conds)
49b4c31e 9715 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9716 else
396e467c 9717 gen_adc(t0, t1);
9ee6e8bb
PB
9718 break;
9719 case 11: /* sbc */
2de68a49
RH
9720 if (conds) {
9721 gen_sbc_CC(t0, t0, t1);
9722 } else {
396e467c 9723 gen_sub_carry(t0, t0, t1);
2de68a49 9724 }
9ee6e8bb
PB
9725 break;
9726 case 13: /* sub */
9727 if (conds)
72485ec4 9728 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9729 else
396e467c 9730 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9731 break;
9732 case 14: /* rsb */
9733 if (conds)
72485ec4 9734 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9735 else
396e467c 9736 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9737 break;
9738 default: /* 5, 6, 7, 9, 12, 15. */
9739 return 1;
9740 }
9741 if (logic_cc) {
396e467c 9742 gen_logic_CC(t0);
9ee6e8bb 9743 if (shifter_out)
396e467c 9744 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9745 }
9746 return 0;
9747}
9748
9749/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9750 is not legal. */
296e5a0a 9751static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 9752{
296e5a0a 9753 uint32_t imm, shift, offset;
9ee6e8bb 9754 uint32_t rd, rn, rm, rs;
39d5492a
PM
9755 TCGv_i32 tmp;
9756 TCGv_i32 tmp2;
9757 TCGv_i32 tmp3;
9758 TCGv_i32 addr;
a7812ae4 9759 TCGv_i64 tmp64;
9ee6e8bb
PB
9760 int op;
9761 int shiftop;
9762 int conds;
9763 int logic_cc;
9764
296e5a0a
PM
9765 /* The only 32 bit insn that's allowed for Thumb1 is the combined
9766 * BL/BLX prefix and suffix.
9767 */
9ee6e8bb
PB
9768 if ((insn & 0xf800e800) != 0xf000e800) {
9769 ARCH(6T2);
9770 }
9771
9772 rn = (insn >> 16) & 0xf;
9773 rs = (insn >> 12) & 0xf;
9774 rd = (insn >> 8) & 0xf;
9775 rm = insn & 0xf;
9776 switch ((insn >> 25) & 0xf) {
9777 case 0: case 1: case 2: case 3:
9778 /* 16-bit instructions. Should never happen. */
9779 abort();
9780 case 4:
9781 if (insn & (1 << 22)) {
ebfe27c5
PM
9782 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9783 * - load/store doubleword, load/store exclusive, ldacq/strel,
9784 * table branch.
9785 */
76eff04d
PM
9786 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9787 arm_dc_feature(s, ARM_FEATURE_V8)) {
9788 /* 0b1110_1001_0111_1111_1110_1001_0111_111
9789 * - SG (v8M only)
9790 * The bulk of the behaviour for this instruction is implemented
9791 * in v7m_handle_execute_nsc(), which deals with the insn when
9792 * it is executed by a CPU in non-secure state from memory
9793 * which is Secure & NonSecure-Callable.
9794 * Here we only need to handle the remaining cases:
9795 * * in NS memory (including the "security extension not
9796 * implemented" case) : NOP
9797 * * in S memory but CPU already secure (clear IT bits)
9798 * We know that the attribute for the memory this insn is
9799 * in must match the current CPU state, because otherwise
9800 * get_phys_addr_pmsav8 would have generated an exception.
9801 */
9802 if (s->v8m_secure) {
9803 /* Like the IT insn, we don't need to generate any code */
9804 s->condexec_cond = 0;
9805 s->condexec_mask = 0;
9806 }
9807 } else if (insn & 0x01200000) {
ebfe27c5
PM
9808 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9809 * - load/store dual (post-indexed)
9810 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9811 * - load/store dual (literal and immediate)
9812 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9813 * - load/store dual (pre-indexed)
9814 */
9ee6e8bb 9815 if (rn == 15) {
ebfe27c5
PM
9816 if (insn & (1 << 21)) {
9817 /* UNPREDICTABLE */
9818 goto illegal_op;
9819 }
7d1b0095 9820 addr = tcg_temp_new_i32();
b0109805 9821 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9822 } else {
b0109805 9823 addr = load_reg(s, rn);
9ee6e8bb
PB
9824 }
9825 offset = (insn & 0xff) * 4;
9826 if ((insn & (1 << 23)) == 0)
9827 offset = -offset;
9828 if (insn & (1 << 24)) {
b0109805 9829 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9830 offset = 0;
9831 }
9832 if (insn & (1 << 20)) {
9833 /* ldrd */
e2592fad 9834 tmp = tcg_temp_new_i32();
12dcc321 9835 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9836 store_reg(s, rs, tmp);
9837 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9838 tmp = tcg_temp_new_i32();
12dcc321 9839 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9840 store_reg(s, rd, tmp);
9ee6e8bb
PB
9841 } else {
9842 /* strd */
b0109805 9843 tmp = load_reg(s, rs);
12dcc321 9844 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9845 tcg_temp_free_i32(tmp);
b0109805
PB
9846 tcg_gen_addi_i32(addr, addr, 4);
9847 tmp = load_reg(s, rd);
12dcc321 9848 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9849 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9850 }
9851 if (insn & (1 << 21)) {
9852 /* Base writeback. */
b0109805
PB
9853 tcg_gen_addi_i32(addr, addr, offset - 4);
9854 store_reg(s, rn, addr);
9855 } else {
7d1b0095 9856 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9857 }
9858 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
9859 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9860 * - load/store exclusive word
9861 */
9862 if (rs == 15) {
9863 goto illegal_op;
9864 }
39d5492a 9865 addr = tcg_temp_local_new_i32();
98a46317 9866 load_reg_var(s, addr, rn);
426f5abc 9867 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9868 if (insn & (1 << 20)) {
426f5abc 9869 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9870 } else {
426f5abc 9871 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9872 }
39d5492a 9873 tcg_temp_free_i32(addr);
2359bf80 9874 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9875 /* Table Branch. */
9876 if (rn == 15) {
7d1b0095 9877 addr = tcg_temp_new_i32();
b0109805 9878 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9879 } else {
b0109805 9880 addr = load_reg(s, rn);
9ee6e8bb 9881 }
b26eefb6 9882 tmp = load_reg(s, rm);
b0109805 9883 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9884 if (insn & (1 << 4)) {
9885 /* tbh */
b0109805 9886 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9887 tcg_temp_free_i32(tmp);
e2592fad 9888 tmp = tcg_temp_new_i32();
12dcc321 9889 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9890 } else { /* tbb */
7d1b0095 9891 tcg_temp_free_i32(tmp);
e2592fad 9892 tmp = tcg_temp_new_i32();
12dcc321 9893 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9894 }
7d1b0095 9895 tcg_temp_free_i32(addr);
b0109805
PB
9896 tcg_gen_shli_i32(tmp, tmp, 1);
9897 tcg_gen_addi_i32(tmp, tmp, s->pc);
9898 store_reg(s, 15, tmp);
9ee6e8bb 9899 } else {
2359bf80 9900 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9901 op = (insn >> 4) & 0x3;
2359bf80
MR
9902 switch (op2) {
9903 case 0:
426f5abc 9904 goto illegal_op;
2359bf80
MR
9905 case 1:
9906 /* Load/store exclusive byte/halfword/doubleword */
9907 if (op == 2) {
9908 goto illegal_op;
9909 }
9910 ARCH(7);
9911 break;
9912 case 2:
9913 /* Load-acquire/store-release */
9914 if (op == 3) {
9915 goto illegal_op;
9916 }
9917 /* Fall through */
9918 case 3:
9919 /* Load-acquire/store-release exclusive */
9920 ARCH(8);
9921 break;
426f5abc 9922 }
39d5492a 9923 addr = tcg_temp_local_new_i32();
98a46317 9924 load_reg_var(s, addr, rn);
2359bf80
MR
9925 if (!(op2 & 1)) {
9926 if (insn & (1 << 20)) {
9927 tmp = tcg_temp_new_i32();
9928 switch (op) {
9929 case 0: /* ldab */
9bb6558a
PM
9930 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9931 rs | ISSIsAcqRel);
2359bf80
MR
9932 break;
9933 case 1: /* ldah */
9bb6558a
PM
9934 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9935 rs | ISSIsAcqRel);
2359bf80
MR
9936 break;
9937 case 2: /* lda */
9bb6558a
PM
9938 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9939 rs | ISSIsAcqRel);
2359bf80
MR
9940 break;
9941 default:
9942 abort();
9943 }
9944 store_reg(s, rs, tmp);
9945 } else {
9946 tmp = load_reg(s, rs);
9947 switch (op) {
9948 case 0: /* stlb */
9bb6558a
PM
9949 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9950 rs | ISSIsAcqRel);
2359bf80
MR
9951 break;
9952 case 1: /* stlh */
9bb6558a
PM
9953 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9954 rs | ISSIsAcqRel);
2359bf80
MR
9955 break;
9956 case 2: /* stl */
9bb6558a
PM
9957 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9958 rs | ISSIsAcqRel);
2359bf80
MR
9959 break;
9960 default:
9961 abort();
9962 }
9963 tcg_temp_free_i32(tmp);
9964 }
9965 } else if (insn & (1 << 20)) {
426f5abc 9966 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9967 } else {
426f5abc 9968 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9969 }
39d5492a 9970 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9971 }
9972 } else {
9973 /* Load/store multiple, RFE, SRS. */
9974 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9975 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9976 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9977 goto illegal_op;
00115976 9978 }
9ee6e8bb
PB
9979 if (insn & (1 << 20)) {
9980 /* rfe */
b0109805
PB
9981 addr = load_reg(s, rn);
9982 if ((insn & (1 << 24)) == 0)
9983 tcg_gen_addi_i32(addr, addr, -8);
9984 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9985 tmp = tcg_temp_new_i32();
12dcc321 9986 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9987 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9988 tmp2 = tcg_temp_new_i32();
12dcc321 9989 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9990 if (insn & (1 << 21)) {
9991 /* Base writeback. */
b0109805
PB
9992 if (insn & (1 << 24)) {
9993 tcg_gen_addi_i32(addr, addr, 4);
9994 } else {
9995 tcg_gen_addi_i32(addr, addr, -4);
9996 }
9997 store_reg(s, rn, addr);
9998 } else {
7d1b0095 9999 tcg_temp_free_i32(addr);
9ee6e8bb 10000 }
b0109805 10001 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10002 } else {
10003 /* srs */
81465888
PM
10004 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10005 insn & (1 << 21));
9ee6e8bb
PB
10006 }
10007 } else {
5856d44e 10008 int i, loaded_base = 0;
39d5492a 10009 TCGv_i32 loaded_var;
9ee6e8bb 10010 /* Load/store multiple. */
b0109805 10011 addr = load_reg(s, rn);
9ee6e8bb
PB
10012 offset = 0;
10013 for (i = 0; i < 16; i++) {
10014 if (insn & (1 << i))
10015 offset += 4;
10016 }
10017 if (insn & (1 << 24)) {
b0109805 10018 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10019 }
10020
39d5492a 10021 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
10022 for (i = 0; i < 16; i++) {
10023 if ((insn & (1 << i)) == 0)
10024 continue;
10025 if (insn & (1 << 20)) {
10026 /* Load. */
e2592fad 10027 tmp = tcg_temp_new_i32();
12dcc321 10028 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10029 if (i == 15) {
3bb8a96f 10030 gen_bx_excret(s, tmp);
5856d44e
YO
10031 } else if (i == rn) {
10032 loaded_var = tmp;
10033 loaded_base = 1;
9ee6e8bb 10034 } else {
b0109805 10035 store_reg(s, i, tmp);
9ee6e8bb
PB
10036 }
10037 } else {
10038 /* Store. */
b0109805 10039 tmp = load_reg(s, i);
12dcc321 10040 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10041 tcg_temp_free_i32(tmp);
9ee6e8bb 10042 }
b0109805 10043 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10044 }
5856d44e
YO
10045 if (loaded_base) {
10046 store_reg(s, rn, loaded_var);
10047 }
9ee6e8bb
PB
10048 if (insn & (1 << 21)) {
10049 /* Base register writeback. */
10050 if (insn & (1 << 24)) {
b0109805 10051 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10052 }
10053 /* Fault if writeback register is in register list. */
10054 if (insn & (1 << rn))
10055 goto illegal_op;
b0109805
PB
10056 store_reg(s, rn, addr);
10057 } else {
7d1b0095 10058 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10059 }
10060 }
10061 }
10062 break;
2af9ab77
JB
10063 case 5:
10064
9ee6e8bb 10065 op = (insn >> 21) & 0xf;
2af9ab77 10066 if (op == 6) {
62b44f05
AR
10067 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10068 goto illegal_op;
10069 }
2af9ab77
JB
10070 /* Halfword pack. */
10071 tmp = load_reg(s, rn);
10072 tmp2 = load_reg(s, rm);
10073 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10074 if (insn & (1 << 5)) {
10075 /* pkhtb */
10076 if (shift == 0)
10077 shift = 31;
10078 tcg_gen_sari_i32(tmp2, tmp2, shift);
10079 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10080 tcg_gen_ext16u_i32(tmp2, tmp2);
10081 } else {
10082 /* pkhbt */
10083 if (shift)
10084 tcg_gen_shli_i32(tmp2, tmp2, shift);
10085 tcg_gen_ext16u_i32(tmp, tmp);
10086 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10087 }
10088 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10089 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10090 store_reg(s, rd, tmp);
10091 } else {
2af9ab77
JB
10092 /* Data processing register constant shift. */
10093 if (rn == 15) {
7d1b0095 10094 tmp = tcg_temp_new_i32();
2af9ab77
JB
10095 tcg_gen_movi_i32(tmp, 0);
10096 } else {
10097 tmp = load_reg(s, rn);
10098 }
10099 tmp2 = load_reg(s, rm);
10100
10101 shiftop = (insn >> 4) & 3;
10102 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10103 conds = (insn & (1 << 20)) != 0;
10104 logic_cc = (conds && thumb2_logic_op(op));
10105 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10106 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10107 goto illegal_op;
7d1b0095 10108 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10109 if (rd != 15) {
10110 store_reg(s, rd, tmp);
10111 } else {
7d1b0095 10112 tcg_temp_free_i32(tmp);
2af9ab77 10113 }
3174f8e9 10114 }
9ee6e8bb
PB
10115 break;
10116 case 13: /* Misc data processing. */
10117 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10118 if (op < 4 && (insn & 0xf000) != 0xf000)
10119 goto illegal_op;
10120 switch (op) {
10121 case 0: /* Register controlled shift. */
8984bd2e
PB
10122 tmp = load_reg(s, rn);
10123 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10124 if ((insn & 0x70) != 0)
10125 goto illegal_op;
10126 op = (insn >> 21) & 3;
8984bd2e
PB
10127 logic_cc = (insn & (1 << 20)) != 0;
10128 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10129 if (logic_cc)
10130 gen_logic_CC(tmp);
bedb8a6b 10131 store_reg(s, rd, tmp);
9ee6e8bb
PB
10132 break;
10133 case 1: /* Sign/zero extend. */
62b44f05
AR
10134 op = (insn >> 20) & 7;
10135 switch (op) {
10136 case 0: /* SXTAH, SXTH */
10137 case 1: /* UXTAH, UXTH */
10138 case 4: /* SXTAB, SXTB */
10139 case 5: /* UXTAB, UXTB */
10140 break;
10141 case 2: /* SXTAB16, SXTB16 */
10142 case 3: /* UXTAB16, UXTB16 */
10143 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10144 goto illegal_op;
10145 }
10146 break;
10147 default:
10148 goto illegal_op;
10149 }
10150 if (rn != 15) {
10151 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10152 goto illegal_op;
10153 }
10154 }
5e3f878a 10155 tmp = load_reg(s, rm);
9ee6e8bb 10156 shift = (insn >> 4) & 3;
1301f322 10157 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10158 rotate, a shift is sufficient. */
10159 if (shift != 0)
f669df27 10160 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10161 op = (insn >> 20) & 7;
10162 switch (op) {
5e3f878a
PB
10163 case 0: gen_sxth(tmp); break;
10164 case 1: gen_uxth(tmp); break;
10165 case 2: gen_sxtb16(tmp); break;
10166 case 3: gen_uxtb16(tmp); break;
10167 case 4: gen_sxtb(tmp); break;
10168 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10169 default:
10170 g_assert_not_reached();
9ee6e8bb
PB
10171 }
10172 if (rn != 15) {
5e3f878a 10173 tmp2 = load_reg(s, rn);
9ee6e8bb 10174 if ((op >> 1) == 1) {
5e3f878a 10175 gen_add16(tmp, tmp2);
9ee6e8bb 10176 } else {
5e3f878a 10177 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10178 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10179 }
10180 }
5e3f878a 10181 store_reg(s, rd, tmp);
9ee6e8bb
PB
10182 break;
10183 case 2: /* SIMD add/subtract. */
62b44f05
AR
10184 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10185 goto illegal_op;
10186 }
9ee6e8bb
PB
10187 op = (insn >> 20) & 7;
10188 shift = (insn >> 4) & 7;
10189 if ((op & 3) == 3 || (shift & 3) == 3)
10190 goto illegal_op;
6ddbc6e4
PB
10191 tmp = load_reg(s, rn);
10192 tmp2 = load_reg(s, rm);
10193 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10194 tcg_temp_free_i32(tmp2);
6ddbc6e4 10195 store_reg(s, rd, tmp);
9ee6e8bb
PB
10196 break;
10197 case 3: /* Other data processing. */
10198 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10199 if (op < 4) {
10200 /* Saturating add/subtract. */
62b44f05
AR
10201 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10202 goto illegal_op;
10203 }
d9ba4830
PB
10204 tmp = load_reg(s, rn);
10205 tmp2 = load_reg(s, rm);
9ee6e8bb 10206 if (op & 1)
9ef39277 10207 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10208 if (op & 2)
9ef39277 10209 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10210 else
9ef39277 10211 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10212 tcg_temp_free_i32(tmp2);
9ee6e8bb 10213 } else {
62b44f05
AR
10214 switch (op) {
10215 case 0x0a: /* rbit */
10216 case 0x08: /* rev */
10217 case 0x09: /* rev16 */
10218 case 0x0b: /* revsh */
10219 case 0x18: /* clz */
10220 break;
10221 case 0x10: /* sel */
10222 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10223 goto illegal_op;
10224 }
10225 break;
10226 case 0x20: /* crc32/crc32c */
10227 case 0x21:
10228 case 0x22:
10229 case 0x28:
10230 case 0x29:
10231 case 0x2a:
10232 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10233 goto illegal_op;
10234 }
10235 break;
10236 default:
10237 goto illegal_op;
10238 }
d9ba4830 10239 tmp = load_reg(s, rn);
9ee6e8bb
PB
10240 switch (op) {
10241 case 0x0a: /* rbit */
d9ba4830 10242 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10243 break;
10244 case 0x08: /* rev */
66896cb8 10245 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10246 break;
10247 case 0x09: /* rev16 */
d9ba4830 10248 gen_rev16(tmp);
9ee6e8bb
PB
10249 break;
10250 case 0x0b: /* revsh */
d9ba4830 10251 gen_revsh(tmp);
9ee6e8bb
PB
10252 break;
10253 case 0x10: /* sel */
d9ba4830 10254 tmp2 = load_reg(s, rm);
7d1b0095 10255 tmp3 = tcg_temp_new_i32();
0ecb72a5 10256 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10257 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10258 tcg_temp_free_i32(tmp3);
10259 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10260 break;
10261 case 0x18: /* clz */
7539a012 10262 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10263 break;
eb0ecd5a
WN
10264 case 0x20:
10265 case 0x21:
10266 case 0x22:
10267 case 0x28:
10268 case 0x29:
10269 case 0x2a:
10270 {
10271 /* crc32/crc32c */
10272 uint32_t sz = op & 0x3;
10273 uint32_t c = op & 0x8;
10274
eb0ecd5a 10275 tmp2 = load_reg(s, rm);
aa633469
PM
10276 if (sz == 0) {
10277 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10278 } else if (sz == 1) {
10279 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10280 }
eb0ecd5a
WN
10281 tmp3 = tcg_const_i32(1 << sz);
10282 if (c) {
10283 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10284 } else {
10285 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10286 }
10287 tcg_temp_free_i32(tmp2);
10288 tcg_temp_free_i32(tmp3);
10289 break;
10290 }
9ee6e8bb 10291 default:
62b44f05 10292 g_assert_not_reached();
9ee6e8bb
PB
10293 }
10294 }
d9ba4830 10295 store_reg(s, rd, tmp);
9ee6e8bb
PB
10296 break;
10297 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10298 switch ((insn >> 20) & 7) {
10299 case 0: /* 32 x 32 -> 32 */
10300 case 7: /* Unsigned sum of absolute differences. */
10301 break;
10302 case 1: /* 16 x 16 -> 32 */
10303 case 2: /* Dual multiply add. */
10304 case 3: /* 32 * 16 -> 32msb */
10305 case 4: /* Dual multiply subtract. */
10306 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10307 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10308 goto illegal_op;
10309 }
10310 break;
10311 }
9ee6e8bb 10312 op = (insn >> 4) & 0xf;
d9ba4830
PB
10313 tmp = load_reg(s, rn);
10314 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10315 switch ((insn >> 20) & 7) {
10316 case 0: /* 32 x 32 -> 32 */
d9ba4830 10317 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10318 tcg_temp_free_i32(tmp2);
9ee6e8bb 10319 if (rs != 15) {
d9ba4830 10320 tmp2 = load_reg(s, rs);
9ee6e8bb 10321 if (op)
d9ba4830 10322 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10323 else
d9ba4830 10324 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10325 tcg_temp_free_i32(tmp2);
9ee6e8bb 10326 }
9ee6e8bb
PB
10327 break;
10328 case 1: /* 16 x 16 -> 32 */
d9ba4830 10329 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10330 tcg_temp_free_i32(tmp2);
9ee6e8bb 10331 if (rs != 15) {
d9ba4830 10332 tmp2 = load_reg(s, rs);
9ef39277 10333 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10334 tcg_temp_free_i32(tmp2);
9ee6e8bb 10335 }
9ee6e8bb
PB
10336 break;
10337 case 2: /* Dual multiply add. */
10338 case 4: /* Dual multiply subtract. */
10339 if (op)
d9ba4830
PB
10340 gen_swap_half(tmp2);
10341 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10342 if (insn & (1 << 22)) {
e1d177b9 10343 /* This subtraction cannot overflow. */
d9ba4830 10344 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10345 } else {
e1d177b9
PM
10346 /* This addition cannot overflow 32 bits;
10347 * however it may overflow considered as a signed
10348 * operation, in which case we must set the Q flag.
10349 */
9ef39277 10350 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10351 }
7d1b0095 10352 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10353 if (rs != 15)
10354 {
d9ba4830 10355 tmp2 = load_reg(s, rs);
9ef39277 10356 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10357 tcg_temp_free_i32(tmp2);
9ee6e8bb 10358 }
9ee6e8bb
PB
10359 break;
10360 case 3: /* 32 * 16 -> 32msb */
10361 if (op)
d9ba4830 10362 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10363 else
d9ba4830 10364 gen_sxth(tmp2);
a7812ae4
PB
10365 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10366 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10367 tmp = tcg_temp_new_i32();
ecc7b3aa 10368 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10369 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10370 if (rs != 15)
10371 {
d9ba4830 10372 tmp2 = load_reg(s, rs);
9ef39277 10373 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10374 tcg_temp_free_i32(tmp2);
9ee6e8bb 10375 }
9ee6e8bb 10376 break;
838fa72d
AJ
10377 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10378 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10379 if (rs != 15) {
838fa72d
AJ
10380 tmp = load_reg(s, rs);
10381 if (insn & (1 << 20)) {
10382 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10383 } else {
838fa72d 10384 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10385 }
2c0262af 10386 }
838fa72d
AJ
10387 if (insn & (1 << 4)) {
10388 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10389 }
10390 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10391 tmp = tcg_temp_new_i32();
ecc7b3aa 10392 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10393 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10394 break;
10395 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10396 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10397 tcg_temp_free_i32(tmp2);
9ee6e8bb 10398 if (rs != 15) {
d9ba4830
PB
10399 tmp2 = load_reg(s, rs);
10400 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10401 tcg_temp_free_i32(tmp2);
5fd46862 10402 }
9ee6e8bb 10403 break;
2c0262af 10404 }
d9ba4830 10405 store_reg(s, rd, tmp);
2c0262af 10406 break;
9ee6e8bb
PB
10407 case 6: case 7: /* 64-bit multiply, Divide. */
10408 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10409 tmp = load_reg(s, rn);
10410 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10411 if ((op & 0x50) == 0x10) {
10412 /* sdiv, udiv */
d614a513 10413 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10414 goto illegal_op;
47789990 10415 }
9ee6e8bb 10416 if (op & 0x20)
5e3f878a 10417 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10418 else
5e3f878a 10419 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10420 tcg_temp_free_i32(tmp2);
5e3f878a 10421 store_reg(s, rd, tmp);
9ee6e8bb
PB
10422 } else if ((op & 0xe) == 0xc) {
10423 /* Dual multiply accumulate long. */
62b44f05
AR
10424 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10425 tcg_temp_free_i32(tmp);
10426 tcg_temp_free_i32(tmp2);
10427 goto illegal_op;
10428 }
9ee6e8bb 10429 if (op & 1)
5e3f878a
PB
10430 gen_swap_half(tmp2);
10431 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10432 if (op & 0x10) {
5e3f878a 10433 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10434 } else {
5e3f878a 10435 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10436 }
7d1b0095 10437 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10438 /* BUGFIX */
10439 tmp64 = tcg_temp_new_i64();
10440 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10441 tcg_temp_free_i32(tmp);
a7812ae4
PB
10442 gen_addq(s, tmp64, rs, rd);
10443 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10444 tcg_temp_free_i64(tmp64);
2c0262af 10445 } else {
9ee6e8bb
PB
10446 if (op & 0x20) {
10447 /* Unsigned 64-bit multiply */
a7812ae4 10448 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10449 } else {
9ee6e8bb
PB
10450 if (op & 8) {
10451 /* smlalxy */
62b44f05
AR
10452 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10453 tcg_temp_free_i32(tmp2);
10454 tcg_temp_free_i32(tmp);
10455 goto illegal_op;
10456 }
5e3f878a 10457 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10458 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10459 tmp64 = tcg_temp_new_i64();
10460 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10461 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10462 } else {
10463 /* Signed 64-bit multiply */
a7812ae4 10464 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10465 }
b5ff1b31 10466 }
9ee6e8bb
PB
10467 if (op & 4) {
10468 /* umaal */
62b44f05
AR
10469 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10470 tcg_temp_free_i64(tmp64);
10471 goto illegal_op;
10472 }
a7812ae4
PB
10473 gen_addq_lo(s, tmp64, rs);
10474 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10475 } else if (op & 0x40) {
10476 /* 64-bit accumulate. */
a7812ae4 10477 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10478 }
a7812ae4 10479 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10480 tcg_temp_free_i64(tmp64);
5fd46862 10481 }
2c0262af 10482 break;
9ee6e8bb
PB
10483 }
10484 break;
10485 case 6: case 7: case 14: case 15:
10486 /* Coprocessor. */
7517748e
PM
10487 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10488 /* We don't currently implement M profile FP support,
10489 * so this entire space should give a NOCP fault.
10490 */
10491 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10492 default_exception_el(s));
10493 break;
10494 }
9ee6e8bb
PB
10495 if (((insn >> 24) & 3) == 3) {
10496 /* Translate into the equivalent ARM encoding. */
f06053e3 10497 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10498 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10499 goto illegal_op;
7dcc1f89 10500 }
6a57f3eb 10501 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10502 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10503 goto illegal_op;
10504 }
9ee6e8bb
PB
10505 } else {
10506 if (insn & (1 << 28))
10507 goto illegal_op;
7dcc1f89 10508 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10509 goto illegal_op;
7dcc1f89 10510 }
9ee6e8bb
PB
10511 }
10512 break;
10513 case 8: case 9: case 10: case 11:
10514 if (insn & (1 << 15)) {
10515 /* Branches, misc control. */
10516 if (insn & 0x5000) {
10517 /* Unconditional branch. */
10518 /* signextend(hw1[10:0]) -> offset[:12]. */
10519 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10520 /* hw1[10:0] -> offset[11:1]. */
10521 offset |= (insn & 0x7ff) << 1;
10522 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10523 offset[24:22] already have the same value because of the
10524 sign extension above. */
10525 offset ^= ((~insn) & (1 << 13)) << 10;
10526 offset ^= ((~insn) & (1 << 11)) << 11;
10527
9ee6e8bb
PB
10528 if (insn & (1 << 14)) {
10529 /* Branch and link. */
3174f8e9 10530 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10531 }
3b46e624 10532
b0109805 10533 offset += s->pc;
9ee6e8bb
PB
10534 if (insn & (1 << 12)) {
10535 /* b/bl */
b0109805 10536 gen_jmp(s, offset);
9ee6e8bb
PB
10537 } else {
10538 /* blx */
b0109805 10539 offset &= ~(uint32_t)2;
be5e7a76 10540 /* thumb2 bx, no need to check */
b0109805 10541 gen_bx_im(s, offset);
2c0262af 10542 }
9ee6e8bb
PB
10543 } else if (((insn >> 23) & 7) == 7) {
10544 /* Misc control */
10545 if (insn & (1 << 13))
10546 goto illegal_op;
10547
10548 if (insn & (1 << 26)) {
001b3cab
PM
10549 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10550 goto illegal_op;
10551 }
37e6456e
PM
10552 if (!(insn & (1 << 20))) {
10553 /* Hypervisor call (v7) */
10554 int imm16 = extract32(insn, 16, 4) << 12
10555 | extract32(insn, 0, 12);
10556 ARCH(7);
10557 if (IS_USER(s)) {
10558 goto illegal_op;
10559 }
10560 gen_hvc(s, imm16);
10561 } else {
10562 /* Secure monitor call (v6+) */
10563 ARCH(6K);
10564 if (IS_USER(s)) {
10565 goto illegal_op;
10566 }
10567 gen_smc(s);
10568 }
2c0262af 10569 } else {
9ee6e8bb
PB
10570 op = (insn >> 20) & 7;
10571 switch (op) {
10572 case 0: /* msr cpsr. */
b53d8923 10573 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10574 tmp = load_reg(s, rn);
b28b3377
PM
10575 /* the constant is the mask and SYSm fields */
10576 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10577 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10578 tcg_temp_free_i32(addr);
7d1b0095 10579 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10580 gen_lookup_tb(s);
10581 break;
10582 }
10583 /* fall through */
10584 case 1: /* msr spsr. */
b53d8923 10585 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10586 goto illegal_op;
b53d8923 10587 }
8bfd0550
PM
10588
10589 if (extract32(insn, 5, 1)) {
10590 /* MSR (banked) */
10591 int sysm = extract32(insn, 8, 4) |
10592 (extract32(insn, 4, 1) << 4);
10593 int r = op & 1;
10594
10595 gen_msr_banked(s, r, sysm, rm);
10596 break;
10597 }
10598
10599 /* MSR (for PSRs) */
2fbac54b
FN
10600 tmp = load_reg(s, rn);
10601 if (gen_set_psr(s,
7dcc1f89 10602 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10603 op == 1, tmp))
9ee6e8bb
PB
10604 goto illegal_op;
10605 break;
10606 case 2: /* cps, nop-hint. */
10607 if (((insn >> 8) & 7) == 0) {
10608 gen_nop_hint(s, insn & 0xff);
10609 }
10610 /* Implemented as NOP in user mode. */
10611 if (IS_USER(s))
10612 break;
10613 offset = 0;
10614 imm = 0;
10615 if (insn & (1 << 10)) {
10616 if (insn & (1 << 7))
10617 offset |= CPSR_A;
10618 if (insn & (1 << 6))
10619 offset |= CPSR_I;
10620 if (insn & (1 << 5))
10621 offset |= CPSR_F;
10622 if (insn & (1 << 9))
10623 imm = CPSR_A | CPSR_I | CPSR_F;
10624 }
10625 if (insn & (1 << 8)) {
10626 offset |= 0x1f;
10627 imm |= (insn & 0x1f);
10628 }
10629 if (offset) {
2fbac54b 10630 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10631 }
10632 break;
10633 case 3: /* Special control operations. */
426f5abc 10634 ARCH(7);
9ee6e8bb
PB
10635 op = (insn >> 4) & 0xf;
10636 switch (op) {
10637 case 2: /* clrex */
426f5abc 10638 gen_clrex(s);
9ee6e8bb
PB
10639 break;
10640 case 4: /* dsb */
10641 case 5: /* dmb */
61e4c432 10642 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10643 break;
6df99dec
SS
10644 case 6: /* isb */
10645 /* We need to break the TB after this insn
10646 * to execute self-modifying code correctly
10647 * and also to take any pending interrupts
10648 * immediately.
10649 */
0b609cc1 10650 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 10651 break;
9ee6e8bb
PB
10652 default:
10653 goto illegal_op;
10654 }
10655 break;
10656 case 4: /* bxj */
9d7c59c8
PM
10657 /* Trivial implementation equivalent to bx.
10658 * This instruction doesn't exist at all for M-profile.
10659 */
10660 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10661 goto illegal_op;
10662 }
d9ba4830
PB
10663 tmp = load_reg(s, rn);
10664 gen_bx(s, tmp);
9ee6e8bb
PB
10665 break;
10666 case 5: /* Exception return. */
b8b45b68
RV
10667 if (IS_USER(s)) {
10668 goto illegal_op;
10669 }
10670 if (rn != 14 || rd != 15) {
10671 goto illegal_op;
10672 }
10673 tmp = load_reg(s, rn);
10674 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10675 gen_exception_return(s, tmp);
10676 break;
8bfd0550 10677 case 6: /* MRS */
43ac6574
PM
10678 if (extract32(insn, 5, 1) &&
10679 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10680 /* MRS (banked) */
10681 int sysm = extract32(insn, 16, 4) |
10682 (extract32(insn, 4, 1) << 4);
10683
10684 gen_mrs_banked(s, 0, sysm, rd);
10685 break;
10686 }
10687
3d54026f
PM
10688 if (extract32(insn, 16, 4) != 0xf) {
10689 goto illegal_op;
10690 }
10691 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10692 extract32(insn, 0, 8) != 0) {
10693 goto illegal_op;
10694 }
10695
8bfd0550 10696 /* mrs cpsr */
7d1b0095 10697 tmp = tcg_temp_new_i32();
b53d8923 10698 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10699 addr = tcg_const_i32(insn & 0xff);
10700 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10701 tcg_temp_free_i32(addr);
9ee6e8bb 10702 } else {
9ef39277 10703 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10704 }
8984bd2e 10705 store_reg(s, rd, tmp);
9ee6e8bb 10706 break;
8bfd0550 10707 case 7: /* MRS */
43ac6574
PM
10708 if (extract32(insn, 5, 1) &&
10709 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10710 /* MRS (banked) */
10711 int sysm = extract32(insn, 16, 4) |
10712 (extract32(insn, 4, 1) << 4);
10713
10714 gen_mrs_banked(s, 1, sysm, rd);
10715 break;
10716 }
10717
10718 /* mrs spsr. */
9ee6e8bb 10719 /* Not accessible in user mode. */
b53d8923 10720 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10721 goto illegal_op;
b53d8923 10722 }
3d54026f
PM
10723
10724 if (extract32(insn, 16, 4) != 0xf ||
10725 extract32(insn, 0, 8) != 0) {
10726 goto illegal_op;
10727 }
10728
d9ba4830
PB
10729 tmp = load_cpu_field(spsr);
10730 store_reg(s, rd, tmp);
9ee6e8bb 10731 break;
2c0262af
FB
10732 }
10733 }
9ee6e8bb
PB
10734 } else {
10735 /* Conditional branch. */
10736 op = (insn >> 22) & 0xf;
10737 /* Generate a conditional jump to next instruction. */
10738 s->condlabel = gen_new_label();
39fb730a 10739 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10740 s->condjmp = 1;
10741
10742 /* offset[11:1] = insn[10:0] */
10743 offset = (insn & 0x7ff) << 1;
10744 /* offset[17:12] = insn[21:16]. */
10745 offset |= (insn & 0x003f0000) >> 4;
10746 /* offset[31:20] = insn[26]. */
10747 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10748 /* offset[18] = insn[13]. */
10749 offset |= (insn & (1 << 13)) << 5;
10750 /* offset[19] = insn[11]. */
10751 offset |= (insn & (1 << 11)) << 8;
10752
10753 /* jump to the offset */
b0109805 10754 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10755 }
10756 } else {
10757 /* Data processing immediate. */
10758 if (insn & (1 << 25)) {
10759 if (insn & (1 << 24)) {
10760 if (insn & (1 << 20))
10761 goto illegal_op;
10762 /* Bitfield/Saturate. */
10763 op = (insn >> 21) & 7;
10764 imm = insn & 0x1f;
10765 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10766 if (rn == 15) {
7d1b0095 10767 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10768 tcg_gen_movi_i32(tmp, 0);
10769 } else {
10770 tmp = load_reg(s, rn);
10771 }
9ee6e8bb
PB
10772 switch (op) {
10773 case 2: /* Signed bitfield extract. */
10774 imm++;
10775 if (shift + imm > 32)
10776 goto illegal_op;
59a71b4c
RH
10777 if (imm < 32) {
10778 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10779 }
9ee6e8bb
PB
10780 break;
10781 case 6: /* Unsigned bitfield extract. */
10782 imm++;
10783 if (shift + imm > 32)
10784 goto illegal_op;
59a71b4c
RH
10785 if (imm < 32) {
10786 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10787 }
9ee6e8bb
PB
10788 break;
10789 case 3: /* Bitfield insert/clear. */
10790 if (imm < shift)
10791 goto illegal_op;
10792 imm = imm + 1 - shift;
10793 if (imm != 32) {
6ddbc6e4 10794 tmp2 = load_reg(s, rd);
d593c48e 10795 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10796 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10797 }
10798 break;
10799 case 7:
10800 goto illegal_op;
10801 default: /* Saturate. */
9ee6e8bb
PB
10802 if (shift) {
10803 if (op & 1)
6ddbc6e4 10804 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10805 else
6ddbc6e4 10806 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10807 }
6ddbc6e4 10808 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10809 if (op & 4) {
10810 /* Unsigned. */
62b44f05
AR
10811 if ((op & 1) && shift == 0) {
10812 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10813 tcg_temp_free_i32(tmp);
10814 tcg_temp_free_i32(tmp2);
10815 goto illegal_op;
10816 }
9ef39277 10817 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10818 } else {
9ef39277 10819 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10820 }
2c0262af 10821 } else {
9ee6e8bb 10822 /* Signed. */
62b44f05
AR
10823 if ((op & 1) && shift == 0) {
10824 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10825 tcg_temp_free_i32(tmp);
10826 tcg_temp_free_i32(tmp2);
10827 goto illegal_op;
10828 }
9ef39277 10829 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10830 } else {
9ef39277 10831 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10832 }
2c0262af 10833 }
b75263d6 10834 tcg_temp_free_i32(tmp2);
9ee6e8bb 10835 break;
2c0262af 10836 }
6ddbc6e4 10837 store_reg(s, rd, tmp);
9ee6e8bb
PB
10838 } else {
10839 imm = ((insn & 0x04000000) >> 15)
10840 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10841 if (insn & (1 << 22)) {
10842 /* 16-bit immediate. */
10843 imm |= (insn >> 4) & 0xf000;
10844 if (insn & (1 << 23)) {
10845 /* movt */
5e3f878a 10846 tmp = load_reg(s, rd);
86831435 10847 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10848 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10849 } else {
9ee6e8bb 10850 /* movw */
7d1b0095 10851 tmp = tcg_temp_new_i32();
5e3f878a 10852 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10853 }
10854 } else {
9ee6e8bb
PB
10855 /* Add/sub 12-bit immediate. */
10856 if (rn == 15) {
b0109805 10857 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10858 if (insn & (1 << 23))
b0109805 10859 offset -= imm;
9ee6e8bb 10860 else
b0109805 10861 offset += imm;
7d1b0095 10862 tmp = tcg_temp_new_i32();
5e3f878a 10863 tcg_gen_movi_i32(tmp, offset);
2c0262af 10864 } else {
5e3f878a 10865 tmp = load_reg(s, rn);
9ee6e8bb 10866 if (insn & (1 << 23))
5e3f878a 10867 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10868 else
5e3f878a 10869 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10870 }
9ee6e8bb 10871 }
5e3f878a 10872 store_reg(s, rd, tmp);
191abaa2 10873 }
9ee6e8bb
PB
10874 } else {
10875 int shifter_out = 0;
10876 /* modified 12-bit immediate. */
10877 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10878 imm = (insn & 0xff);
10879 switch (shift) {
10880 case 0: /* XY */
10881 /* Nothing to do. */
10882 break;
10883 case 1: /* 00XY00XY */
10884 imm |= imm << 16;
10885 break;
10886 case 2: /* XY00XY00 */
10887 imm |= imm << 16;
10888 imm <<= 8;
10889 break;
10890 case 3: /* XYXYXYXY */
10891 imm |= imm << 16;
10892 imm |= imm << 8;
10893 break;
10894 default: /* Rotated constant. */
10895 shift = (shift << 1) | (imm >> 7);
10896 imm |= 0x80;
10897 imm = imm << (32 - shift);
10898 shifter_out = 1;
10899 break;
b5ff1b31 10900 }
7d1b0095 10901 tmp2 = tcg_temp_new_i32();
3174f8e9 10902 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10903 rn = (insn >> 16) & 0xf;
3174f8e9 10904 if (rn == 15) {
7d1b0095 10905 tmp = tcg_temp_new_i32();
3174f8e9
FN
10906 tcg_gen_movi_i32(tmp, 0);
10907 } else {
10908 tmp = load_reg(s, rn);
10909 }
9ee6e8bb
PB
10910 op = (insn >> 21) & 0xf;
10911 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10912 shifter_out, tmp, tmp2))
9ee6e8bb 10913 goto illegal_op;
7d1b0095 10914 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10915 rd = (insn >> 8) & 0xf;
10916 if (rd != 15) {
3174f8e9
FN
10917 store_reg(s, rd, tmp);
10918 } else {
7d1b0095 10919 tcg_temp_free_i32(tmp);
2c0262af 10920 }
2c0262af 10921 }
9ee6e8bb
PB
10922 }
10923 break;
10924 case 12: /* Load/store single data item. */
10925 {
10926 int postinc = 0;
10927 int writeback = 0;
a99caa48 10928 int memidx;
9bb6558a
PM
10929 ISSInfo issinfo;
10930
9ee6e8bb 10931 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10932 if (disas_neon_ls_insn(s, insn)) {
c1713132 10933 goto illegal_op;
7dcc1f89 10934 }
9ee6e8bb
PB
10935 break;
10936 }
a2fdc890
PM
10937 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10938 if (rs == 15) {
10939 if (!(insn & (1 << 20))) {
10940 goto illegal_op;
10941 }
10942 if (op != 2) {
10943 /* Byte or halfword load space with dest == r15 : memory hints.
10944 * Catch them early so we don't emit pointless addressing code.
10945 * This space is a mix of:
10946 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10947 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10948 * cores)
10949 * unallocated hints, which must be treated as NOPs
10950 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10951 * which is easiest for the decoding logic
10952 * Some space which must UNDEF
10953 */
10954 int op1 = (insn >> 23) & 3;
10955 int op2 = (insn >> 6) & 0x3f;
10956 if (op & 2) {
10957 goto illegal_op;
10958 }
10959 if (rn == 15) {
02afbf64
PM
10960 /* UNPREDICTABLE, unallocated hint or
10961 * PLD/PLDW/PLI (literal)
10962 */
a2fdc890
PM
10963 return 0;
10964 }
10965 if (op1 & 1) {
02afbf64 10966 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10967 }
10968 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10969 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10970 }
10971 /* UNDEF space, or an UNPREDICTABLE */
10972 return 1;
10973 }
10974 }
a99caa48 10975 memidx = get_mem_index(s);
9ee6e8bb 10976 if (rn == 15) {
7d1b0095 10977 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10978 /* PC relative. */
10979 /* s->pc has already been incremented by 4. */
10980 imm = s->pc & 0xfffffffc;
10981 if (insn & (1 << 23))
10982 imm += insn & 0xfff;
10983 else
10984 imm -= insn & 0xfff;
b0109805 10985 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10986 } else {
b0109805 10987 addr = load_reg(s, rn);
9ee6e8bb
PB
10988 if (insn & (1 << 23)) {
10989 /* Positive offset. */
10990 imm = insn & 0xfff;
b0109805 10991 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10992 } else {
9ee6e8bb 10993 imm = insn & 0xff;
2a0308c5
PM
10994 switch ((insn >> 8) & 0xf) {
10995 case 0x0: /* Shifted Register. */
9ee6e8bb 10996 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10997 if (shift > 3) {
10998 tcg_temp_free_i32(addr);
18c9b560 10999 goto illegal_op;
2a0308c5 11000 }
b26eefb6 11001 tmp = load_reg(s, rm);
9ee6e8bb 11002 if (shift)
b26eefb6 11003 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11004 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11005 tcg_temp_free_i32(tmp);
9ee6e8bb 11006 break;
2a0308c5 11007 case 0xc: /* Negative offset. */
b0109805 11008 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11009 break;
2a0308c5 11010 case 0xe: /* User privilege. */
b0109805 11011 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11012 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11013 break;
2a0308c5 11014 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11015 imm = -imm;
11016 /* Fall through. */
2a0308c5 11017 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11018 postinc = 1;
11019 writeback = 1;
11020 break;
2a0308c5 11021 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11022 imm = -imm;
11023 /* Fall through. */
2a0308c5 11024 case 0xf: /* Pre-increment. */
b0109805 11025 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
11026 writeback = 1;
11027 break;
11028 default:
2a0308c5 11029 tcg_temp_free_i32(addr);
b7bcbe95 11030 goto illegal_op;
9ee6e8bb
PB
11031 }
11032 }
11033 }
9bb6558a
PM
11034
11035 issinfo = writeback ? ISSInvalid : rs;
11036
9ee6e8bb
PB
11037 if (insn & (1 << 20)) {
11038 /* Load. */
5a839c0d 11039 tmp = tcg_temp_new_i32();
a2fdc890 11040 switch (op) {
5a839c0d 11041 case 0:
9bb6558a 11042 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11043 break;
11044 case 4:
9bb6558a 11045 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11046 break;
11047 case 1:
9bb6558a 11048 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11049 break;
11050 case 5:
9bb6558a 11051 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11052 break;
11053 case 2:
9bb6558a 11054 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11055 break;
2a0308c5 11056 default:
5a839c0d 11057 tcg_temp_free_i32(tmp);
2a0308c5
PM
11058 tcg_temp_free_i32(addr);
11059 goto illegal_op;
a2fdc890
PM
11060 }
11061 if (rs == 15) {
3bb8a96f 11062 gen_bx_excret(s, tmp);
9ee6e8bb 11063 } else {
a2fdc890 11064 store_reg(s, rs, tmp);
9ee6e8bb
PB
11065 }
11066 } else {
11067 /* Store. */
b0109805 11068 tmp = load_reg(s, rs);
9ee6e8bb 11069 switch (op) {
5a839c0d 11070 case 0:
9bb6558a 11071 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11072 break;
11073 case 1:
9bb6558a 11074 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11075 break;
11076 case 2:
9bb6558a 11077 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11078 break;
2a0308c5 11079 default:
5a839c0d 11080 tcg_temp_free_i32(tmp);
2a0308c5
PM
11081 tcg_temp_free_i32(addr);
11082 goto illegal_op;
b7bcbe95 11083 }
5a839c0d 11084 tcg_temp_free_i32(tmp);
2c0262af 11085 }
9ee6e8bb 11086 if (postinc)
b0109805
PB
11087 tcg_gen_addi_i32(addr, addr, imm);
11088 if (writeback) {
11089 store_reg(s, rn, addr);
11090 } else {
7d1b0095 11091 tcg_temp_free_i32(addr);
b0109805 11092 }
9ee6e8bb
PB
11093 }
11094 break;
11095 default:
11096 goto illegal_op;
2c0262af 11097 }
9ee6e8bb
PB
11098 return 0;
11099illegal_op:
11100 return 1;
2c0262af
FB
11101}
11102
296e5a0a 11103static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11104{
296e5a0a 11105 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11106 int32_t offset;
11107 int i;
39d5492a
PM
11108 TCGv_i32 tmp;
11109 TCGv_i32 tmp2;
11110 TCGv_i32 addr;
99c475ab 11111
99c475ab
FB
11112 switch (insn >> 12) {
11113 case 0: case 1:
396e467c 11114
99c475ab
FB
11115 rd = insn & 7;
11116 op = (insn >> 11) & 3;
11117 if (op == 3) {
11118 /* add/subtract */
11119 rn = (insn >> 3) & 7;
396e467c 11120 tmp = load_reg(s, rn);
99c475ab
FB
11121 if (insn & (1 << 10)) {
11122 /* immediate */
7d1b0095 11123 tmp2 = tcg_temp_new_i32();
396e467c 11124 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11125 } else {
11126 /* reg */
11127 rm = (insn >> 6) & 7;
396e467c 11128 tmp2 = load_reg(s, rm);
99c475ab 11129 }
9ee6e8bb
PB
11130 if (insn & (1 << 9)) {
11131 if (s->condexec_mask)
396e467c 11132 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11133 else
72485ec4 11134 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11135 } else {
11136 if (s->condexec_mask)
396e467c 11137 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11138 else
72485ec4 11139 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11140 }
7d1b0095 11141 tcg_temp_free_i32(tmp2);
396e467c 11142 store_reg(s, rd, tmp);
99c475ab
FB
11143 } else {
11144 /* shift immediate */
11145 rm = (insn >> 3) & 7;
11146 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11147 tmp = load_reg(s, rm);
11148 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11149 if (!s->condexec_mask)
11150 gen_logic_CC(tmp);
11151 store_reg(s, rd, tmp);
99c475ab
FB
11152 }
11153 break;
11154 case 2: case 3:
11155 /* arithmetic large immediate */
11156 op = (insn >> 11) & 3;
11157 rd = (insn >> 8) & 0x7;
396e467c 11158 if (op == 0) { /* mov */
7d1b0095 11159 tmp = tcg_temp_new_i32();
396e467c 11160 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11161 if (!s->condexec_mask)
396e467c
FN
11162 gen_logic_CC(tmp);
11163 store_reg(s, rd, tmp);
11164 } else {
11165 tmp = load_reg(s, rd);
7d1b0095 11166 tmp2 = tcg_temp_new_i32();
396e467c
FN
11167 tcg_gen_movi_i32(tmp2, insn & 0xff);
11168 switch (op) {
11169 case 1: /* cmp */
72485ec4 11170 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11171 tcg_temp_free_i32(tmp);
11172 tcg_temp_free_i32(tmp2);
396e467c
FN
11173 break;
11174 case 2: /* add */
11175 if (s->condexec_mask)
11176 tcg_gen_add_i32(tmp, tmp, tmp2);
11177 else
72485ec4 11178 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11179 tcg_temp_free_i32(tmp2);
396e467c
FN
11180 store_reg(s, rd, tmp);
11181 break;
11182 case 3: /* sub */
11183 if (s->condexec_mask)
11184 tcg_gen_sub_i32(tmp, tmp, tmp2);
11185 else
72485ec4 11186 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11187 tcg_temp_free_i32(tmp2);
396e467c
FN
11188 store_reg(s, rd, tmp);
11189 break;
11190 }
99c475ab 11191 }
99c475ab
FB
11192 break;
11193 case 4:
11194 if (insn & (1 << 11)) {
11195 rd = (insn >> 8) & 7;
5899f386
FB
11196 /* load pc-relative. Bit 1 of PC is ignored. */
11197 val = s->pc + 2 + ((insn & 0xff) * 4);
11198 val &= ~(uint32_t)2;
7d1b0095 11199 addr = tcg_temp_new_i32();
b0109805 11200 tcg_gen_movi_i32(addr, val);
c40c8556 11201 tmp = tcg_temp_new_i32();
9bb6558a
PM
11202 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11203 rd | ISSIs16Bit);
7d1b0095 11204 tcg_temp_free_i32(addr);
b0109805 11205 store_reg(s, rd, tmp);
99c475ab
FB
11206 break;
11207 }
11208 if (insn & (1 << 10)) {
ebfe27c5
PM
11209 /* 0b0100_01xx_xxxx_xxxx
11210 * - data processing extended, branch and exchange
11211 */
99c475ab
FB
11212 rd = (insn & 7) | ((insn >> 4) & 8);
11213 rm = (insn >> 3) & 0xf;
11214 op = (insn >> 8) & 3;
11215 switch (op) {
11216 case 0: /* add */
396e467c
FN
11217 tmp = load_reg(s, rd);
11218 tmp2 = load_reg(s, rm);
11219 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11220 tcg_temp_free_i32(tmp2);
396e467c 11221 store_reg(s, rd, tmp);
99c475ab
FB
11222 break;
11223 case 1: /* cmp */
396e467c
FN
11224 tmp = load_reg(s, rd);
11225 tmp2 = load_reg(s, rm);
72485ec4 11226 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11227 tcg_temp_free_i32(tmp2);
11228 tcg_temp_free_i32(tmp);
99c475ab
FB
11229 break;
11230 case 2: /* mov/cpy */
396e467c
FN
11231 tmp = load_reg(s, rm);
11232 store_reg(s, rd, tmp);
99c475ab 11233 break;
ebfe27c5
PM
11234 case 3:
11235 {
11236 /* 0b0100_0111_xxxx_xxxx
11237 * - branch [and link] exchange thumb register
11238 */
11239 bool link = insn & (1 << 7);
11240
fb602cb7 11241 if (insn & 3) {
ebfe27c5
PM
11242 goto undef;
11243 }
11244 if (link) {
be5e7a76 11245 ARCH(5);
ebfe27c5 11246 }
fb602cb7
PM
11247 if ((insn & 4)) {
11248 /* BXNS/BLXNS: only exists for v8M with the
11249 * security extensions, and always UNDEF if NonSecure.
11250 * We don't implement these in the user-only mode
11251 * either (in theory you can use them from Secure User
11252 * mode but they are too tied in to system emulation.)
11253 */
11254 if (!s->v8m_secure || IS_USER_ONLY) {
11255 goto undef;
11256 }
11257 if (link) {
3e3fa230 11258 gen_blxns(s, rm);
fb602cb7
PM
11259 } else {
11260 gen_bxns(s, rm);
11261 }
11262 break;
11263 }
11264 /* BLX/BX */
ebfe27c5
PM
11265 tmp = load_reg(s, rm);
11266 if (link) {
99c475ab 11267 val = (uint32_t)s->pc | 1;
7d1b0095 11268 tmp2 = tcg_temp_new_i32();
b0109805
PB
11269 tcg_gen_movi_i32(tmp2, val);
11270 store_reg(s, 14, tmp2);
3bb8a96f
PM
11271 gen_bx(s, tmp);
11272 } else {
11273 /* Only BX works as exception-return, not BLX */
11274 gen_bx_excret(s, tmp);
99c475ab 11275 }
99c475ab
FB
11276 break;
11277 }
ebfe27c5 11278 }
99c475ab
FB
11279 break;
11280 }
11281
11282 /* data processing register */
11283 rd = insn & 7;
11284 rm = (insn >> 3) & 7;
11285 op = (insn >> 6) & 0xf;
11286 if (op == 2 || op == 3 || op == 4 || op == 7) {
11287 /* the shift/rotate ops want the operands backwards */
11288 val = rm;
11289 rm = rd;
11290 rd = val;
11291 val = 1;
11292 } else {
11293 val = 0;
11294 }
11295
396e467c 11296 if (op == 9) { /* neg */
7d1b0095 11297 tmp = tcg_temp_new_i32();
396e467c
FN
11298 tcg_gen_movi_i32(tmp, 0);
11299 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11300 tmp = load_reg(s, rd);
11301 } else {
39d5492a 11302 TCGV_UNUSED_I32(tmp);
396e467c 11303 }
99c475ab 11304
396e467c 11305 tmp2 = load_reg(s, rm);
5899f386 11306 switch (op) {
99c475ab 11307 case 0x0: /* and */
396e467c 11308 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11309 if (!s->condexec_mask)
396e467c 11310 gen_logic_CC(tmp);
99c475ab
FB
11311 break;
11312 case 0x1: /* eor */
396e467c 11313 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11314 if (!s->condexec_mask)
396e467c 11315 gen_logic_CC(tmp);
99c475ab
FB
11316 break;
11317 case 0x2: /* lsl */
9ee6e8bb 11318 if (s->condexec_mask) {
365af80e 11319 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11320 } else {
9ef39277 11321 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11322 gen_logic_CC(tmp2);
9ee6e8bb 11323 }
99c475ab
FB
11324 break;
11325 case 0x3: /* lsr */
9ee6e8bb 11326 if (s->condexec_mask) {
365af80e 11327 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11328 } else {
9ef39277 11329 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11330 gen_logic_CC(tmp2);
9ee6e8bb 11331 }
99c475ab
FB
11332 break;
11333 case 0x4: /* asr */
9ee6e8bb 11334 if (s->condexec_mask) {
365af80e 11335 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11336 } else {
9ef39277 11337 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11338 gen_logic_CC(tmp2);
9ee6e8bb 11339 }
99c475ab
FB
11340 break;
11341 case 0x5: /* adc */
49b4c31e 11342 if (s->condexec_mask) {
396e467c 11343 gen_adc(tmp, tmp2);
49b4c31e
RH
11344 } else {
11345 gen_adc_CC(tmp, tmp, tmp2);
11346 }
99c475ab
FB
11347 break;
11348 case 0x6: /* sbc */
2de68a49 11349 if (s->condexec_mask) {
396e467c 11350 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11351 } else {
11352 gen_sbc_CC(tmp, tmp, tmp2);
11353 }
99c475ab
FB
11354 break;
11355 case 0x7: /* ror */
9ee6e8bb 11356 if (s->condexec_mask) {
f669df27
AJ
11357 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11358 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11359 } else {
9ef39277 11360 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11361 gen_logic_CC(tmp2);
9ee6e8bb 11362 }
99c475ab
FB
11363 break;
11364 case 0x8: /* tst */
396e467c
FN
11365 tcg_gen_and_i32(tmp, tmp, tmp2);
11366 gen_logic_CC(tmp);
99c475ab 11367 rd = 16;
5899f386 11368 break;
99c475ab 11369 case 0x9: /* neg */
9ee6e8bb 11370 if (s->condexec_mask)
396e467c 11371 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11372 else
72485ec4 11373 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11374 break;
11375 case 0xa: /* cmp */
72485ec4 11376 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11377 rd = 16;
11378 break;
11379 case 0xb: /* cmn */
72485ec4 11380 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11381 rd = 16;
11382 break;
11383 case 0xc: /* orr */
396e467c 11384 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11385 if (!s->condexec_mask)
396e467c 11386 gen_logic_CC(tmp);
99c475ab
FB
11387 break;
11388 case 0xd: /* mul */
7b2919a0 11389 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11390 if (!s->condexec_mask)
396e467c 11391 gen_logic_CC(tmp);
99c475ab
FB
11392 break;
11393 case 0xe: /* bic */
f669df27 11394 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11395 if (!s->condexec_mask)
396e467c 11396 gen_logic_CC(tmp);
99c475ab
FB
11397 break;
11398 case 0xf: /* mvn */
396e467c 11399 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11400 if (!s->condexec_mask)
396e467c 11401 gen_logic_CC(tmp2);
99c475ab 11402 val = 1;
5899f386 11403 rm = rd;
99c475ab
FB
11404 break;
11405 }
11406 if (rd != 16) {
396e467c
FN
11407 if (val) {
11408 store_reg(s, rm, tmp2);
11409 if (op != 0xf)
7d1b0095 11410 tcg_temp_free_i32(tmp);
396e467c
FN
11411 } else {
11412 store_reg(s, rd, tmp);
7d1b0095 11413 tcg_temp_free_i32(tmp2);
396e467c
FN
11414 }
11415 } else {
7d1b0095
PM
11416 tcg_temp_free_i32(tmp);
11417 tcg_temp_free_i32(tmp2);
99c475ab
FB
11418 }
11419 break;
11420
11421 case 5:
11422 /* load/store register offset. */
11423 rd = insn & 7;
11424 rn = (insn >> 3) & 7;
11425 rm = (insn >> 6) & 7;
11426 op = (insn >> 9) & 7;
b0109805 11427 addr = load_reg(s, rn);
b26eefb6 11428 tmp = load_reg(s, rm);
b0109805 11429 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11430 tcg_temp_free_i32(tmp);
99c475ab 11431
c40c8556 11432 if (op < 3) { /* store */
b0109805 11433 tmp = load_reg(s, rd);
c40c8556
PM
11434 } else {
11435 tmp = tcg_temp_new_i32();
11436 }
99c475ab
FB
11437
11438 switch (op) {
11439 case 0: /* str */
9bb6558a 11440 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11441 break;
11442 case 1: /* strh */
9bb6558a 11443 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11444 break;
11445 case 2: /* strb */
9bb6558a 11446 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11447 break;
11448 case 3: /* ldrsb */
9bb6558a 11449 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11450 break;
11451 case 4: /* ldr */
9bb6558a 11452 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11453 break;
11454 case 5: /* ldrh */
9bb6558a 11455 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11456 break;
11457 case 6: /* ldrb */
9bb6558a 11458 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11459 break;
11460 case 7: /* ldrsh */
9bb6558a 11461 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11462 break;
11463 }
c40c8556 11464 if (op >= 3) { /* load */
b0109805 11465 store_reg(s, rd, tmp);
c40c8556
PM
11466 } else {
11467 tcg_temp_free_i32(tmp);
11468 }
7d1b0095 11469 tcg_temp_free_i32(addr);
99c475ab
FB
11470 break;
11471
11472 case 6:
11473 /* load/store word immediate offset */
11474 rd = insn & 7;
11475 rn = (insn >> 3) & 7;
b0109805 11476 addr = load_reg(s, rn);
99c475ab 11477 val = (insn >> 4) & 0x7c;
b0109805 11478 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11479
11480 if (insn & (1 << 11)) {
11481 /* load */
c40c8556 11482 tmp = tcg_temp_new_i32();
12dcc321 11483 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11484 store_reg(s, rd, tmp);
99c475ab
FB
11485 } else {
11486 /* store */
b0109805 11487 tmp = load_reg(s, rd);
12dcc321 11488 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11489 tcg_temp_free_i32(tmp);
99c475ab 11490 }
7d1b0095 11491 tcg_temp_free_i32(addr);
99c475ab
FB
11492 break;
11493
11494 case 7:
11495 /* load/store byte immediate offset */
11496 rd = insn & 7;
11497 rn = (insn >> 3) & 7;
b0109805 11498 addr = load_reg(s, rn);
99c475ab 11499 val = (insn >> 6) & 0x1f;
b0109805 11500 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11501
11502 if (insn & (1 << 11)) {
11503 /* load */
c40c8556 11504 tmp = tcg_temp_new_i32();
9bb6558a 11505 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11506 store_reg(s, rd, tmp);
99c475ab
FB
11507 } else {
11508 /* store */
b0109805 11509 tmp = load_reg(s, rd);
9bb6558a 11510 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11511 tcg_temp_free_i32(tmp);
99c475ab 11512 }
7d1b0095 11513 tcg_temp_free_i32(addr);
99c475ab
FB
11514 break;
11515
11516 case 8:
11517 /* load/store halfword immediate offset */
11518 rd = insn & 7;
11519 rn = (insn >> 3) & 7;
b0109805 11520 addr = load_reg(s, rn);
99c475ab 11521 val = (insn >> 5) & 0x3e;
b0109805 11522 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11523
11524 if (insn & (1 << 11)) {
11525 /* load */
c40c8556 11526 tmp = tcg_temp_new_i32();
9bb6558a 11527 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11528 store_reg(s, rd, tmp);
99c475ab
FB
11529 } else {
11530 /* store */
b0109805 11531 tmp = load_reg(s, rd);
9bb6558a 11532 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11533 tcg_temp_free_i32(tmp);
99c475ab 11534 }
7d1b0095 11535 tcg_temp_free_i32(addr);
99c475ab
FB
11536 break;
11537
11538 case 9:
11539 /* load/store from stack */
11540 rd = (insn >> 8) & 7;
b0109805 11541 addr = load_reg(s, 13);
99c475ab 11542 val = (insn & 0xff) * 4;
b0109805 11543 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11544
11545 if (insn & (1 << 11)) {
11546 /* load */
c40c8556 11547 tmp = tcg_temp_new_i32();
9bb6558a 11548 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11549 store_reg(s, rd, tmp);
99c475ab
FB
11550 } else {
11551 /* store */
b0109805 11552 tmp = load_reg(s, rd);
9bb6558a 11553 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11554 tcg_temp_free_i32(tmp);
99c475ab 11555 }
7d1b0095 11556 tcg_temp_free_i32(addr);
99c475ab
FB
11557 break;
11558
11559 case 10:
11560 /* add to high reg */
11561 rd = (insn >> 8) & 7;
5899f386
FB
11562 if (insn & (1 << 11)) {
11563 /* SP */
5e3f878a 11564 tmp = load_reg(s, 13);
5899f386
FB
11565 } else {
11566 /* PC. bit 1 is ignored. */
7d1b0095 11567 tmp = tcg_temp_new_i32();
5e3f878a 11568 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11569 }
99c475ab 11570 val = (insn & 0xff) * 4;
5e3f878a
PB
11571 tcg_gen_addi_i32(tmp, tmp, val);
11572 store_reg(s, rd, tmp);
99c475ab
FB
11573 break;
11574
11575 case 11:
11576 /* misc */
11577 op = (insn >> 8) & 0xf;
11578 switch (op) {
11579 case 0:
11580 /* adjust stack pointer */
b26eefb6 11581 tmp = load_reg(s, 13);
99c475ab
FB
11582 val = (insn & 0x7f) * 4;
11583 if (insn & (1 << 7))
6a0d8a1d 11584 val = -(int32_t)val;
b26eefb6
PB
11585 tcg_gen_addi_i32(tmp, tmp, val);
11586 store_reg(s, 13, tmp);
99c475ab
FB
11587 break;
11588
9ee6e8bb
PB
11589 case 2: /* sign/zero extend. */
11590 ARCH(6);
11591 rd = insn & 7;
11592 rm = (insn >> 3) & 7;
b0109805 11593 tmp = load_reg(s, rm);
9ee6e8bb 11594 switch ((insn >> 6) & 3) {
b0109805
PB
11595 case 0: gen_sxth(tmp); break;
11596 case 1: gen_sxtb(tmp); break;
11597 case 2: gen_uxth(tmp); break;
11598 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11599 }
b0109805 11600 store_reg(s, rd, tmp);
9ee6e8bb 11601 break;
99c475ab
FB
11602 case 4: case 5: case 0xc: case 0xd:
11603 /* push/pop */
b0109805 11604 addr = load_reg(s, 13);
5899f386
FB
11605 if (insn & (1 << 8))
11606 offset = 4;
99c475ab 11607 else
5899f386
FB
11608 offset = 0;
11609 for (i = 0; i < 8; i++) {
11610 if (insn & (1 << i))
11611 offset += 4;
11612 }
11613 if ((insn & (1 << 11)) == 0) {
b0109805 11614 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11615 }
99c475ab
FB
11616 for (i = 0; i < 8; i++) {
11617 if (insn & (1 << i)) {
11618 if (insn & (1 << 11)) {
11619 /* pop */
c40c8556 11620 tmp = tcg_temp_new_i32();
12dcc321 11621 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11622 store_reg(s, i, tmp);
99c475ab
FB
11623 } else {
11624 /* push */
b0109805 11625 tmp = load_reg(s, i);
12dcc321 11626 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11627 tcg_temp_free_i32(tmp);
99c475ab 11628 }
5899f386 11629 /* advance to the next address. */
b0109805 11630 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11631 }
11632 }
39d5492a 11633 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11634 if (insn & (1 << 8)) {
11635 if (insn & (1 << 11)) {
11636 /* pop pc */
c40c8556 11637 tmp = tcg_temp_new_i32();
12dcc321 11638 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11639 /* don't set the pc until the rest of the instruction
11640 has completed */
11641 } else {
11642 /* push lr */
b0109805 11643 tmp = load_reg(s, 14);
12dcc321 11644 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11645 tcg_temp_free_i32(tmp);
99c475ab 11646 }
b0109805 11647 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11648 }
5899f386 11649 if ((insn & (1 << 11)) == 0) {
b0109805 11650 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11651 }
99c475ab 11652 /* write back the new stack pointer */
b0109805 11653 store_reg(s, 13, addr);
99c475ab 11654 /* set the new PC value */
be5e7a76 11655 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11656 store_reg_from_load(s, 15, tmp);
be5e7a76 11657 }
99c475ab
FB
11658 break;
11659
9ee6e8bb
PB
11660 case 1: case 3: case 9: case 11: /* czb */
11661 rm = insn & 7;
d9ba4830 11662 tmp = load_reg(s, rm);
9ee6e8bb
PB
11663 s->condlabel = gen_new_label();
11664 s->condjmp = 1;
11665 if (insn & (1 << 11))
cb63669a 11666 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11667 else
cb63669a 11668 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11669 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11670 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11671 val = (uint32_t)s->pc + 2;
11672 val += offset;
11673 gen_jmp(s, val);
11674 break;
11675
11676 case 15: /* IT, nop-hint. */
11677 if ((insn & 0xf) == 0) {
11678 gen_nop_hint(s, (insn >> 4) & 0xf);
11679 break;
11680 }
11681 /* If Then. */
11682 s->condexec_cond = (insn >> 4) & 0xe;
11683 s->condexec_mask = insn & 0x1f;
11684 /* No actual code generated for this insn, just setup state. */
11685 break;
11686
06c949e6 11687 case 0xe: /* bkpt */
d4a2dc67
PM
11688 {
11689 int imm8 = extract32(insn, 0, 8);
be5e7a76 11690 ARCH(5);
73710361
GB
11691 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11692 default_exception_el(s));
06c949e6 11693 break;
d4a2dc67 11694 }
06c949e6 11695
19a6e31c
PM
11696 case 0xa: /* rev, and hlt */
11697 {
11698 int op1 = extract32(insn, 6, 2);
11699
11700 if (op1 == 2) {
11701 /* HLT */
11702 int imm6 = extract32(insn, 0, 6);
11703
11704 gen_hlt(s, imm6);
11705 break;
11706 }
11707
11708 /* Otherwise this is rev */
9ee6e8bb
PB
11709 ARCH(6);
11710 rn = (insn >> 3) & 0x7;
11711 rd = insn & 0x7;
b0109805 11712 tmp = load_reg(s, rn);
19a6e31c 11713 switch (op1) {
66896cb8 11714 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11715 case 1: gen_rev16(tmp); break;
11716 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11717 default:
11718 g_assert_not_reached();
9ee6e8bb 11719 }
b0109805 11720 store_reg(s, rd, tmp);
9ee6e8bb 11721 break;
19a6e31c 11722 }
9ee6e8bb 11723
d9e028c1
PM
11724 case 6:
11725 switch ((insn >> 5) & 7) {
11726 case 2:
11727 /* setend */
11728 ARCH(6);
9886ecdf
PB
11729 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11730 gen_helper_setend(cpu_env);
dcba3a8d 11731 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11732 }
9ee6e8bb 11733 break;
d9e028c1
PM
11734 case 3:
11735 /* cps */
11736 ARCH(6);
11737 if (IS_USER(s)) {
11738 break;
8984bd2e 11739 }
b53d8923 11740 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11741 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11742 /* FAULTMASK */
11743 if (insn & 1) {
11744 addr = tcg_const_i32(19);
11745 gen_helper_v7m_msr(cpu_env, addr, tmp);
11746 tcg_temp_free_i32(addr);
11747 }
11748 /* PRIMASK */
11749 if (insn & 2) {
11750 addr = tcg_const_i32(16);
11751 gen_helper_v7m_msr(cpu_env, addr, tmp);
11752 tcg_temp_free_i32(addr);
11753 }
11754 tcg_temp_free_i32(tmp);
11755 gen_lookup_tb(s);
11756 } else {
11757 if (insn & (1 << 4)) {
11758 shift = CPSR_A | CPSR_I | CPSR_F;
11759 } else {
11760 shift = 0;
11761 }
11762 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11763 }
d9e028c1
PM
11764 break;
11765 default:
11766 goto undef;
9ee6e8bb
PB
11767 }
11768 break;
11769
99c475ab
FB
11770 default:
11771 goto undef;
11772 }
11773 break;
11774
11775 case 12:
a7d3970d 11776 {
99c475ab 11777 /* load/store multiple */
39d5492a
PM
11778 TCGv_i32 loaded_var;
11779 TCGV_UNUSED_I32(loaded_var);
99c475ab 11780 rn = (insn >> 8) & 0x7;
b0109805 11781 addr = load_reg(s, rn);
99c475ab
FB
11782 for (i = 0; i < 8; i++) {
11783 if (insn & (1 << i)) {
99c475ab
FB
11784 if (insn & (1 << 11)) {
11785 /* load */
c40c8556 11786 tmp = tcg_temp_new_i32();
12dcc321 11787 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11788 if (i == rn) {
11789 loaded_var = tmp;
11790 } else {
11791 store_reg(s, i, tmp);
11792 }
99c475ab
FB
11793 } else {
11794 /* store */
b0109805 11795 tmp = load_reg(s, i);
12dcc321 11796 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11797 tcg_temp_free_i32(tmp);
99c475ab 11798 }
5899f386 11799 /* advance to the next address */
b0109805 11800 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11801 }
11802 }
b0109805 11803 if ((insn & (1 << rn)) == 0) {
a7d3970d 11804 /* base reg not in list: base register writeback */
b0109805
PB
11805 store_reg(s, rn, addr);
11806 } else {
a7d3970d
PM
11807 /* base reg in list: if load, complete it now */
11808 if (insn & (1 << 11)) {
11809 store_reg(s, rn, loaded_var);
11810 }
7d1b0095 11811 tcg_temp_free_i32(addr);
b0109805 11812 }
99c475ab 11813 break;
a7d3970d 11814 }
99c475ab
FB
11815 case 13:
11816 /* conditional branch or swi */
11817 cond = (insn >> 8) & 0xf;
11818 if (cond == 0xe)
11819 goto undef;
11820
11821 if (cond == 0xf) {
11822 /* swi */
eaed129d 11823 gen_set_pc_im(s, s->pc);
d4a2dc67 11824 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11825 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11826 break;
11827 }
11828 /* generate a conditional jump to next instruction */
e50e6a20 11829 s->condlabel = gen_new_label();
39fb730a 11830 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11831 s->condjmp = 1;
99c475ab
FB
11832
11833 /* jump to the offset */
5899f386 11834 val = (uint32_t)s->pc + 2;
99c475ab 11835 offset = ((int32_t)insn << 24) >> 24;
5899f386 11836 val += offset << 1;
8aaca4c0 11837 gen_jmp(s, val);
99c475ab
FB
11838 break;
11839
11840 case 14:
358bf29e 11841 if (insn & (1 << 11)) {
296e5a0a
PM
11842 /* thumb_insn_is_16bit() ensures we can't get here for
11843 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11844 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11845 */
11846 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11847 ARCH(5);
11848 offset = ((insn & 0x7ff) << 1);
11849 tmp = load_reg(s, 14);
11850 tcg_gen_addi_i32(tmp, tmp, offset);
11851 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
11852
11853 tmp2 = tcg_temp_new_i32();
11854 tcg_gen_movi_i32(tmp2, s->pc | 1);
11855 store_reg(s, 14, tmp2);
11856 gen_bx(s, tmp);
358bf29e
PB
11857 break;
11858 }
9ee6e8bb 11859 /* unconditional branch */
99c475ab
FB
11860 val = (uint32_t)s->pc;
11861 offset = ((int32_t)insn << 21) >> 21;
11862 val += (offset << 1) + 2;
8aaca4c0 11863 gen_jmp(s, val);
99c475ab
FB
11864 break;
11865
11866 case 15:
296e5a0a
PM
11867 /* thumb_insn_is_16bit() ensures we can't get here for
11868 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11869 */
11870 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11871
11872 if (insn & (1 << 11)) {
11873 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11874 offset = ((insn & 0x7ff) << 1) | 1;
11875 tmp = load_reg(s, 14);
11876 tcg_gen_addi_i32(tmp, tmp, offset);
11877
11878 tmp2 = tcg_temp_new_i32();
11879 tcg_gen_movi_i32(tmp2, s->pc | 1);
11880 store_reg(s, 14, tmp2);
11881 gen_bx(s, tmp);
11882 } else {
11883 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11884 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11885
11886 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
11887 }
9ee6e8bb 11888 break;
99c475ab
FB
11889 }
11890 return;
9ee6e8bb 11891illegal_op:
99c475ab 11892undef:
73710361
GB
11893 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11894 default_exception_el(s));
99c475ab
FB
11895}
11896
541ebcd4
PM
11897static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11898{
11899 /* Return true if the insn at dc->pc might cross a page boundary.
11900 * (False positives are OK, false negatives are not.)
5b8d7289
PM
11901 * We know this is a Thumb insn, and our caller ensures we are
11902 * only called if dc->pc is less than 4 bytes from the page
11903 * boundary, so we cross the page if the first 16 bits indicate
11904 * that this is a 32 bit insn.
541ebcd4 11905 */
5b8d7289 11906 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 11907
5b8d7289 11908 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
11909}
11910
1d8a5535
LV
11911static int arm_tr_init_disas_context(DisasContextBase *dcbase,
11912 CPUState *cs, int max_insns)
2c0262af 11913{
1d8a5535 11914 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 11915 CPUARMState *env = cs->env_ptr;
4e5e1215 11916 ARMCPU *cpu = arm_env_get_cpu(env);
3b46e624 11917
dcba3a8d 11918 dc->pc = dc->base.pc_first;
e50e6a20 11919 dc->condjmp = 0;
3926cc84 11920
40f860cd 11921 dc->aarch64 = 0;
cef9ee70
SS
11922 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11923 * there is no secure EL1, so we route exceptions to EL3.
11924 */
11925 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11926 !arm_el_is_aa64(env, 3);
1d8a5535
LV
11927 dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
11928 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
11929 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
11930 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
11931 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
11932 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
c1e37810 11933 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11934#if !defined(CONFIG_USER_ONLY)
c1e37810 11935 dc->user = (dc->current_el == 0);
3926cc84 11936#endif
1d8a5535
LV
11937 dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
11938 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
11939 dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
11940 dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
11941 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
11942 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
11943 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
fb602cb7
PM
11944 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11945 regime_is_secure(env, dc->mmu_idx);
60322b39 11946 dc->cp_regs = cpu->cp_regs;
a984e42c 11947 dc->features = env->features;
40f860cd 11948
50225ad0
PM
11949 /* Single step state. The code-generation logic here is:
11950 * SS_ACTIVE == 0:
11951 * generate code with no special handling for single-stepping (except
11952 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11953 * this happens anyway because those changes are all system register or
11954 * PSTATE writes).
11955 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11956 * emit code for one insn
11957 * emit code to clear PSTATE.SS
11958 * emit code to generate software step exception for completed step
11959 * end TB (as usual for having generated an exception)
11960 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11961 * emit code to generate a software step exception
11962 * end the TB
11963 */
1d8a5535
LV
11964 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
11965 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
50225ad0
PM
11966 dc->is_ldex = false;
11967 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11968
13189a90
LV
11969 dc->next_page_start =
11970 (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1d8a5535 11971
f7708456
RH
11972 /* If architectural single step active, limit to 1. */
11973 if (is_singlestepping(dc)) {
11974 max_insns = 1;
11975 }
11976
d0264d86
RH
11977 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11978 to those left on the page. */
11979 if (!dc->thumb) {
11980 int bound = (dc->next_page_start - dc->base.pc_first) / 4;
11981 max_insns = MIN(max_insns, bound);
11982 }
11983
a7812ae4
PB
11984 cpu_F0s = tcg_temp_new_i32();
11985 cpu_F1s = tcg_temp_new_i32();
11986 cpu_F0d = tcg_temp_new_i64();
11987 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
11988 cpu_V0 = cpu_F0d;
11989 cpu_V1 = cpu_F1d;
e677137d 11990 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11991 cpu_M0 = tcg_temp_new_i64();
1d8a5535
LV
11992
11993 return max_insns;
11994}
11995
b1476854
LV
11996static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11997{
11998 DisasContext *dc = container_of(dcbase, DisasContext, base);
11999
12000 /* A note on handling of the condexec (IT) bits:
12001 *
12002 * We want to avoid the overhead of having to write the updated condexec
12003 * bits back to the CPUARMState for every instruction in an IT block. So:
12004 * (1) if the condexec bits are not already zero then we write
12005 * zero back into the CPUARMState now. This avoids complications trying
12006 * to do it at the end of the block. (For example if we don't do this
12007 * it's hard to identify whether we can safely skip writing condexec
12008 * at the end of the TB, which we definitely want to do for the case
12009 * where a TB doesn't do anything with the IT state at all.)
12010 * (2) if we are going to leave the TB then we call gen_set_condexec()
12011 * which will write the correct value into CPUARMState if zero is wrong.
12012 * This is done both for leaving the TB at the end, and for leaving
12013 * it because of an exception we know will happen, which is done in
12014 * gen_exception_insn(). The latter is necessary because we need to
12015 * leave the TB with the PC/IT state just prior to execution of the
12016 * instruction which caused the exception.
12017 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12018 * then the CPUARMState will be wrong and we need to reset it.
12019 * This is handled in the same way as restoration of the
12020 * PC in these situations; we save the value of the condexec bits
12021 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12022 * then uses this to restore them after an exception.
12023 *
12024 * Note that there are no instructions which can read the condexec
12025 * bits, and none which can write non-static values to them, so
12026 * we don't need to care about whether CPUARMState is correct in the
12027 * middle of a TB.
12028 */
12029
12030 /* Reset the conditional execution bits immediately. This avoids
12031 complications trying to do it at the end of the block. */
12032 if (dc->condexec_mask || dc->condexec_cond) {
12033 TCGv_i32 tmp = tcg_temp_new_i32();
12034 tcg_gen_movi_i32(tmp, 0);
12035 store_cpu_field(tmp, condexec_bits);
12036 }
23169224 12037 tcg_clear_temp_count();
b1476854
LV
12038}
12039
f62bd897
LV
12040static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12041{
12042 DisasContext *dc = container_of(dcbase, DisasContext, base);
12043
12044 dc->insn_start_idx = tcg_op_buf_count();
12045 tcg_gen_insn_start(dc->pc,
12046 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12047 0);
12048}
12049
a68956ad
LV
12050static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12051 const CPUBreakpoint *bp)
12052{
12053 DisasContext *dc = container_of(dcbase, DisasContext, base);
12054
12055 if (bp->flags & BP_CPU) {
12056 gen_set_condexec(dc);
12057 gen_set_pc_im(dc, dc->pc);
12058 gen_helper_check_breakpoints(cpu_env);
12059 /* End the TB early; it's likely not going to be executed */
12060 dc->base.is_jmp = DISAS_TOO_MANY;
12061 } else {
12062 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12063 /* The address covered by the breakpoint must be
12064 included in [tb->pc, tb->pc + tb->size) in order
12065 to for it to be properly cleared -- thus we
12066 increment the PC here so that the logic setting
12067 tb->size below does the right thing. */
12068 /* TODO: Advance PC by correct instruction length to
12069 * avoid disassembler error messages */
12070 dc->pc += 2;
12071 dc->base.is_jmp = DISAS_NORETURN;
12072 }
12073
12074 return true;
12075}
12076
/* Common pre-decode checks shared by the ARM and Thumb translate_insn
 * hooks.  Returns true if an exception was generated instead of an
 * insn (caller must return without decoding), false to proceed.
 */
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page. */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block. */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         * a) we just took an exception to an EL which is being debugged
         * and this is the first insn in the exception handler
         * b) debug exceptions were masked and we just unmasked them
         * without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
13189a90 12110
d0264d86 12111static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12112{
13189a90
LV
12113 if (dc->condjmp && !dc->base.is_jmp) {
12114 gen_set_label(dc->condlabel);
12115 dc->condjmp = 0;
12116 }
13189a90 12117 dc->base.pc_next = dc->pc;
23169224 12118 translator_loop_temp_check(&dc->base);
13189a90
LV
12119}
12120
722ef0a5
RH
12121static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12122{
12123 DisasContext *dc = container_of(dcbase, DisasContext, base);
12124 CPUARMState *env = cpu->env_ptr;
12125 unsigned int insn;
12126
12127 if (arm_pre_translate_insn(dc)) {
12128 return;
12129 }
12130
12131 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
12132 dc->pc += 4;
12133 disas_arm_insn(dc, insn);
12134
d0264d86
RH
12135 arm_post_translate_insn(dc);
12136
12137 /* ARM is a fixed-length ISA. We performed the cross-page check
12138 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12139}
12140
dcf14dfb
PM
12141static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12142{
12143 /* Return true if this Thumb insn is always unconditional,
12144 * even inside an IT block. This is true of only a very few
12145 * instructions: BKPT, HLT, and SG.
12146 *
12147 * A larger class of instructions are UNPREDICTABLE if used
12148 * inside an IT block; we do not need to detect those here, because
12149 * what we do by default (perform the cc check and update the IT
12150 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12151 * choice for those situations.
12152 *
12153 * insn is either a 16-bit or a 32-bit instruction; the two are
12154 * distinguishable because for the 16-bit case the top 16 bits
12155 * are zeroes, and that isn't a valid 32-bit encoding.
12156 */
12157 if ((insn & 0xffffff00) == 0xbe00) {
12158 /* BKPT */
12159 return true;
12160 }
12161
12162 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12163 !arm_dc_feature(s, ARM_FEATURE_M)) {
12164 /* HLT: v8A only. This is unconditional even when it is going to
12165 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12166 * For v7 cores this was a plain old undefined encoding and so
12167 * honours its cc check. (We might be using the encoding as
12168 * a semihosting trap, but we don't change the cc check behaviour
12169 * on that account, because a debugger connected to a real v7A
12170 * core and emulating semihosting traps by catching the UNDEF
12171 * exception would also only see cases where the cc check passed.
12172 * No guest code should be trying to do a HLT semihosting trap
12173 * in an IT block anyway.
12174 */
12175 return true;
12176 }
12177
12178 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12179 arm_dc_feature(s, ARM_FEATURE_M)) {
12180 /* SG: v8M only */
12181 return true;
12182 }
12183
12184 return false;
12185}
12186
722ef0a5
RH
/* Translate one T16/T32 instruction at dc->pc, including the IT-block
 * condition check and the advance of the IT state machine.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        /* An exception was generated instead of this insn. */
        return;
    }

    /* Fetch the first halfword; a second one is appended for T32. */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }

    /* Inside an IT block, emit a conditional skip over this insn,
     * unless the insn is one of the few that execute unconditionally.
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            dc->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, dc->condlabel);
            dc->condjmp = 1;
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc >= dc->next_page_start
            || (dc->pc >= dc->next_page_start - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12256
/* Emit the end-of-TB code: handle the pending is_jmp disposition for
 * both the single-stepping and normal cases, then (if condjmp is set)
 * the "condition failed" path for a trailing conditional branch/trap.
 * Note that some switch cases are deliberately placed after "default"
 * so they can skip the fall-through code.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            /* NORETURN is below "default" so it does not take the
             * singlestep-exception fall-through path above.
             */
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
12373
4013f7fc
LV
12374static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12375{
12376 DisasContext *dc = container_of(dcbase, DisasContext, base);
12377
12378 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12379 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size,
12380 dc->thumb | (dc->sctlr_b << 1));
12381}
12382
23169224
LV
/* Translator hook table for A32 (ARM) TBs; see translator_loop(). */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12392
722ef0a5
RH
/* Translator hook table for T16/T32 (Thumb) TBs; identical to
 * arm_translator_ops except for the per-insn translate hook.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12402
70d3c035 12403/* generate intermediate code for basic block 'tb'. */
23169224 12404void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12405{
23169224
LV
12406 DisasContext dc;
12407 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12408
722ef0a5
RH
12409 if (ARM_TBFLAG_THUMB(tb->flags)) {
12410 ops = &thumb_translator_ops;
12411 }
23169224 12412#ifdef TARGET_AARCH64
70d3c035 12413 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 12414 ops = &aarch64_translator_ops;
2c0262af
FB
12415 }
12416#endif
23169224
LV
12417
12418 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
12419}
12420
/* CPU mode names indexed by PSR.M[3:0]; "???" marks reserved encodings.
 * Referenced by arm_cpu_dump_state() for the PSR mode field.
 * The pointers are const as well as the strings: the table is never
 * modified, so let it live in read-only storage.
 */
static const char *const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 12425
878096ee
AF
/* Dump the CPU register state to 'f' for debugging ("info registers").
 * Delegates to the AArch64 dumper when the core is in 64-bit state;
 * otherwise prints R0-R15, then XPSR (M-profile) or CPSR (A/R-profile),
 * and optionally the VFP registers when CPU_DUMP_FPU is requested.
 */
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    /* Core registers, four per line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: print XPSR with security state and privilege mode. */
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        /* A/R-profile: print CPSR with NS/S state and mode name. */
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        /* 16 D-regs for plain VFP, 32 when VFP3 is present. */
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
a6b025d3 12512
bad729e2
RH
12513void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12514 target_ulong *data)
d2856f1a 12515{
3926cc84 12516 if (is_a64(env)) {
bad729e2 12517 env->pc = data[0];
40f860cd 12518 env->condexec_bits = 0;
aaa1f954 12519 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12520 } else {
bad729e2
RH
12521 env->regs[15] = data[0];
12522 env->condexec_bits = data[1];
aaa1f954 12523 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12524 }
d2856f1a 12525}