]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
arm: Add support for M profile CPUs having different MMU index semantics
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
2b51668f
PM
40#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 42/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 43#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 44#define ENABLE_ARCH_5J 0
2b51668f
PM
45#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 50
86753403 51#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 52
f570c61e 53#include "translate.h"
e12ce78d 54
b5ff1b31
FB
55#if defined(CONFIG_USER_ONLY)
56#define IS_USER(s) 1
57#else
58#define IS_USER(s) (s->user)
59#endif
60
1bcea73e 61TCGv_env cpu_env;
ad69471c 62/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 63static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 64static TCGv_i32 cpu_R[16];
78bcaa3e
RH
65TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66TCGv_i64 cpu_exclusive_addr;
67TCGv_i64 cpu_exclusive_val;
ad69471c 68
b26eefb6 69/* FIXME: These should be removed. */
39d5492a 70static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 71static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 72
022c62cb 73#include "exec/gen-icount.h"
2e70f6ef 74
155c3eac
FN
75static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
b26eefb6
PB
79/* initialize TCG globals. */
80void arm_translate_init(void)
81{
155c3eac
FN
82 int i;
83
a7812ae4 84 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7c255043 85 tcg_ctx.tcg_env = cpu_env;
a7812ae4 86
155c3eac 87 for (i = 0; i < 16; i++) {
e1ccc054 88 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 89 offsetof(CPUARMState, regs[i]),
155c3eac
FN
90 regnames[i]);
91 }
e1ccc054
RH
92 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
93 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
94 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
95 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 96
e1ccc054 97 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 98 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 99 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 100 offsetof(CPUARMState, exclusive_val), "exclusive_val");
155c3eac 101
14ade10f 102 a64_translate_init();
b26eefb6
PB
103}
104
9bb6558a
PM
105/* Flags for the disas_set_da_iss info argument:
106 * lower bits hold the Rt register number, higher bits are flags.
107 */
108typedef enum ISSInfo {
109 ISSNone = 0,
110 ISSRegMask = 0x1f,
111 ISSInvalid = (1 << 5),
112 ISSIsAcqRel = (1 << 6),
113 ISSIsWrite = (1 << 7),
114 ISSIs16Bit = (1 << 8),
115} ISSInfo;
116
117/* Save the syndrome information for a Data Abort */
118static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
119{
120 uint32_t syn;
121 int sas = memop & MO_SIZE;
122 bool sse = memop & MO_SIGN;
123 bool is_acqrel = issinfo & ISSIsAcqRel;
124 bool is_write = issinfo & ISSIsWrite;
125 bool is_16bit = issinfo & ISSIs16Bit;
126 int srt = issinfo & ISSRegMask;
127
128 if (issinfo & ISSInvalid) {
129 /* Some callsites want to conditionally provide ISS info,
130 * eg "only if this was not a writeback"
131 */
132 return;
133 }
134
135 if (srt == 15) {
136 /* For AArch32, insns where the src/dest is R15 never generate
137 * ISS information. Catching that here saves checking at all
138 * the call sites.
139 */
140 return;
141 }
142
143 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
144 0, 0, 0, is_write, 0, is_16bit);
145 disas_set_insn_syndrome(s, syn);
146}
147
8bd5c820 148static inline int get_a32_user_mem_index(DisasContext *s)
579d21cc 149{
8bd5c820 150 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
579d21cc
PM
151 * insns:
152 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
153 * otherwise, access as if at PL0.
154 */
155 switch (s->mmu_idx) {
156 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
157 case ARMMMUIdx_S12NSE0:
158 case ARMMMUIdx_S12NSE1:
8bd5c820 159 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
579d21cc
PM
160 case ARMMMUIdx_S1E3:
161 case ARMMMUIdx_S1SE0:
162 case ARMMMUIdx_S1SE1:
8bd5c820 163 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
579d21cc
PM
164 case ARMMMUIdx_S2NS:
165 default:
166 g_assert_not_reached();
167 }
168}
169
39d5492a 170static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 171{
39d5492a 172 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
173 tcg_gen_ld_i32(tmp, cpu_env, offset);
174 return tmp;
175}
176
0ecb72a5 177#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 178
39d5492a 179static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
180{
181 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 182 tcg_temp_free_i32(var);
d9ba4830
PB
183}
184
185#define store_cpu_field(var, name) \
0ecb72a5 186 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 187
b26eefb6 188/* Set a variable to the value of a CPU register. */
39d5492a 189static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
190{
191 if (reg == 15) {
192 uint32_t addr;
b90372ad 193 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
194 if (s->thumb)
195 addr = (long)s->pc + 2;
196 else
197 addr = (long)s->pc + 4;
198 tcg_gen_movi_i32(var, addr);
199 } else {
155c3eac 200 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
201 }
202}
203
204/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 205static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 206{
39d5492a 207 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
208 load_reg_var(s, tmp, reg);
209 return tmp;
210}
211
212/* Set a CPU register. The source must be a temporary and will be
213 marked as dead. */
39d5492a 214static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
215{
216 if (reg == 15) {
9b6a3ea7
PM
217 /* In Thumb mode, we must ignore bit 0.
218 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
219 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
220 * We choose to ignore [1:0] in ARM mode for all architecture versions.
221 */
222 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
b26eefb6
PB
223 s->is_jmp = DISAS_JUMP;
224 }
155c3eac 225 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 226 tcg_temp_free_i32(var);
b26eefb6
PB
227}
228
/* Value extensions.  In-place zero/sign extension of a 32-bit temp;
 * the *16 dual-halfword forms go through helpers.
 */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 237
b26eefb6 238
39d5492a 239static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 240{
39d5492a 241 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 242 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
243 tcg_temp_free_i32(tmp_mask);
244}
d9ba4830
PB
245/* Set NZCV flags from the high 4 bits of var. */
246#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
247
d4a2dc67 248static void gen_exception_internal(int excp)
d9ba4830 249{
d4a2dc67
PM
250 TCGv_i32 tcg_excp = tcg_const_i32(excp);
251
252 assert(excp_is_internal(excp));
253 gen_helper_exception_internal(cpu_env, tcg_excp);
254 tcg_temp_free_i32(tcg_excp);
255}
256
73710361 257static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
258{
259 TCGv_i32 tcg_excp = tcg_const_i32(excp);
260 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 261 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 262
73710361
GB
263 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
264 tcg_syn, tcg_el);
265
266 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
267 tcg_temp_free_i32(tcg_syn);
268 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
269}
270
50225ad0
PM
271static void gen_ss_advance(DisasContext *s)
272{
273 /* If the singlestep state is Active-not-pending, advance to
274 * Active-pending.
275 */
276 if (s->ss_active) {
277 s->pstate_ss = 0;
278 gen_helper_clear_pstate_ss(cpu_env);
279 }
280}
281
282static void gen_step_complete_exception(DisasContext *s)
283{
284 /* We just completed step of an insn. Move from Active-not-pending
285 * to Active-pending, and then also take the swstep exception.
286 * This corresponds to making the (IMPDEF) choice to prioritize
287 * swstep exceptions over asynchronous exceptions taken to an exception
288 * level where debug is disabled. This choice has the advantage that
289 * we do not need to maintain internal state corresponding to the
290 * ISV/EX syndrome bits between completion of the step and generation
291 * of the exception, and our syndrome information is always correct.
292 */
293 gen_ss_advance(s);
73710361
GB
294 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
295 default_exception_el(s));
50225ad0
PM
296 s->is_jmp = DISAS_EXC;
297}
298
5425415e
PM
299static void gen_singlestep_exception(DisasContext *s)
300{
301 /* Generate the right kind of exception for singlestep, which is
302 * either the architectural singlestep or EXCP_DEBUG for QEMU's
303 * gdb singlestepping.
304 */
305 if (s->ss_active) {
306 gen_step_complete_exception(s);
307 } else {
308 gen_exception_internal(EXCP_DEBUG);
309 }
310}
311
b636649f
PM
312static inline bool is_singlestepping(DisasContext *s)
313{
314 /* Return true if we are singlestepping either because of
315 * architectural singlestep or QEMU gdbstub singlestep. This does
316 * not include the command line '-singlestep' mode which is rather
317 * misnamed as it only means "one instruction per TB" and doesn't
318 * affect the code we generate.
319 */
320 return s->singlestep_enabled || s->ss_active;
321}
322
39d5492a 323static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 324{
39d5492a
PM
325 TCGv_i32 tmp1 = tcg_temp_new_i32();
326 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
327 tcg_gen_ext16s_i32(tmp1, a);
328 tcg_gen_ext16s_i32(tmp2, b);
3670669c 329 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 330 tcg_temp_free_i32(tmp2);
3670669c
PB
331 tcg_gen_sari_i32(a, a, 16);
332 tcg_gen_sari_i32(b, b, 16);
333 tcg_gen_mul_i32(b, b, a);
334 tcg_gen_mov_i32(a, tmp1);
7d1b0095 335 tcg_temp_free_i32(tmp1);
3670669c
PB
336}
337
338/* Byteswap each halfword. */
39d5492a 339static void gen_rev16(TCGv_i32 var)
3670669c 340{
39d5492a 341 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
342 tcg_gen_shri_i32(tmp, var, 8);
343 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
344 tcg_gen_shli_i32(var, var, 8);
345 tcg_gen_andi_i32(var, var, 0xff00ff00);
346 tcg_gen_or_i32(var, var, tmp);
7d1b0095 347 tcg_temp_free_i32(tmp);
3670669c
PB
348}
349
350/* Byteswap low halfword and sign extend. */
39d5492a 351static void gen_revsh(TCGv_i32 var)
3670669c 352{
1a855029
AJ
353 tcg_gen_ext16u_i32(var, var);
354 tcg_gen_bswap16_i32(var, var);
355 tcg_gen_ext16s_i32(var, var);
3670669c
PB
356}
357
838fa72d 358/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 359static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 360{
838fa72d
AJ
361 TCGv_i64 tmp64 = tcg_temp_new_i64();
362
363 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 364 tcg_temp_free_i32(b);
838fa72d
AJ
365 tcg_gen_shli_i64(tmp64, tmp64, 32);
366 tcg_gen_add_i64(a, tmp64, a);
367
368 tcg_temp_free_i64(tmp64);
369 return a;
370}
371
372/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 373static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
374{
375 TCGv_i64 tmp64 = tcg_temp_new_i64();
376
377 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 378 tcg_temp_free_i32(b);
838fa72d
AJ
379 tcg_gen_shli_i64(tmp64, tmp64, 32);
380 tcg_gen_sub_i64(a, tmp64, a);
381
382 tcg_temp_free_i64(tmp64);
383 return a;
3670669c
PB
384}
385
5e3f878a 386/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 387static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 388{
39d5492a
PM
389 TCGv_i32 lo = tcg_temp_new_i32();
390 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 391 TCGv_i64 ret;
5e3f878a 392
831d7fe8 393 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 394 tcg_temp_free_i32(a);
7d1b0095 395 tcg_temp_free_i32(b);
831d7fe8
RH
396
397 ret = tcg_temp_new_i64();
398 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
399 tcg_temp_free_i32(lo);
400 tcg_temp_free_i32(hi);
831d7fe8
RH
401
402 return ret;
5e3f878a
PB
403}
404
39d5492a 405static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 406{
39d5492a
PM
407 TCGv_i32 lo = tcg_temp_new_i32();
408 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 409 TCGv_i64 ret;
5e3f878a 410
831d7fe8 411 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 412 tcg_temp_free_i32(a);
7d1b0095 413 tcg_temp_free_i32(b);
831d7fe8
RH
414
415 ret = tcg_temp_new_i64();
416 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
417 tcg_temp_free_i32(lo);
418 tcg_temp_free_i32(hi);
831d7fe8
RH
419
420 return ret;
5e3f878a
PB
421}
422
8f01245e 423/* Swap low and high halfwords. */
39d5492a 424static void gen_swap_half(TCGv_i32 var)
8f01245e 425{
39d5492a 426 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
427 tcg_gen_shri_i32(tmp, var, 16);
428 tcg_gen_shli_i32(var, var, 16);
429 tcg_gen_or_i32(var, var, tmp);
7d1b0095 430 tcg_temp_free_i32(tmp);
8f01245e
PB
431}
432
b26eefb6
PB
433/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
434 tmp = (t0 ^ t1) & 0x8000;
435 t0 &= ~0x8000;
436 t1 &= ~0x8000;
437 t0 = (t0 + t1) ^ tmp;
438 */
439
39d5492a 440static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 441{
39d5492a 442 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
443 tcg_gen_xor_i32(tmp, t0, t1);
444 tcg_gen_andi_i32(tmp, tmp, 0x8000);
445 tcg_gen_andi_i32(t0, t0, ~0x8000);
446 tcg_gen_andi_i32(t1, t1, ~0x8000);
447 tcg_gen_add_i32(t0, t0, t1);
448 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
449 tcg_temp_free_i32(tmp);
450 tcg_temp_free_i32(t1);
b26eefb6
PB
451}
452
453/* Set CF to the top bit of var. */
39d5492a 454static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 455{
66c374de 456 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
457}
458
459/* Set N and Z flags from var. */
39d5492a 460static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 461{
66c374de
AJ
462 tcg_gen_mov_i32(cpu_NF, var);
463 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
464}
465
466/* T0 += T1 + CF. */
39d5492a 467static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 468{
396e467c 469 tcg_gen_add_i32(t0, t0, t1);
66c374de 470 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
471}
472
e9bb4aa9 473/* dest = T0 + T1 + CF. */
39d5492a 474static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 475{
e9bb4aa9 476 tcg_gen_add_i32(dest, t0, t1);
66c374de 477 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
478}
479
3670669c 480/* dest = T0 - T1 + CF - 1. */
39d5492a 481static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 482{
3670669c 483 tcg_gen_sub_i32(dest, t0, t1);
66c374de 484 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 485 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
486}
487
72485ec4 488/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 489static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 490{
39d5492a 491 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
492 tcg_gen_movi_i32(tmp, 0);
493 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 494 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 495 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
496 tcg_gen_xor_i32(tmp, t0, t1);
497 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
498 tcg_temp_free_i32(tmp);
499 tcg_gen_mov_i32(dest, cpu_NF);
500}
501
49b4c31e 502/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 503static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 504{
39d5492a 505 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
506 if (TCG_TARGET_HAS_add2_i32) {
507 tcg_gen_movi_i32(tmp, 0);
508 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 509 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
510 } else {
511 TCGv_i64 q0 = tcg_temp_new_i64();
512 TCGv_i64 q1 = tcg_temp_new_i64();
513 tcg_gen_extu_i32_i64(q0, t0);
514 tcg_gen_extu_i32_i64(q1, t1);
515 tcg_gen_add_i64(q0, q0, q1);
516 tcg_gen_extu_i32_i64(q1, cpu_CF);
517 tcg_gen_add_i64(q0, q0, q1);
518 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
519 tcg_temp_free_i64(q0);
520 tcg_temp_free_i64(q1);
521 }
522 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
523 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
524 tcg_gen_xor_i32(tmp, t0, t1);
525 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
526 tcg_temp_free_i32(tmp);
527 tcg_gen_mov_i32(dest, cpu_NF);
528}
529
72485ec4 530/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 531static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 532{
39d5492a 533 TCGv_i32 tmp;
72485ec4
AJ
534 tcg_gen_sub_i32(cpu_NF, t0, t1);
535 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
536 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
537 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
538 tmp = tcg_temp_new_i32();
539 tcg_gen_xor_i32(tmp, t0, t1);
540 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
541 tcg_temp_free_i32(tmp);
542 tcg_gen_mov_i32(dest, cpu_NF);
543}
544
e77f0832 545/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 546static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 547{
39d5492a 548 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
549 tcg_gen_not_i32(tmp, t1);
550 gen_adc_CC(dest, t0, tmp);
39d5492a 551 tcg_temp_free_i32(tmp);
2de68a49
RH
552}
553
365af80e 554#define GEN_SHIFT(name) \
39d5492a 555static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 556{ \
39d5492a 557 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
558 tmp1 = tcg_temp_new_i32(); \
559 tcg_gen_andi_i32(tmp1, t1, 0xff); \
560 tmp2 = tcg_const_i32(0); \
561 tmp3 = tcg_const_i32(0x1f); \
562 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
563 tcg_temp_free_i32(tmp3); \
564 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
565 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
566 tcg_temp_free_i32(tmp2); \
567 tcg_temp_free_i32(tmp1); \
568}
569GEN_SHIFT(shl)
570GEN_SHIFT(shr)
571#undef GEN_SHIFT
572
39d5492a 573static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 574{
39d5492a 575 TCGv_i32 tmp1, tmp2;
365af80e
AJ
576 tmp1 = tcg_temp_new_i32();
577 tcg_gen_andi_i32(tmp1, t1, 0xff);
578 tmp2 = tcg_const_i32(0x1f);
579 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
580 tcg_temp_free_i32(tmp2);
581 tcg_gen_sar_i32(dest, t0, tmp1);
582 tcg_temp_free_i32(tmp1);
583}
584
39d5492a 585static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 586{
39d5492a
PM
587 TCGv_i32 c0 = tcg_const_i32(0);
588 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
589 tcg_gen_neg_i32(tmp, src);
590 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
591 tcg_temp_free_i32(c0);
592 tcg_temp_free_i32(tmp);
593}
ad69471c 594
39d5492a 595static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 596{
9a119ff6 597 if (shift == 0) {
66c374de 598 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 599 } else {
66c374de
AJ
600 tcg_gen_shri_i32(cpu_CF, var, shift);
601 if (shift != 31) {
602 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
603 }
9a119ff6 604 }
9a119ff6 605}
b26eefb6 606
9a119ff6 607/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
608static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
609 int shift, int flags)
9a119ff6
PB
610{
611 switch (shiftop) {
612 case 0: /* LSL */
613 if (shift != 0) {
614 if (flags)
615 shifter_out_im(var, 32 - shift);
616 tcg_gen_shli_i32(var, var, shift);
617 }
618 break;
619 case 1: /* LSR */
620 if (shift == 0) {
621 if (flags) {
66c374de 622 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
623 }
624 tcg_gen_movi_i32(var, 0);
625 } else {
626 if (flags)
627 shifter_out_im(var, shift - 1);
628 tcg_gen_shri_i32(var, var, shift);
629 }
630 break;
631 case 2: /* ASR */
632 if (shift == 0)
633 shift = 32;
634 if (flags)
635 shifter_out_im(var, shift - 1);
636 if (shift == 32)
637 shift = 31;
638 tcg_gen_sari_i32(var, var, shift);
639 break;
640 case 3: /* ROR/RRX */
641 if (shift != 0) {
642 if (flags)
643 shifter_out_im(var, shift - 1);
f669df27 644 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 645 } else {
39d5492a 646 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 647 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
648 if (flags)
649 shifter_out_im(var, 0);
650 tcg_gen_shri_i32(var, var, 1);
b26eefb6 651 tcg_gen_or_i32(var, var, tmp);
7d1b0095 652 tcg_temp_free_i32(tmp);
b26eefb6
PB
653 }
654 }
655};
656
39d5492a
PM
657static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
658 TCGv_i32 shift, int flags)
8984bd2e
PB
659{
660 if (flags) {
661 switch (shiftop) {
9ef39277
BS
662 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
663 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
664 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
665 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
666 }
667 } else {
668 switch (shiftop) {
365af80e
AJ
669 case 0:
670 gen_shl(var, var, shift);
671 break;
672 case 1:
673 gen_shr(var, var, shift);
674 break;
675 case 2:
676 gen_sar(var, var, shift);
677 break;
f669df27
AJ
678 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
679 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
680 }
681 }
7d1b0095 682 tcg_temp_free_i32(shift);
8984bd2e
PB
683}
684
6ddbc6e4
PB
685#define PAS_OP(pfx) \
686 switch (op2) { \
687 case 0: gen_pas_helper(glue(pfx,add16)); break; \
688 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
689 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
690 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
691 case 4: gen_pas_helper(glue(pfx,add8)); break; \
692 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
693 }
39d5492a 694static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 695{
a7812ae4 696 TCGv_ptr tmp;
6ddbc6e4
PB
697
698 switch (op1) {
699#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
700 case 1:
a7812ae4 701 tmp = tcg_temp_new_ptr();
0ecb72a5 702 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 703 PAS_OP(s)
b75263d6 704 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
705 break;
706 case 5:
a7812ae4 707 tmp = tcg_temp_new_ptr();
0ecb72a5 708 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 709 PAS_OP(u)
b75263d6 710 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
711 break;
712#undef gen_pas_helper
713#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
714 case 2:
715 PAS_OP(q);
716 break;
717 case 3:
718 PAS_OP(sh);
719 break;
720 case 6:
721 PAS_OP(uq);
722 break;
723 case 7:
724 PAS_OP(uh);
725 break;
726#undef gen_pas_helper
727 }
728}
9ee6e8bb
PB
729#undef PAS_OP
730
6ddbc6e4
PB
731/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
732#define PAS_OP(pfx) \
ed89a2f1 733 switch (op1) { \
6ddbc6e4
PB
734 case 0: gen_pas_helper(glue(pfx,add8)); break; \
735 case 1: gen_pas_helper(glue(pfx,add16)); break; \
736 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
737 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
738 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
739 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
740 }
39d5492a 741static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 742{
a7812ae4 743 TCGv_ptr tmp;
6ddbc6e4 744
ed89a2f1 745 switch (op2) {
6ddbc6e4
PB
746#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
747 case 0:
a7812ae4 748 tmp = tcg_temp_new_ptr();
0ecb72a5 749 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 750 PAS_OP(s)
b75263d6 751 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
752 break;
753 case 4:
a7812ae4 754 tmp = tcg_temp_new_ptr();
0ecb72a5 755 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 756 PAS_OP(u)
b75263d6 757 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
758 break;
759#undef gen_pas_helper
760#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
761 case 1:
762 PAS_OP(q);
763 break;
764 case 2:
765 PAS_OP(sh);
766 break;
767 case 5:
768 PAS_OP(uq);
769 break;
770 case 6:
771 PAS_OP(uh);
772 break;
773#undef gen_pas_helper
774 }
775}
9ee6e8bb
PB
776#undef PAS_OP
777
39fb730a 778/*
6c2c63d3 779 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
780 * This is common between ARM and Aarch64 targets.
781 */
6c2c63d3 782void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 783{
6c2c63d3
RH
784 TCGv_i32 value;
785 TCGCond cond;
786 bool global = true;
d9ba4830 787
d9ba4830
PB
788 switch (cc) {
789 case 0: /* eq: Z */
d9ba4830 790 case 1: /* ne: !Z */
6c2c63d3
RH
791 cond = TCG_COND_EQ;
792 value = cpu_ZF;
d9ba4830 793 break;
6c2c63d3 794
d9ba4830 795 case 2: /* cs: C */
d9ba4830 796 case 3: /* cc: !C */
6c2c63d3
RH
797 cond = TCG_COND_NE;
798 value = cpu_CF;
d9ba4830 799 break;
6c2c63d3 800
d9ba4830 801 case 4: /* mi: N */
d9ba4830 802 case 5: /* pl: !N */
6c2c63d3
RH
803 cond = TCG_COND_LT;
804 value = cpu_NF;
d9ba4830 805 break;
6c2c63d3 806
d9ba4830 807 case 6: /* vs: V */
d9ba4830 808 case 7: /* vc: !V */
6c2c63d3
RH
809 cond = TCG_COND_LT;
810 value = cpu_VF;
d9ba4830 811 break;
6c2c63d3 812
d9ba4830 813 case 8: /* hi: C && !Z */
6c2c63d3
RH
814 case 9: /* ls: !C || Z -> !(C && !Z) */
815 cond = TCG_COND_NE;
816 value = tcg_temp_new_i32();
817 global = false;
818 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
819 ZF is non-zero for !Z; so AND the two subexpressions. */
820 tcg_gen_neg_i32(value, cpu_CF);
821 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 822 break;
6c2c63d3 823
d9ba4830 824 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 825 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
826 /* Since we're only interested in the sign bit, == 0 is >= 0. */
827 cond = TCG_COND_GE;
828 value = tcg_temp_new_i32();
829 global = false;
830 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 831 break;
6c2c63d3 832
d9ba4830 833 case 12: /* gt: !Z && N == V */
d9ba4830 834 case 13: /* le: Z || N != V */
6c2c63d3
RH
835 cond = TCG_COND_NE;
836 value = tcg_temp_new_i32();
837 global = false;
838 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
839 * the sign bit then AND with ZF to yield the result. */
840 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
841 tcg_gen_sari_i32(value, value, 31);
842 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 843 break;
6c2c63d3 844
9305eac0
RH
845 case 14: /* always */
846 case 15: /* always */
847 /* Use the ALWAYS condition, which will fold early.
848 * It doesn't matter what we use for the value. */
849 cond = TCG_COND_ALWAYS;
850 value = cpu_ZF;
851 goto no_invert;
852
d9ba4830
PB
853 default:
854 fprintf(stderr, "Bad condition code 0x%x\n", cc);
855 abort();
856 }
6c2c63d3
RH
857
858 if (cc & 1) {
859 cond = tcg_invert_cond(cond);
860 }
861
9305eac0 862 no_invert:
6c2c63d3
RH
863 cmp->cond = cond;
864 cmp->value = value;
865 cmp->value_global = global;
866}
867
868void arm_free_cc(DisasCompare *cmp)
869{
870 if (!cmp->value_global) {
871 tcg_temp_free_i32(cmp->value);
872 }
873}
874
875void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
876{
877 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
878}
879
880void arm_gen_test_cc(int cc, TCGLabel *label)
881{
882 DisasCompare cmp;
883 arm_test_cc(&cmp, cc);
884 arm_jump_cc(&cmp, label);
885 arm_free_cc(&cmp);
d9ba4830 886}
2c0262af 887
b1d8e52e 888static const uint8_t table_logic_cc[16] = {
2c0262af
FB
889 1, /* and */
890 1, /* xor */
891 0, /* sub */
892 0, /* rsb */
893 0, /* add */
894 0, /* adc */
895 0, /* sbc */
896 0, /* rsc */
897 1, /* andl */
898 1, /* xorl */
899 0, /* cmp */
900 0, /* cmn */
901 1, /* orr */
902 1, /* mov */
903 1, /* bic */
904 1, /* mvn */
905};
3b46e624 906
4d5e8c96
PM
907static inline void gen_set_condexec(DisasContext *s)
908{
909 if (s->condexec_mask) {
910 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
911 TCGv_i32 tmp = tcg_temp_new_i32();
912 tcg_gen_movi_i32(tmp, val);
913 store_cpu_field(tmp, condexec_bits);
914 }
915}
916
917static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
918{
919 tcg_gen_movi_i32(cpu_R[15], val);
920}
921
d9ba4830
PB
922/* Set PC and Thumb state from an immediate address. */
923static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 924{
39d5492a 925 TCGv_i32 tmp;
99c475ab 926
577bf808 927 s->is_jmp = DISAS_JUMP;
d9ba4830 928 if (s->thumb != (addr & 1)) {
7d1b0095 929 tmp = tcg_temp_new_i32();
d9ba4830 930 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 931 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 932 tcg_temp_free_i32(tmp);
d9ba4830 933 }
155c3eac 934 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
935}
936
937/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 938static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 939{
577bf808 940 s->is_jmp = DISAS_JUMP;
155c3eac
FN
941 tcg_gen_andi_i32(cpu_R[15], var, ~1);
942 tcg_gen_andi_i32(var, var, 1);
943 store_cpu_field(var, thumb);
d9ba4830
PB
944}
945
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
        s->is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();

    /* Is the new PC value in the magic range indicating exception return?
     * (All magic values lie in 0xff000000..0xffffffff, hence the GEU test.)
     */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
990
21aeb343
JR
991/* Variant of store_reg which uses branch&exchange logic when storing
992 to r15 in ARM architecture v7 and above. The source must be a temporary
993 and will be marked as dead. */
7dcc1f89 994static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
995{
996 if (reg == 15 && ENABLE_ARCH_7) {
997 gen_bx(s, var);
998 } else {
999 store_reg(s, reg, var);
1000 }
1001}
1002
be5e7a76
DES
1003/* Variant of store_reg which uses branch&exchange logic when storing
1004 * to r15 in ARM architecture v5T and above. This is used for storing
1005 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1006 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1007static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1008{
1009 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1010 gen_bx_excret(s, var);
be5e7a76
DES
1011 } else {
1012 store_reg(s, reg, var);
1013 }
1014}
1015
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

/* Widen an AArch32 address to target width, applying the BE32
 * sub-word address swizzle (XOR of the low address bits) for
 * byte/halfword accesses when SCTLR.B is set.
 */
static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
1041
/* 32-bit guest load: widen/swizzle the address, then do the access. */
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

/* 32-bit guest store: widen/swizzle the address, then do the access. */
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
08307563 1057
/* Expand to a pair of load helpers for one access size/signedness:
 * gen_aa32_ldSUFF() does the plain load (endianness from s->be_data),
 * gen_aa32_ldSUFF_iss() additionally records syndrome (ISS) info
 * for the insn via disas_set_da_iss().
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

/* Store counterpart of DO_GEN_LD; the _iss variant marks the access
 * as a write in the syndrome information.
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1087
/* For BE32 system mode, swap the two 32-bit halves of a loaded
 * 64-bit value so it matches the guest's word-invariant layout.
 */
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

/* 64-bit guest load, with BE32 half-swapping applied to the result. */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

/* Convenience wrapper: 64-bit load with the translation-time endianness. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

/* 64-bit guest store; for BE32 the halves are swapped in a temporary
 * before the store so val itself is left unmodified.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

/* Convenience wrapper: 64-bit store with the translation-time endianness. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1133
/* Instantiate the load/store helpers for each access size/signedness. */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
08307563 1142
37e6456e
PM
/* Emit code for the HVC (hypervisor call) instruction with the given
 * 16-bit immediate; the actual exception is raised at end of TB.
 */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

/* Emit code for the SMC (secure monitor call) instruction. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
1175
/* Raise a QEMU-internal exception for the current insn; offset is the
 * byte distance back from s->pc to the insn's own address.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

/* Raise an architectural exception with the given syndrome, targeting
 * target_el; condexec state and PC are synced first.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
1199
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        /* Semihosting trap: handled internally, no guest exception. */
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    /* Not a semihosting trap: UNDEF (insn length differs for Thumb/ARM). */
    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1226
b0109805 1227static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1228 TCGv_i32 var)
2c0262af 1229{
1e8d4eec 1230 int val, rm, shift, shiftop;
39d5492a 1231 TCGv_i32 offset;
2c0262af
FB
1232
1233 if (!(insn & (1 << 25))) {
1234 /* immediate */
1235 val = insn & 0xfff;
1236 if (!(insn & (1 << 23)))
1237 val = -val;
537730b9 1238 if (val != 0)
b0109805 1239 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1240 } else {
1241 /* shift/register */
1242 rm = (insn) & 0xf;
1243 shift = (insn >> 7) & 0x1f;
1e8d4eec 1244 shiftop = (insn >> 5) & 3;
b26eefb6 1245 offset = load_reg(s, rm);
9a119ff6 1246 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1247 if (!(insn & (1 << 23)))
b0109805 1248 tcg_gen_sub_i32(var, var, offset);
2c0262af 1249 else
b0109805 1250 tcg_gen_add_i32(var, var, offset);
7d1b0095 1251 tcg_temp_free_i32(offset);
2c0262af
FB
1252 }
1253}
1254
191f9a93 1255static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1256 int extra, TCGv_i32 var)
2c0262af
FB
1257{
1258 int val, rm;
39d5492a 1259 TCGv_i32 offset;
3b46e624 1260
2c0262af
FB
1261 if (insn & (1 << 22)) {
1262 /* immediate */
1263 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1264 if (!(insn & (1 << 23)))
1265 val = -val;
18acad92 1266 val += extra;
537730b9 1267 if (val != 0)
b0109805 1268 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1269 } else {
1270 /* register */
191f9a93 1271 if (extra)
b0109805 1272 tcg_gen_addi_i32(var, var, extra);
2c0262af 1273 rm = (insn) & 0xf;
b26eefb6 1274 offset = load_reg(s, rm);
2c0262af 1275 if (!(insn & (1 << 23)))
b0109805 1276 tcg_gen_sub_i32(var, var, offset);
2c0262af 1277 else
b0109805 1278 tcg_gen_add_i32(var, var, offset);
7d1b0095 1279 tcg_temp_free_i32(offset);
2c0262af
FB
1280 }
1281}
1282
5aaebd13
PM
1283static TCGv_ptr get_fpstatus_ptr(int neon)
1284{
1285 TCGv_ptr statusptr = tcg_temp_new_ptr();
1286 int offset;
1287 if (neon) {
0ecb72a5 1288 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1289 } else {
0ecb72a5 1290 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1291 }
1292 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1293 return statusptr;
1294}
1295
/* Generate a two-operand VFP arithmetic helper gen_vfp_<name>():
 * F0 = F0 <op> F1, single or double precision selected by dp.
 */
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1336
/* Single-operand VFP ops on F0 (dp selects double vs single). */
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

/* Compare F0 with F1; the helpers update flags in env. */
static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

/* As gen_vfp_cmp but the "E" (exception-raising) compare variant. */
static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

/* Load the constant 0.0 into F1 (for compare-with-zero forms). */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
1384
/* Generate int-to-float conversions gen_vfp_<name>(dp, neon):
 * source is the 32-bit value in F0s, result in F0 (s or d).
 * neon selects the Neon standard-FPSCR float_status.
 */
#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

/* Generate float-to-int conversions: source in F0 (s or d),
 * 32-bit result always lands in F0s.
 */
#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

/* Generate fixed-point conversions taking a shift (number of fraction
 * bits); round is "" or "_round_to_zero" and selects the helper variant.
 */
#define VFP_GEN_FIX(name, round)                                      \
static inline void gen_vfp_##name(int dp, int shift, int neon)        \
{                                                                     \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                        \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,  \
                                        statusptr);                   \
    } else {                                                          \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,  \
                                        statusptr);                   \
    }                                                                 \
    tcg_temp_free_i32(tmp_shift);                                     \
    tcg_temp_free_ptr(statusptr);                                     \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1443
/* Load F0 (double if dp, else single) from the guest address in addr. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

/* Store F0 (double if dp, else single) to the guest address in addr. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1461
8e96005d
FB
1462static inline long
1463vfp_reg_offset (int dp, int reg)
1464{
1465 if (dp)
1466 return offsetof(CPUARMState, vfp.regs[reg]);
1467 else if (reg & 1) {
1468 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1469 + offsetof(CPU_DoubleU, l.upper);
1470 } else {
1471 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1472 + offsetof(CPU_DoubleU, l.lower);
1473 }
1474}
9ee6e8bb
PB
1475
1476/* Return the offset of a 32-bit piece of a NEON register.
1477 zero is the least significant end of the register. */
1478static inline long
1479neon_reg_offset (int reg, int n)
1480{
1481 int sreg;
1482 sreg = reg * 2 + n;
1483 return vfp_reg_offset(0, sreg);
1484}
1485
/* Load one 32-bit piece of a NEON register into a fresh temporary;
 * caller frees it (or passes it to a consuming helper).
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Store var into one 32-bit piece of a NEON register; var is freed. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

/* Load/store a whole 64-bit NEON/VFP double register. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1508
/* Float loads/stores are bitwise-identical to the integer ops. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

/* Copy VFP register reg into the F0 working value. */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy VFP register reg into the F1 working value. */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy the F0 working value back into VFP register reg. */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1537
/* Coprocessor load/store direction bit in the insn encoding. */
#define ARM_CP_RW_BIT (1 << 20)

/* Accessors for the iwMMXt 64-bit data registers ... */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* ... and the 32-bit control registers. iwmmxt_load_creg returns a
 * fresh temporary; iwmmxt_store_creg consumes (frees) var.
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1562
/* Move the M0 working value to/from iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* Bitwise ops combining wRn into M0 (via the V1 scratch register). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1590
/* Generate gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name)                                               \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)              \
{                                                                     \
    iwmmxt_load_reg(cpu_V1, rn);                                      \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);                 \
}

/* As IWMMXT_OP but the helper also takes cpu_env (it touches CPU state). */
#define IWMMXT_OP_ENV(name)                                           \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)              \
{                                                                     \
    iwmmxt_load_reg(cpu_V1, rn);                                      \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);        \
}

/* Instantiate the byte/word/long variants of an env op. */
#define IWMMXT_OP_ENV_SIZE(name)                                      \
IWMMXT_OP_ENV(name##b)                                                \
IWMMXT_OP_ENV(name##w)                                                \
IWMMXT_OP_ENV(name##l)

/* Single-operand env op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name)                                          \
static inline void gen_op_iwmmxt_##name##_M0(void)                    \
{                                                                     \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0);                \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1670
/* Set the MUP bit (bit 1) in the wCon control register. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set the CUP bit (bit 0) in the wCon control register. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Update the N and Z flags in wCASF from the M0 working value. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1700
/* Compute the effective address for an iwMMXt load/store into dest,
 * handling pre/post-indexed forms with optional base writeback.
 * Insn fields used: base reg in [19:16], scaled 8-bit offset,
 * bit 24 = pre-indexed, bit 23 = add/subtract, bit 21 = writeback.
 * Returns nonzero for an invalid addressing mode (UNDEF).
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1735
/* Fetch the shift amount for an iwMMXt shift insn into dest, masked
 * with mask. Bit 8 selects a wCGR control register (only wCGR0-3 are
 * legal) versus the low half of data register [3:0].
 * Returns nonzero for an invalid encoding (UNDEF).
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1757
a1c7273b 1758/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1759 (ie. an undefined instruction). */
7dcc1f89 1760static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1761{
1762 int rd, wrd;
1763 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1764 TCGv_i32 addr;
1765 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1766
1767 if ((insn & 0x0e000e00) == 0x0c000000) {
1768 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1769 wrd = insn & 0xf;
1770 rdlo = (insn >> 12) & 0xf;
1771 rdhi = (insn >> 16) & 0xf;
1772 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1773 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1774 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1775 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1776 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1777 } else { /* TMCRR */
da6b5335
FN
1778 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1779 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1780 gen_op_iwmmxt_set_mup();
1781 }
1782 return 0;
1783 }
1784
1785 wrd = (insn >> 12) & 0xf;
7d1b0095 1786 addr = tcg_temp_new_i32();
da6b5335 1787 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1788 tcg_temp_free_i32(addr);
18c9b560 1789 return 1;
da6b5335 1790 }
18c9b560
AZ
1791 if (insn & ARM_CP_RW_BIT) {
1792 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1793 tmp = tcg_temp_new_i32();
12dcc321 1794 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1795 iwmmxt_store_creg(wrd, tmp);
18c9b560 1796 } else {
e677137d
PB
1797 i = 1;
1798 if (insn & (1 << 8)) {
1799 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1800 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1801 i = 0;
1802 } else { /* WLDRW wRd */
29531141 1803 tmp = tcg_temp_new_i32();
12dcc321 1804 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1805 }
1806 } else {
29531141 1807 tmp = tcg_temp_new_i32();
e677137d 1808 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1809 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1810 } else { /* WLDRB */
12dcc321 1811 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1812 }
1813 }
1814 if (i) {
1815 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1816 tcg_temp_free_i32(tmp);
e677137d 1817 }
18c9b560
AZ
1818 gen_op_iwmmxt_movq_wRn_M0(wrd);
1819 }
1820 } else {
1821 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1822 tmp = iwmmxt_load_creg(wrd);
12dcc321 1823 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1824 } else {
1825 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1826 tmp = tcg_temp_new_i32();
e677137d
PB
1827 if (insn & (1 << 8)) {
1828 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1829 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1830 } else { /* WSTRW wRd */
ecc7b3aa 1831 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1832 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1833 }
1834 } else {
1835 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1836 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1837 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1838 } else { /* WSTRB */
ecc7b3aa 1839 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1840 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1841 }
1842 }
18c9b560 1843 }
29531141 1844 tcg_temp_free_i32(tmp);
18c9b560 1845 }
7d1b0095 1846 tcg_temp_free_i32(addr);
18c9b560
AZ
1847 return 0;
1848 }
1849
1850 if ((insn & 0x0f000000) != 0x0e000000)
1851 return 1;
1852
1853 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1854 case 0x000: /* WOR */
1855 wrd = (insn >> 12) & 0xf;
1856 rd0 = (insn >> 0) & 0xf;
1857 rd1 = (insn >> 16) & 0xf;
1858 gen_op_iwmmxt_movq_M0_wRn(rd0);
1859 gen_op_iwmmxt_orq_M0_wRn(rd1);
1860 gen_op_iwmmxt_setpsr_nz();
1861 gen_op_iwmmxt_movq_wRn_M0(wrd);
1862 gen_op_iwmmxt_set_mup();
1863 gen_op_iwmmxt_set_cup();
1864 break;
1865 case 0x011: /* TMCR */
1866 if (insn & 0xf)
1867 return 1;
1868 rd = (insn >> 12) & 0xf;
1869 wrd = (insn >> 16) & 0xf;
1870 switch (wrd) {
1871 case ARM_IWMMXT_wCID:
1872 case ARM_IWMMXT_wCASF:
1873 break;
1874 case ARM_IWMMXT_wCon:
1875 gen_op_iwmmxt_set_cup();
1876 /* Fall through. */
1877 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1878 tmp = iwmmxt_load_creg(wrd);
1879 tmp2 = load_reg(s, rd);
f669df27 1880 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1881 tcg_temp_free_i32(tmp2);
da6b5335 1882 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1883 break;
1884 case ARM_IWMMXT_wCGR0:
1885 case ARM_IWMMXT_wCGR1:
1886 case ARM_IWMMXT_wCGR2:
1887 case ARM_IWMMXT_wCGR3:
1888 gen_op_iwmmxt_set_cup();
da6b5335
FN
1889 tmp = load_reg(s, rd);
1890 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1891 break;
1892 default:
1893 return 1;
1894 }
1895 break;
1896 case 0x100: /* WXOR */
1897 wrd = (insn >> 12) & 0xf;
1898 rd0 = (insn >> 0) & 0xf;
1899 rd1 = (insn >> 16) & 0xf;
1900 gen_op_iwmmxt_movq_M0_wRn(rd0);
1901 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1902 gen_op_iwmmxt_setpsr_nz();
1903 gen_op_iwmmxt_movq_wRn_M0(wrd);
1904 gen_op_iwmmxt_set_mup();
1905 gen_op_iwmmxt_set_cup();
1906 break;
1907 case 0x111: /* TMRC */
1908 if (insn & 0xf)
1909 return 1;
1910 rd = (insn >> 12) & 0xf;
1911 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1912 tmp = iwmmxt_load_creg(wrd);
1913 store_reg(s, rd, tmp);
18c9b560
AZ
1914 break;
1915 case 0x300: /* WANDN */
1916 wrd = (insn >> 12) & 0xf;
1917 rd0 = (insn >> 0) & 0xf;
1918 rd1 = (insn >> 16) & 0xf;
1919 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1920 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1921 gen_op_iwmmxt_andq_M0_wRn(rd1);
1922 gen_op_iwmmxt_setpsr_nz();
1923 gen_op_iwmmxt_movq_wRn_M0(wrd);
1924 gen_op_iwmmxt_set_mup();
1925 gen_op_iwmmxt_set_cup();
1926 break;
1927 case 0x200: /* WAND */
1928 wrd = (insn >> 12) & 0xf;
1929 rd0 = (insn >> 0) & 0xf;
1930 rd1 = (insn >> 16) & 0xf;
1931 gen_op_iwmmxt_movq_M0_wRn(rd0);
1932 gen_op_iwmmxt_andq_M0_wRn(rd1);
1933 gen_op_iwmmxt_setpsr_nz();
1934 gen_op_iwmmxt_movq_wRn_M0(wrd);
1935 gen_op_iwmmxt_set_mup();
1936 gen_op_iwmmxt_set_cup();
1937 break;
1938 case 0x810: case 0xa10: /* WMADD */
1939 wrd = (insn >> 12) & 0xf;
1940 rd0 = (insn >> 0) & 0xf;
1941 rd1 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 if (insn & (1 << 21))
1944 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1945 else
1946 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1947 gen_op_iwmmxt_movq_wRn_M0(wrd);
1948 gen_op_iwmmxt_set_mup();
1949 break;
1950 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1951 wrd = (insn >> 12) & 0xf;
1952 rd0 = (insn >> 16) & 0xf;
1953 rd1 = (insn >> 0) & 0xf;
1954 gen_op_iwmmxt_movq_M0_wRn(rd0);
1955 switch ((insn >> 22) & 3) {
1956 case 0:
1957 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1958 break;
1959 case 1:
1960 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1961 break;
1962 case 2:
1963 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1964 break;
1965 case 3:
1966 return 1;
1967 }
1968 gen_op_iwmmxt_movq_wRn_M0(wrd);
1969 gen_op_iwmmxt_set_mup();
1970 gen_op_iwmmxt_set_cup();
1971 break;
1972 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1973 wrd = (insn >> 12) & 0xf;
1974 rd0 = (insn >> 16) & 0xf;
1975 rd1 = (insn >> 0) & 0xf;
1976 gen_op_iwmmxt_movq_M0_wRn(rd0);
1977 switch ((insn >> 22) & 3) {
1978 case 0:
1979 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1980 break;
1981 case 1:
1982 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1983 break;
1984 case 2:
1985 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1986 break;
1987 case 3:
1988 return 1;
1989 }
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1995 wrd = (insn >> 12) & 0xf;
1996 rd0 = (insn >> 16) & 0xf;
1997 rd1 = (insn >> 0) & 0xf;
1998 gen_op_iwmmxt_movq_M0_wRn(rd0);
1999 if (insn & (1 << 22))
2000 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2001 else
2002 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2003 if (!(insn & (1 << 20)))
2004 gen_op_iwmmxt_addl_M0_wRn(wrd);
2005 gen_op_iwmmxt_movq_wRn_M0(wrd);
2006 gen_op_iwmmxt_set_mup();
2007 break;
2008 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2009 wrd = (insn >> 12) & 0xf;
2010 rd0 = (insn >> 16) & 0xf;
2011 rd1 = (insn >> 0) & 0xf;
2012 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2013 if (insn & (1 << 21)) {
2014 if (insn & (1 << 20))
2015 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2016 else
2017 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2018 } else {
2019 if (insn & (1 << 20))
2020 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2021 else
2022 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2023 }
18c9b560
AZ
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 break;
2027 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2028 wrd = (insn >> 12) & 0xf;
2029 rd0 = (insn >> 16) & 0xf;
2030 rd1 = (insn >> 0) & 0xf;
2031 gen_op_iwmmxt_movq_M0_wRn(rd0);
2032 if (insn & (1 << 21))
2033 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2034 else
2035 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2036 if (!(insn & (1 << 20))) {
e677137d
PB
2037 iwmmxt_load_reg(cpu_V1, wrd);
2038 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2039 }
2040 gen_op_iwmmxt_movq_wRn_M0(wrd);
2041 gen_op_iwmmxt_set_mup();
2042 break;
2043 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2044 wrd = (insn >> 12) & 0xf;
2045 rd0 = (insn >> 16) & 0xf;
2046 rd1 = (insn >> 0) & 0xf;
2047 gen_op_iwmmxt_movq_M0_wRn(rd0);
2048 switch ((insn >> 22) & 3) {
2049 case 0:
2050 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2051 break;
2052 case 1:
2053 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2054 break;
2055 case 2:
2056 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2057 break;
2058 case 3:
2059 return 1;
2060 }
2061 gen_op_iwmmxt_movq_wRn_M0(wrd);
2062 gen_op_iwmmxt_set_mup();
2063 gen_op_iwmmxt_set_cup();
2064 break;
2065 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2066 wrd = (insn >> 12) & 0xf;
2067 rd0 = (insn >> 16) & 0xf;
2068 rd1 = (insn >> 0) & 0xf;
2069 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2070 if (insn & (1 << 22)) {
2071 if (insn & (1 << 20))
2072 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2073 else
2074 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2075 } else {
2076 if (insn & (1 << 20))
2077 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2078 else
2079 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2080 }
18c9b560
AZ
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2084 break;
2085 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2086 wrd = (insn >> 12) & 0xf;
2087 rd0 = (insn >> 16) & 0xf;
2088 rd1 = (insn >> 0) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2090 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2091 tcg_gen_andi_i32(tmp, tmp, 7);
2092 iwmmxt_load_reg(cpu_V1, rd1);
2093 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2094 tcg_temp_free_i32(tmp);
18c9b560
AZ
2095 gen_op_iwmmxt_movq_wRn_M0(wrd);
2096 gen_op_iwmmxt_set_mup();
2097 break;
2098 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2099 if (((insn >> 6) & 3) == 3)
2100 return 1;
18c9b560
AZ
2101 rd = (insn >> 12) & 0xf;
2102 wrd = (insn >> 16) & 0xf;
da6b5335 2103 tmp = load_reg(s, rd);
18c9b560
AZ
2104 gen_op_iwmmxt_movq_M0_wRn(wrd);
2105 switch ((insn >> 6) & 3) {
2106 case 0:
da6b5335
FN
2107 tmp2 = tcg_const_i32(0xff);
2108 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2109 break;
2110 case 1:
da6b5335
FN
2111 tmp2 = tcg_const_i32(0xffff);
2112 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2113 break;
2114 case 2:
da6b5335
FN
2115 tmp2 = tcg_const_i32(0xffffffff);
2116 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2117 break;
da6b5335 2118 default:
39d5492a
PM
2119 TCGV_UNUSED_I32(tmp2);
2120 TCGV_UNUSED_I32(tmp3);
18c9b560 2121 }
da6b5335 2122 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2123 tcg_temp_free_i32(tmp3);
2124 tcg_temp_free_i32(tmp2);
7d1b0095 2125 tcg_temp_free_i32(tmp);
18c9b560
AZ
2126 gen_op_iwmmxt_movq_wRn_M0(wrd);
2127 gen_op_iwmmxt_set_mup();
2128 break;
2129 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2130 rd = (insn >> 12) & 0xf;
2131 wrd = (insn >> 16) & 0xf;
da6b5335 2132 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2133 return 1;
2134 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2135 tmp = tcg_temp_new_i32();
18c9b560
AZ
2136 switch ((insn >> 22) & 3) {
2137 case 0:
da6b5335 2138 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2139 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2140 if (insn & 8) {
2141 tcg_gen_ext8s_i32(tmp, tmp);
2142 } else {
2143 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2144 }
2145 break;
2146 case 1:
da6b5335 2147 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2148 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2149 if (insn & 8) {
2150 tcg_gen_ext16s_i32(tmp, tmp);
2151 } else {
2152 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2153 }
2154 break;
2155 case 2:
da6b5335 2156 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2157 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2158 break;
18c9b560 2159 }
da6b5335 2160 store_reg(s, rd, tmp);
18c9b560
AZ
2161 break;
2162 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2163 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2164 return 1;
da6b5335 2165 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2166 switch ((insn >> 22) & 3) {
2167 case 0:
da6b5335 2168 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2169 break;
2170 case 1:
da6b5335 2171 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2172 break;
2173 case 2:
da6b5335 2174 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2175 break;
18c9b560 2176 }
da6b5335
FN
2177 tcg_gen_shli_i32(tmp, tmp, 28);
2178 gen_set_nzcv(tmp);
7d1b0095 2179 tcg_temp_free_i32(tmp);
18c9b560
AZ
2180 break;
2181 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2182 if (((insn >> 6) & 3) == 3)
2183 return 1;
18c9b560
AZ
2184 rd = (insn >> 12) & 0xf;
2185 wrd = (insn >> 16) & 0xf;
da6b5335 2186 tmp = load_reg(s, rd);
18c9b560
AZ
2187 switch ((insn >> 6) & 3) {
2188 case 0:
da6b5335 2189 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2190 break;
2191 case 1:
da6b5335 2192 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2193 break;
2194 case 2:
da6b5335 2195 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2196 break;
18c9b560 2197 }
7d1b0095 2198 tcg_temp_free_i32(tmp);
18c9b560
AZ
2199 gen_op_iwmmxt_movq_wRn_M0(wrd);
2200 gen_op_iwmmxt_set_mup();
2201 break;
2202 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2203 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2204 return 1;
da6b5335 2205 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2206 tmp2 = tcg_temp_new_i32();
da6b5335 2207 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2208 switch ((insn >> 22) & 3) {
2209 case 0:
2210 for (i = 0; i < 7; i ++) {
da6b5335
FN
2211 tcg_gen_shli_i32(tmp2, tmp2, 4);
2212 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2213 }
2214 break;
2215 case 1:
2216 for (i = 0; i < 3; i ++) {
da6b5335
FN
2217 tcg_gen_shli_i32(tmp2, tmp2, 8);
2218 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2219 }
2220 break;
2221 case 2:
da6b5335
FN
2222 tcg_gen_shli_i32(tmp2, tmp2, 16);
2223 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2224 break;
18c9b560 2225 }
da6b5335 2226 gen_set_nzcv(tmp);
7d1b0095
PM
2227 tcg_temp_free_i32(tmp2);
2228 tcg_temp_free_i32(tmp);
18c9b560
AZ
2229 break;
2230 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2231 wrd = (insn >> 12) & 0xf;
2232 rd0 = (insn >> 16) & 0xf;
2233 gen_op_iwmmxt_movq_M0_wRn(rd0);
2234 switch ((insn >> 22) & 3) {
2235 case 0:
e677137d 2236 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2237 break;
2238 case 1:
e677137d 2239 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2240 break;
2241 case 2:
e677137d 2242 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2243 break;
2244 case 3:
2245 return 1;
2246 }
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 break;
2250 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2251 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2252 return 1;
da6b5335 2253 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2254 tmp2 = tcg_temp_new_i32();
da6b5335 2255 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2256 switch ((insn >> 22) & 3) {
2257 case 0:
2258 for (i = 0; i < 7; i ++) {
da6b5335
FN
2259 tcg_gen_shli_i32(tmp2, tmp2, 4);
2260 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2261 }
2262 break;
2263 case 1:
2264 for (i = 0; i < 3; i ++) {
da6b5335
FN
2265 tcg_gen_shli_i32(tmp2, tmp2, 8);
2266 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2267 }
2268 break;
2269 case 2:
da6b5335
FN
2270 tcg_gen_shli_i32(tmp2, tmp2, 16);
2271 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2272 break;
18c9b560 2273 }
da6b5335 2274 gen_set_nzcv(tmp);
7d1b0095
PM
2275 tcg_temp_free_i32(tmp2);
2276 tcg_temp_free_i32(tmp);
18c9b560
AZ
2277 break;
2278 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2279 rd = (insn >> 12) & 0xf;
2280 rd0 = (insn >> 16) & 0xf;
da6b5335 2281 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2282 return 1;
2283 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2284 tmp = tcg_temp_new_i32();
18c9b560
AZ
2285 switch ((insn >> 22) & 3) {
2286 case 0:
da6b5335 2287 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2288 break;
2289 case 1:
da6b5335 2290 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2291 break;
2292 case 2:
da6b5335 2293 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2294 break;
18c9b560 2295 }
da6b5335 2296 store_reg(s, rd, tmp);
18c9b560
AZ
2297 break;
2298 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2299 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
2304 switch ((insn >> 22) & 3) {
2305 case 0:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2308 else
2309 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2310 break;
2311 case 1:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2316 break;
2317 case 2:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2322 break;
2323 case 3:
2324 return 1;
2325 }
2326 gen_op_iwmmxt_movq_wRn_M0(wrd);
2327 gen_op_iwmmxt_set_mup();
2328 gen_op_iwmmxt_set_cup();
2329 break;
2330 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2331 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2332 wrd = (insn >> 12) & 0xf;
2333 rd0 = (insn >> 16) & 0xf;
2334 gen_op_iwmmxt_movq_M0_wRn(rd0);
2335 switch ((insn >> 22) & 3) {
2336 case 0:
2337 if (insn & (1 << 21))
2338 gen_op_iwmmxt_unpacklsb_M0();
2339 else
2340 gen_op_iwmmxt_unpacklub_M0();
2341 break;
2342 case 1:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_unpacklsw_M0();
2345 else
2346 gen_op_iwmmxt_unpackluw_M0();
2347 break;
2348 case 2:
2349 if (insn & (1 << 21))
2350 gen_op_iwmmxt_unpacklsl_M0();
2351 else
2352 gen_op_iwmmxt_unpacklul_M0();
2353 break;
2354 case 3:
2355 return 1;
2356 }
2357 gen_op_iwmmxt_movq_wRn_M0(wrd);
2358 gen_op_iwmmxt_set_mup();
2359 gen_op_iwmmxt_set_cup();
2360 break;
2361 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2362 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2363 wrd = (insn >> 12) & 0xf;
2364 rd0 = (insn >> 16) & 0xf;
2365 gen_op_iwmmxt_movq_M0_wRn(rd0);
2366 switch ((insn >> 22) & 3) {
2367 case 0:
2368 if (insn & (1 << 21))
2369 gen_op_iwmmxt_unpackhsb_M0();
2370 else
2371 gen_op_iwmmxt_unpackhub_M0();
2372 break;
2373 case 1:
2374 if (insn & (1 << 21))
2375 gen_op_iwmmxt_unpackhsw_M0();
2376 else
2377 gen_op_iwmmxt_unpackhuw_M0();
2378 break;
2379 case 2:
2380 if (insn & (1 << 21))
2381 gen_op_iwmmxt_unpackhsl_M0();
2382 else
2383 gen_op_iwmmxt_unpackhul_M0();
2384 break;
2385 case 3:
2386 return 1;
2387 }
2388 gen_op_iwmmxt_movq_wRn_M0(wrd);
2389 gen_op_iwmmxt_set_mup();
2390 gen_op_iwmmxt_set_cup();
2391 break;
2392 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2393 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2394 if (((insn >> 22) & 3) == 0)
2395 return 1;
18c9b560
AZ
2396 wrd = (insn >> 12) & 0xf;
2397 rd0 = (insn >> 16) & 0xf;
2398 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2399 tmp = tcg_temp_new_i32();
da6b5335 2400 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2401 tcg_temp_free_i32(tmp);
18c9b560 2402 return 1;
da6b5335 2403 }
18c9b560 2404 switch ((insn >> 22) & 3) {
18c9b560 2405 case 1:
477955bd 2406 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2407 break;
2408 case 2:
477955bd 2409 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2410 break;
2411 case 3:
477955bd 2412 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2413 break;
2414 }
7d1b0095 2415 tcg_temp_free_i32(tmp);
18c9b560
AZ
2416 gen_op_iwmmxt_movq_wRn_M0(wrd);
2417 gen_op_iwmmxt_set_mup();
2418 gen_op_iwmmxt_set_cup();
2419 break;
2420 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2421 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2422 if (((insn >> 22) & 3) == 0)
2423 return 1;
18c9b560
AZ
2424 wrd = (insn >> 12) & 0xf;
2425 rd0 = (insn >> 16) & 0xf;
2426 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2427 tmp = tcg_temp_new_i32();
da6b5335 2428 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2429 tcg_temp_free_i32(tmp);
18c9b560 2430 return 1;
da6b5335 2431 }
18c9b560 2432 switch ((insn >> 22) & 3) {
18c9b560 2433 case 1:
477955bd 2434 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2435 break;
2436 case 2:
477955bd 2437 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2438 break;
2439 case 3:
477955bd 2440 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2441 break;
2442 }
7d1b0095 2443 tcg_temp_free_i32(tmp);
18c9b560
AZ
2444 gen_op_iwmmxt_movq_wRn_M0(wrd);
2445 gen_op_iwmmxt_set_mup();
2446 gen_op_iwmmxt_set_cup();
2447 break;
2448 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2449 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2450 if (((insn >> 22) & 3) == 0)
2451 return 1;
18c9b560
AZ
2452 wrd = (insn >> 12) & 0xf;
2453 rd0 = (insn >> 16) & 0xf;
2454 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2455 tmp = tcg_temp_new_i32();
da6b5335 2456 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2457 tcg_temp_free_i32(tmp);
18c9b560 2458 return 1;
da6b5335 2459 }
18c9b560 2460 switch ((insn >> 22) & 3) {
18c9b560 2461 case 1:
477955bd 2462 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2463 break;
2464 case 2:
477955bd 2465 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2466 break;
2467 case 3:
477955bd 2468 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2469 break;
2470 }
7d1b0095 2471 tcg_temp_free_i32(tmp);
18c9b560
AZ
2472 gen_op_iwmmxt_movq_wRn_M0(wrd);
2473 gen_op_iwmmxt_set_mup();
2474 gen_op_iwmmxt_set_cup();
2475 break;
2476 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2477 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2478 if (((insn >> 22) & 3) == 0)
2479 return 1;
18c9b560
AZ
2480 wrd = (insn >> 12) & 0xf;
2481 rd0 = (insn >> 16) & 0xf;
2482 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2483 tmp = tcg_temp_new_i32();
18c9b560 2484 switch ((insn >> 22) & 3) {
18c9b560 2485 case 1:
da6b5335 2486 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2487 tcg_temp_free_i32(tmp);
18c9b560 2488 return 1;
da6b5335 2489 }
477955bd 2490 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2491 break;
2492 case 2:
da6b5335 2493 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2494 tcg_temp_free_i32(tmp);
18c9b560 2495 return 1;
da6b5335 2496 }
477955bd 2497 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2498 break;
2499 case 3:
da6b5335 2500 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2501 tcg_temp_free_i32(tmp);
18c9b560 2502 return 1;
da6b5335 2503 }
477955bd 2504 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2505 break;
2506 }
7d1b0095 2507 tcg_temp_free_i32(tmp);
18c9b560
AZ
2508 gen_op_iwmmxt_movq_wRn_M0(wrd);
2509 gen_op_iwmmxt_set_mup();
2510 gen_op_iwmmxt_set_cup();
2511 break;
2512 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2513 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2514 wrd = (insn >> 12) & 0xf;
2515 rd0 = (insn >> 16) & 0xf;
2516 rd1 = (insn >> 0) & 0xf;
2517 gen_op_iwmmxt_movq_M0_wRn(rd0);
2518 switch ((insn >> 22) & 3) {
2519 case 0:
2520 if (insn & (1 << 21))
2521 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2522 else
2523 gen_op_iwmmxt_minub_M0_wRn(rd1);
2524 break;
2525 case 1:
2526 if (insn & (1 << 21))
2527 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2528 else
2529 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2530 break;
2531 case 2:
2532 if (insn & (1 << 21))
2533 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2534 else
2535 gen_op_iwmmxt_minul_M0_wRn(rd1);
2536 break;
2537 case 3:
2538 return 1;
2539 }
2540 gen_op_iwmmxt_movq_wRn_M0(wrd);
2541 gen_op_iwmmxt_set_mup();
2542 break;
2543 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2544 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2545 wrd = (insn >> 12) & 0xf;
2546 rd0 = (insn >> 16) & 0xf;
2547 rd1 = (insn >> 0) & 0xf;
2548 gen_op_iwmmxt_movq_M0_wRn(rd0);
2549 switch ((insn >> 22) & 3) {
2550 case 0:
2551 if (insn & (1 << 21))
2552 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2553 else
2554 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2555 break;
2556 case 1:
2557 if (insn & (1 << 21))
2558 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2559 else
2560 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2561 break;
2562 case 2:
2563 if (insn & (1 << 21))
2564 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2565 else
2566 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2567 break;
2568 case 3:
2569 return 1;
2570 }
2571 gen_op_iwmmxt_movq_wRn_M0(wrd);
2572 gen_op_iwmmxt_set_mup();
2573 break;
2574 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2575 case 0x402: case 0x502: case 0x602: case 0x702:
2576 wrd = (insn >> 12) & 0xf;
2577 rd0 = (insn >> 16) & 0xf;
2578 rd1 = (insn >> 0) & 0xf;
2579 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2580 tmp = tcg_const_i32((insn >> 20) & 3);
2581 iwmmxt_load_reg(cpu_V1, rd1);
2582 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2583 tcg_temp_free_i32(tmp);
18c9b560
AZ
2584 gen_op_iwmmxt_movq_wRn_M0(wrd);
2585 gen_op_iwmmxt_set_mup();
2586 break;
2587 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2588 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2589 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2590 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2591 wrd = (insn >> 12) & 0xf;
2592 rd0 = (insn >> 16) & 0xf;
2593 rd1 = (insn >> 0) & 0xf;
2594 gen_op_iwmmxt_movq_M0_wRn(rd0);
2595 switch ((insn >> 20) & 0xf) {
2596 case 0x0:
2597 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2598 break;
2599 case 0x1:
2600 gen_op_iwmmxt_subub_M0_wRn(rd1);
2601 break;
2602 case 0x3:
2603 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2604 break;
2605 case 0x4:
2606 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2607 break;
2608 case 0x5:
2609 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2610 break;
2611 case 0x7:
2612 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2613 break;
2614 case 0x8:
2615 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2616 break;
2617 case 0x9:
2618 gen_op_iwmmxt_subul_M0_wRn(rd1);
2619 break;
2620 case 0xb:
2621 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2622 break;
2623 default:
2624 return 1;
2625 }
2626 gen_op_iwmmxt_movq_wRn_M0(wrd);
2627 gen_op_iwmmxt_set_mup();
2628 gen_op_iwmmxt_set_cup();
2629 break;
2630 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2631 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2632 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2633 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2634 wrd = (insn >> 12) & 0xf;
2635 rd0 = (insn >> 16) & 0xf;
2636 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2637 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2638 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2639 tcg_temp_free_i32(tmp);
18c9b560
AZ
2640 gen_op_iwmmxt_movq_wRn_M0(wrd);
2641 gen_op_iwmmxt_set_mup();
2642 gen_op_iwmmxt_set_cup();
2643 break;
2644 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2645 case 0x418: case 0x518: case 0x618: case 0x718:
2646 case 0x818: case 0x918: case 0xa18: case 0xb18:
2647 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2648 wrd = (insn >> 12) & 0xf;
2649 rd0 = (insn >> 16) & 0xf;
2650 rd1 = (insn >> 0) & 0xf;
2651 gen_op_iwmmxt_movq_M0_wRn(rd0);
2652 switch ((insn >> 20) & 0xf) {
2653 case 0x0:
2654 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2655 break;
2656 case 0x1:
2657 gen_op_iwmmxt_addub_M0_wRn(rd1);
2658 break;
2659 case 0x3:
2660 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2661 break;
2662 case 0x4:
2663 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2664 break;
2665 case 0x5:
2666 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2667 break;
2668 case 0x7:
2669 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2670 break;
2671 case 0x8:
2672 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2673 break;
2674 case 0x9:
2675 gen_op_iwmmxt_addul_M0_wRn(rd1);
2676 break;
2677 case 0xb:
2678 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2679 break;
2680 default:
2681 return 1;
2682 }
2683 gen_op_iwmmxt_movq_wRn_M0(wrd);
2684 gen_op_iwmmxt_set_mup();
2685 gen_op_iwmmxt_set_cup();
2686 break;
2687 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2688 case 0x408: case 0x508: case 0x608: case 0x708:
2689 case 0x808: case 0x908: case 0xa08: case 0xb08:
2690 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2691 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2692 return 1;
18c9b560
AZ
2693 wrd = (insn >> 12) & 0xf;
2694 rd0 = (insn >> 16) & 0xf;
2695 rd1 = (insn >> 0) & 0xf;
2696 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2697 switch ((insn >> 22) & 3) {
18c9b560
AZ
2698 case 1:
2699 if (insn & (1 << 21))
2700 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2701 else
2702 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2703 break;
2704 case 2:
2705 if (insn & (1 << 21))
2706 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2707 else
2708 gen_op_iwmmxt_packul_M0_wRn(rd1);
2709 break;
2710 case 3:
2711 if (insn & (1 << 21))
2712 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2713 else
2714 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2715 break;
2716 }
2717 gen_op_iwmmxt_movq_wRn_M0(wrd);
2718 gen_op_iwmmxt_set_mup();
2719 gen_op_iwmmxt_set_cup();
2720 break;
2721 case 0x201: case 0x203: case 0x205: case 0x207:
2722 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2723 case 0x211: case 0x213: case 0x215: case 0x217:
2724 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2725 wrd = (insn >> 5) & 0xf;
2726 rd0 = (insn >> 12) & 0xf;
2727 rd1 = (insn >> 0) & 0xf;
2728 if (rd0 == 0xf || rd1 == 0xf)
2729 return 1;
2730 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2731 tmp = load_reg(s, rd0);
2732 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2733 switch ((insn >> 16) & 0xf) {
2734 case 0x0: /* TMIA */
da6b5335 2735 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2736 break;
2737 case 0x8: /* TMIAPH */
da6b5335 2738 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2739 break;
2740 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2741 if (insn & (1 << 16))
da6b5335 2742 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2743 if (insn & (1 << 17))
da6b5335
FN
2744 tcg_gen_shri_i32(tmp2, tmp2, 16);
2745 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2746 break;
2747 default:
7d1b0095
PM
2748 tcg_temp_free_i32(tmp2);
2749 tcg_temp_free_i32(tmp);
18c9b560
AZ
2750 return 1;
2751 }
7d1b0095
PM
2752 tcg_temp_free_i32(tmp2);
2753 tcg_temp_free_i32(tmp);
18c9b560
AZ
2754 gen_op_iwmmxt_movq_wRn_M0(wrd);
2755 gen_op_iwmmxt_set_mup();
2756 break;
2757 default:
2758 return 1;
2759 }
2760
2761 return 0;
2762}
2763
a1c7273b 2764/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2765 (ie. an undefined instruction). */
7dcc1f89 2766static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2767{
2768 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2769 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2770
2771 if ((insn & 0x0ff00f10) == 0x0e200010) {
2772 /* Multiply with Internal Accumulate Format */
2773 rd0 = (insn >> 12) & 0xf;
2774 rd1 = insn & 0xf;
2775 acc = (insn >> 5) & 7;
2776
2777 if (acc != 0)
2778 return 1;
2779
3a554c0f
FN
2780 tmp = load_reg(s, rd0);
2781 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2782 switch ((insn >> 16) & 0xf) {
2783 case 0x0: /* MIA */
3a554c0f 2784 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2785 break;
2786 case 0x8: /* MIAPH */
3a554c0f 2787 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2788 break;
2789 case 0xc: /* MIABB */
2790 case 0xd: /* MIABT */
2791 case 0xe: /* MIATB */
2792 case 0xf: /* MIATT */
18c9b560 2793 if (insn & (1 << 16))
3a554c0f 2794 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2795 if (insn & (1 << 17))
3a554c0f
FN
2796 tcg_gen_shri_i32(tmp2, tmp2, 16);
2797 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2798 break;
2799 default:
2800 return 1;
2801 }
7d1b0095
PM
2802 tcg_temp_free_i32(tmp2);
2803 tcg_temp_free_i32(tmp);
18c9b560
AZ
2804
2805 gen_op_iwmmxt_movq_wRn_M0(acc);
2806 return 0;
2807 }
2808
2809 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2810 /* Internal Accumulator Access Format */
2811 rdhi = (insn >> 16) & 0xf;
2812 rdlo = (insn >> 12) & 0xf;
2813 acc = insn & 7;
2814
2815 if (acc != 0)
2816 return 1;
2817
2818 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2819 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2820 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2821 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2822 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2823 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2824 } else { /* MAR */
3a554c0f
FN
2825 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2826 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2827 }
2828 return 0;
2829 }
2830
2831 return 1;
2832}
2833
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction word.
 * VFP_REG_SHR shifts right for positive n and left for negative n, so a
 * single expression can handle fields on either side of the target bit.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        /* VFP3 and later: 32 D registers; smallbit is the high bit. */ \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        /* Pre-VFP3: only D0..D15; a set high bit means UNDEF. */ \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
4373f3ce 2854/* Move between integer and VFP cores. */
39d5492a 2855static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2856{
39d5492a 2857 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2858 tcg_gen_mov_i32(tmp, cpu_F0s);
2859 return tmp;
2860}
2861
39d5492a 2862static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2863{
2864 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2865 tcg_temp_free_i32(tmp);
4373f3ce
PB
2866}
2867
39d5492a 2868static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2869{
39d5492a 2870 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2871 if (shift)
2872 tcg_gen_shri_i32(var, var, shift);
86831435 2873 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2874 tcg_gen_shli_i32(tmp, var, 8);
2875 tcg_gen_or_i32(var, var, tmp);
2876 tcg_gen_shli_i32(tmp, var, 16);
2877 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2878 tcg_temp_free_i32(tmp);
ad69471c
PB
2879}
2880
39d5492a 2881static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2882{
39d5492a 2883 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2884 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2885 tcg_gen_shli_i32(tmp, var, 16);
2886 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2887 tcg_temp_free_i32(tmp);
ad69471c
PB
2888}
2889
39d5492a 2890static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2891{
39d5492a 2892 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2893 tcg_gen_andi_i32(var, var, 0xffff0000);
2894 tcg_gen_shri_i32(tmp, var, 16);
2895 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2896 tcg_temp_free_i32(tmp);
ad69471c
PB
2897}
2898
39d5492a 2899static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2900{
2901 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2902 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2903 switch (size) {
2904 case 0:
12dcc321 2905 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2906 gen_neon_dup_u8(tmp, 0);
2907 break;
2908 case 1:
12dcc321 2909 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2910 gen_neon_dup_low16(tmp);
2911 break;
2912 case 2:
12dcc321 2913 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2914 break;
2915 default: /* Avoid compiler warnings. */
2916 abort();
2917 }
2918 return tmp;
2919}
2920
04731fb5
WN
2921static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2922 uint32_t dp)
2923{
2924 uint32_t cc = extract32(insn, 20, 2);
2925
2926 if (dp) {
2927 TCGv_i64 frn, frm, dest;
2928 TCGv_i64 tmp, zero, zf, nf, vf;
2929
2930 zero = tcg_const_i64(0);
2931
2932 frn = tcg_temp_new_i64();
2933 frm = tcg_temp_new_i64();
2934 dest = tcg_temp_new_i64();
2935
2936 zf = tcg_temp_new_i64();
2937 nf = tcg_temp_new_i64();
2938 vf = tcg_temp_new_i64();
2939
2940 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2941 tcg_gen_ext_i32_i64(nf, cpu_NF);
2942 tcg_gen_ext_i32_i64(vf, cpu_VF);
2943
2944 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2945 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2946 switch (cc) {
2947 case 0: /* eq: Z */
2948 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2949 frn, frm);
2950 break;
2951 case 1: /* vs: V */
2952 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2953 frn, frm);
2954 break;
2955 case 2: /* ge: N == V -> N ^ V == 0 */
2956 tmp = tcg_temp_new_i64();
2957 tcg_gen_xor_i64(tmp, vf, nf);
2958 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2959 frn, frm);
2960 tcg_temp_free_i64(tmp);
2961 break;
2962 case 3: /* gt: !Z && N == V */
2963 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2964 frn, frm);
2965 tmp = tcg_temp_new_i64();
2966 tcg_gen_xor_i64(tmp, vf, nf);
2967 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2968 dest, frm);
2969 tcg_temp_free_i64(tmp);
2970 break;
2971 }
2972 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2973 tcg_temp_free_i64(frn);
2974 tcg_temp_free_i64(frm);
2975 tcg_temp_free_i64(dest);
2976
2977 tcg_temp_free_i64(zf);
2978 tcg_temp_free_i64(nf);
2979 tcg_temp_free_i64(vf);
2980
2981 tcg_temp_free_i64(zero);
2982 } else {
2983 TCGv_i32 frn, frm, dest;
2984 TCGv_i32 tmp, zero;
2985
2986 zero = tcg_const_i32(0);
2987
2988 frn = tcg_temp_new_i32();
2989 frm = tcg_temp_new_i32();
2990 dest = tcg_temp_new_i32();
2991 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2992 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2993 switch (cc) {
2994 case 0: /* eq: Z */
2995 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2996 frn, frm);
2997 break;
2998 case 1: /* vs: V */
2999 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3000 frn, frm);
3001 break;
3002 case 2: /* ge: N == V -> N ^ V == 0 */
3003 tmp = tcg_temp_new_i32();
3004 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3005 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3006 frn, frm);
3007 tcg_temp_free_i32(tmp);
3008 break;
3009 case 3: /* gt: !Z && N == V */
3010 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3011 frn, frm);
3012 tmp = tcg_temp_new_i32();
3013 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3014 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3015 dest, frm);
3016 tcg_temp_free_i32(tmp);
3017 break;
3018 }
3019 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3020 tcg_temp_free_i32(frn);
3021 tcg_temp_free_i32(frm);
3022 tcg_temp_free_i32(dest);
3023
3024 tcg_temp_free_i32(zero);
3025 }
3026
3027 return 0;
3028}
3029
40cfacdd
WN
3030static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3031 uint32_t rm, uint32_t dp)
3032{
3033 uint32_t vmin = extract32(insn, 6, 1);
3034 TCGv_ptr fpst = get_fpstatus_ptr(0);
3035
3036 if (dp) {
3037 TCGv_i64 frn, frm, dest;
3038
3039 frn = tcg_temp_new_i64();
3040 frm = tcg_temp_new_i64();
3041 dest = tcg_temp_new_i64();
3042
3043 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3044 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3045 if (vmin) {
f71a2ae5 3046 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3047 } else {
f71a2ae5 3048 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3049 }
3050 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3051 tcg_temp_free_i64(frn);
3052 tcg_temp_free_i64(frm);
3053 tcg_temp_free_i64(dest);
3054 } else {
3055 TCGv_i32 frn, frm, dest;
3056
3057 frn = tcg_temp_new_i32();
3058 frm = tcg_temp_new_i32();
3059 dest = tcg_temp_new_i32();
3060
3061 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3062 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3063 if (vmin) {
f71a2ae5 3064 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3065 } else {
f71a2ae5 3066 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3067 }
3068 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3069 tcg_temp_free_i32(frn);
3070 tcg_temp_free_i32(frm);
3071 tcg_temp_free_i32(dest);
3072 }
3073
3074 tcg_temp_free_ptr(fpst);
3075 return 0;
3076}
3077
7655f39b
WN
3078static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3079 int rounding)
3080{
3081 TCGv_ptr fpst = get_fpstatus_ptr(0);
3082 TCGv_i32 tcg_rmode;
3083
3084 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3085 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3086
3087 if (dp) {
3088 TCGv_i64 tcg_op;
3089 TCGv_i64 tcg_res;
3090 tcg_op = tcg_temp_new_i64();
3091 tcg_res = tcg_temp_new_i64();
3092 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3093 gen_helper_rintd(tcg_res, tcg_op, fpst);
3094 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3095 tcg_temp_free_i64(tcg_op);
3096 tcg_temp_free_i64(tcg_res);
3097 } else {
3098 TCGv_i32 tcg_op;
3099 TCGv_i32 tcg_res;
3100 tcg_op = tcg_temp_new_i32();
3101 tcg_res = tcg_temp_new_i32();
3102 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3103 gen_helper_rints(tcg_res, tcg_op, fpst);
3104 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3105 tcg_temp_free_i32(tcg_op);
3106 tcg_temp_free_i32(tcg_res);
3107 }
3108
3109 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3110 tcg_temp_free_i32(tcg_rmode);
3111
3112 tcg_temp_free_ptr(fpst);
3113 return 0;
3114}
3115
c9975a83
WN
3116static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3117 int rounding)
3118{
3119 bool is_signed = extract32(insn, 7, 1);
3120 TCGv_ptr fpst = get_fpstatus_ptr(0);
3121 TCGv_i32 tcg_rmode, tcg_shift;
3122
3123 tcg_shift = tcg_const_i32(0);
3124
3125 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3126 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3127
3128 if (dp) {
3129 TCGv_i64 tcg_double, tcg_res;
3130 TCGv_i32 tcg_tmp;
3131 /* Rd is encoded as a single precision register even when the source
3132 * is double precision.
3133 */
3134 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3135 tcg_double = tcg_temp_new_i64();
3136 tcg_res = tcg_temp_new_i64();
3137 tcg_tmp = tcg_temp_new_i32();
3138 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3139 if (is_signed) {
3140 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3141 } else {
3142 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3143 }
ecc7b3aa 3144 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3145 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3146 tcg_temp_free_i32(tcg_tmp);
3147 tcg_temp_free_i64(tcg_res);
3148 tcg_temp_free_i64(tcg_double);
3149 } else {
3150 TCGv_i32 tcg_single, tcg_res;
3151 tcg_single = tcg_temp_new_i32();
3152 tcg_res = tcg_temp_new_i32();
3153 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3154 if (is_signed) {
3155 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3156 } else {
3157 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3158 }
3159 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3160 tcg_temp_free_i32(tcg_res);
3161 tcg_temp_free_i32(tcg_single);
3162 }
3163
3164 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3165 tcg_temp_free_i32(tcg_rmode);
3166
3167 tcg_temp_free_i32(tcg_shift);
3168
3169 tcg_temp_free_ptr(fpst);
3170
3171 return 0;
3172}
7655f39b
WN
3173
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by insn bits [17:16] of the VRINT{A,N,P,M} / VCVT{A,N,P,M}
 * encodings (see disas_vfp_v8_insn below).
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3184
7dcc1f89 3185static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3186{
3187 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3188
d614a513 3189 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3190 return 1;
3191 }
3192
3193 if (dp) {
3194 VFP_DREG_D(rd, insn);
3195 VFP_DREG_N(rn, insn);
3196 VFP_DREG_M(rm, insn);
3197 } else {
3198 rd = VFP_SREG_D(insn);
3199 rn = VFP_SREG_N(insn);
3200 rm = VFP_SREG_M(insn);
3201 }
3202
3203 if ((insn & 0x0f800e50) == 0x0e000a00) {
3204 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3205 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3206 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3207 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3208 /* VRINTA, VRINTN, VRINTP, VRINTM */
3209 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3210 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3211 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3212 /* VCVTA, VCVTN, VCVTP, VCVTM */
3213 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3214 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3215 }
3216 return 1;
3217}
3218
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
 * (ie. an undefined instruction).
 *
 * Handles the whole legacy (pre-v8) VFP encoding space plus dispatch to
 * disas_vfp_v8_insn for the unconditional/T=1 encodings: single and
 * two-register transfers between ARM core and VFP registers, system
 * register (FPSID/FPSCR/FPEXC/...) accesses, data-processing ops
 * including the short-vector looping mode driven by FPSCR LEN/STRIDE
 * (s->vec_len / s->vec_stride), and VFP load/store (single and multiple).
 * Emits TCG ops as a side effect; returns 0 when the insn was handled.
 */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* dp is set for the D-register (double precision) form of the insn */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        /* Register transfer (bit 4 set) or data processing (bit 4 clear) */
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                /* Decode element size and bit offset within the D reg:
                 * size 0 = byte, 1 = halfword, 2 = word.
                 */
                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm: extract element, sign/zero extend per bit 23 */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP: replicate the core register element across
                         * all elements of the Neon register(s).
                         */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV: insert element into the register */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* rd == 15: FMSTAT, only the flag bits */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            /* FPSCR write can change vec_len/vec_stride:
                             * end the TB so the new values take effect.
                             */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            /* Short-vector length from FPSCR (decoded into DisasContext);
             * compares and conversions (op 15, rn > 3) are always scalar.
             */
            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Loop over the elements of a short vector (single iteration
             * for scalar ops, ie veclen == 0).
             */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst: VFP3 immediate, expanded per VFPExpandImm */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge into the low half of Sd, keeping the top */
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge into the top half of Sd, keeping the low */
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz: round towards zero regardless of FPSCR */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        /* Two-register transfer, or load/store */
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
4138
/* Return true if it is safe to chain directly to 'dest' with goto_tb.
 * Chaining is only valid when the destination lies on the same guest
 * page as either the TB start or the current instruction, so that a
 * page-protection change invalidates the chain.  User-mode emulation
 * has no such constraint.
 */
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
6e256c93 4148
/* Emit a jump to 'dest', chaining TBs through exit slot 'n' when allowed. */
static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chain: patchable goto_tb, then exit tagged with slot n. */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    } else {
        /* Cannot chain: set the PC and return to the main loop. */
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(0);
    }
}
4160
8aaca4c0
FB
4161static inline void gen_jmp (DisasContext *s, uint32_t dest)
4162{
b636649f 4163 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4164 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4165 if (s->thumb)
d9ba4830
PB
4166 dest |= 1;
4167 gen_bx_im(s, dest);
8aaca4c0 4168 } else {
6e256c93 4169 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4170 s->is_jmp = DISAS_TB_JUMP;
4171 }
4172}
4173
39d5492a 4174static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4175{
ee097184 4176 if (x)
d9ba4830 4177 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4178 else
d9ba4830 4179 gen_sxth(t0);
ee097184 4180 if (y)
d9ba4830 4181 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4182 else
d9ba4830
PB
4183 gen_sxth(t1);
4184 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4185}
4186
4187/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4188static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4189{
b5ff1b31
FB
4190 uint32_t mask;
4191
4192 mask = 0;
4193 if (flags & (1 << 0))
4194 mask |= 0xff;
4195 if (flags & (1 << 1))
4196 mask |= 0xff00;
4197 if (flags & (1 << 2))
4198 mask |= 0xff0000;
4199 if (flags & (1 << 3))
4200 mask |= 0xff000000;
9ee6e8bb 4201
2ae23e75 4202 /* Mask out undefined bits. */
9ee6e8bb 4203 mask &= ~CPSR_RESERVED;
d614a513 4204 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4205 mask &= ~CPSR_T;
d614a513
PM
4206 }
4207 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4208 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4209 }
4210 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4211 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4212 }
4213 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4214 mask &= ~CPSR_IT;
d614a513 4215 }
4051e12c
PM
4216 /* Mask out execution state and reserved bits. */
4217 if (!spsr) {
4218 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4219 }
b5ff1b31
FB
4220 /* Mask out privileged bits. */
4221 if (IS_USER(s))
9ee6e8bb 4222 mask &= CPSR_USER;
b5ff1b31
FB
4223 return mask;
4224}
4225
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: merge only the masked bits of t0 into SPSR. */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR writes go through the helper (may change mode/flags). */
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPU state may have changed; force a TB lookup. */
    gen_lookup_tb(s);
    return 0;
}
4247
2fbac54b
FN
4248/* Returns nonzero if access to the PSR is not permitted. */
4249static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4250{
39d5492a 4251 TCGv_i32 tmp;
7d1b0095 4252 tmp = tcg_temp_new_i32();
2fbac54b
FN
4253 tcg_gen_movi_i32(tmp, val);
4254 return gen_set_psr(s, mask, spsr, tmp);
4255}
4256
8bfd0550
PM
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4401
4402static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4403{
4404 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4405 int tgtmode = 0, regno = 0;
4406
4407 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4408 return;
4409 }
4410
4411 /* Sync state because msr_banked() can raise exceptions */
4412 gen_set_condexec(s);
4413 gen_set_pc_im(s, s->pc - 4);
4414 tcg_reg = load_reg(s, rn);
4415 tcg_tgtmode = tcg_const_i32(tgtmode);
4416 tcg_regno = tcg_const_i32(regno);
4417 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4418 tcg_temp_free_i32(tcg_tgtmode);
4419 tcg_temp_free_i32(tcg_regno);
4420 tcg_temp_free_i32(tcg_reg);
4421 s->is_jmp = DISAS_UPDATE;
4422}
4423
4424static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4425{
4426 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4427 int tgtmode = 0, regno = 0;
4428
4429 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4430 return;
4431 }
4432
4433 /* Sync state because mrs_banked() can raise exceptions */
4434 gen_set_condexec(s);
4435 gen_set_pc_im(s, s->pc - 4);
4436 tcg_reg = tcg_temp_new_i32();
4437 tcg_tgtmode = tcg_const_i32(tgtmode);
4438 tcg_regno = tcg_const_i32(regno);
4439 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4440 tcg_temp_free_i32(tcg_tgtmode);
4441 tcg_temp_free_i32(tcg_regno);
4442 store_reg(s, rn, tcg_reg);
4443 s->is_jmp = DISAS_UPDATE;
4444}
4445
fb0e8e79
PM
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    /* Raw write into R15; consumes (frees) 'pc'. */
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
4455
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Mode/Thumb state may have changed: end translation here. */
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4468
fb0e8e79
PM
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* Old-style return restores CPSR from the current SPSR. */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4474
c22edfeb
AB
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        if (!parallel_cpus) {
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        /* WFI always halts, in both single-threaded and MTTCG modes. */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!parallel_cpus) {
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
        /* fall through: treated as a NOP for now */
    default: /* nop */
        break;
    }
}
99c475ab 4509
ad69471c 4510#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4511
39d5492a 4512static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4513{
4514 switch (size) {
dd8fbd78
FN
4515 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4516 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4517 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4518 default: abort();
9ee6e8bb 4519 }
9ee6e8bb
PB
4520}
4521
39d5492a 4522static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4523{
4524 switch (size) {
dd8fbd78
FN
4525 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4526 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4527 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4528 default: return;
4529 }
4530}
4531
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Dispatch an integer Neon helper that takes cpu_env, selected by the
 * in-scope 'size' (0/1/2) and 'u' (unsigned) variables; operates on the
 * in-scope temps 'tmp' and 'tmp2'.  Expands to 'return 1' (UNDEF) for
 * an invalid size/signedness combination, so it may only be used inside
 * a function returning int.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4583
39d5492a 4584static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4585{
39d5492a 4586 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4587 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4588 return tmp;
9ee6e8bb
PB
4589}
4590
39d5492a 4591static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4592{
dd8fbd78 4593 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4594 tcg_temp_free_i32(var);
9ee6e8bb
PB
4595}
4596
39d5492a 4597static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4598{
39d5492a 4599 TCGv_i32 tmp;
9ee6e8bb 4600 if (size == 1) {
0fad6efc
PM
4601 tmp = neon_load_reg(reg & 7, reg >> 4);
4602 if (reg & 8) {
dd8fbd78 4603 gen_neon_dup_high16(tmp);
0fad6efc
PM
4604 } else {
4605 gen_neon_dup_low16(tmp);
dd8fbd78 4606 }
0fad6efc
PM
4607 } else {
4608 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4609 }
dd8fbd78 4610 return tmp;
9ee6e8bb
PB
4611}
4612
/* Emit a call to the Neon unzip helper for registers rd/rm at the given
 * element size; q selects the quad-register form.  Returns nonzero for
 * the invalid (UNDEF) combination of !q with 32-bit elements.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* The helpers take register *numbers*, not values. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4651
/* Emit a call to the Neon zip helper for registers rd/rm at the given
 * element size; q selects the quad-register form.  Returns nonzero for
 * the invalid (UNDEF) combination of !q with 32-bit elements.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* The helpers take register *numbers*, not values. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4690
/* Byte-lane transpose of t0/t1, computed in place:
 *   t0' = ((t0 << 8) & 0xff00ff00) | (t1 & 0x00ff00ff)
 *   t1' = ((t1 >> 8) & 0x00ff00ff) | (t0 & 0xff00ff00)
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* Build the new t0 in rd (t0 is still needed for the new t1). */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* Update t1 in place, then commit rd into t0. */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4712
/* Halfword-lane transpose of t0/t1, computed in place:
 *   t0' = (t0 << 16) | (t1 & 0xffff)
 *   t1' = (t1 >> 16) | (t0 & 0xffff0000)
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* Build the new t0 in rd (t0 is still needed for the new t1). */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* Update t1 in place, then commit rd into t0. */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4731
4732
9ee6e8bb
PB
/* Per-op transfer description for the Neon "load/store multiple
 * structures" group, indexed by the op field (0..10): number of D
 * registers transferred, element interleave factor, and register
 * spacing.  Used by disas_neon_ls_insn() below.
 */
static struct {
    int nregs;      /* number of D registers accessed */
    int interleave; /* element interleave factor */
    int spacing;    /* register-number step between transfers */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4750
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        /* Transfer geometry comes from the per-op table above. */
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved accesses, restart from the base address
             * with a per-register offset rather than running linearly.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: two 32-bit passes per D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one 32-bit word. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into one word. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Sub-word load: insert into the existing lane. */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Post-index writeback: rm == 15 means none, rm == 13 means add the
     * transfer size, otherwise add the index register.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 5079
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* dest = (t & c) | (f & ~c), computed in place on t and f. */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
5087
39d5492a 5088static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5089{
5090 switch (size) {
5091 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5092 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5093 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5094 default: abort();
5095 }
5096}
5097
39d5492a 5098static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5099{
5100 switch (size) {
02da0b2d
PM
5101 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5102 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5103 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5104 default: abort();
5105 }
5106}
5107
39d5492a 5108static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5109{
5110 switch (size) {
02da0b2d
PM
5111 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5112 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5113 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5114 default: abort();
5115 }
5116}
5117
39d5492a 5118static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5119{
5120 switch (size) {
02da0b2d
PM
5121 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5122 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5123 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5124 default: abort();
5125 }
5126}
5127
39d5492a 5128static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5129 int q, int u)
5130{
5131 if (q) {
5132 if (u) {
5133 switch (size) {
5134 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5135 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5136 default: abort();
5137 }
5138 } else {
5139 switch (size) {
5140 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5141 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5142 default: abort();
5143 }
5144 }
5145 } else {
5146 if (u) {
5147 switch (size) {
b408a9b0
CL
5148 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5149 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5150 default: abort();
5151 }
5152 } else {
5153 switch (size) {
5154 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5155 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5156 default: abort();
5157 }
5158 }
5159 }
5160}
5161
39d5492a 5162static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5163{
5164 if (u) {
5165 switch (size) {
5166 case 0: gen_helper_neon_widen_u8(dest, src); break;
5167 case 1: gen_helper_neon_widen_u16(dest, src); break;
5168 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5169 default: abort();
5170 }
5171 } else {
5172 switch (size) {
5173 case 0: gen_helper_neon_widen_s8(dest, src); break;
5174 case 1: gen_helper_neon_widen_s16(dest, src); break;
5175 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5176 default: abort();
5177 }
5178 }
7d1b0095 5179 tcg_temp_free_i32(src);
ad69471c
PB
5180}
5181
5182static inline void gen_neon_addl(int size)
5183{
5184 switch (size) {
5185 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5186 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5187 case 2: tcg_gen_add_i64(CPU_V001); break;
5188 default: abort();
5189 }
5190}
5191
5192static inline void gen_neon_subl(int size)
5193{
5194 switch (size) {
5195 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5196 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5197 case 2: tcg_gen_sub_i64(CPU_V001); break;
5198 default: abort();
5199 }
5200}
5201
a7812ae4 5202static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5203{
5204 switch (size) {
5205 case 0: gen_helper_neon_negl_u16(var, var); break;
5206 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5207 case 2:
5208 tcg_gen_neg_i64(var, var);
5209 break;
ad69471c
PB
5210 default: abort();
5211 }
5212}
5213
a7812ae4 5214static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5215{
5216 switch (size) {
02da0b2d
PM
5217 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5218 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5219 default: abort();
5220 }
5221}
5222
39d5492a
PM
5223static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5224 int size, int u)
ad69471c 5225{
a7812ae4 5226 TCGv_i64 tmp;
ad69471c
PB
5227
5228 switch ((size << 1) | u) {
5229 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5230 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5231 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5232 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5233 case 4:
5234 tmp = gen_muls_i64_i32(a, b);
5235 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5236 tcg_temp_free_i64(tmp);
ad69471c
PB
5237 break;
5238 case 5:
5239 tmp = gen_mulu_i64_i32(a, b);
5240 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5241 tcg_temp_free_i64(tmp);
ad69471c
PB
5242 break;
5243 default: abort();
5244 }
c6067f04
CL
5245
5246 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5247 Don't forget to clean them now. */
5248 if (size < 2) {
7d1b0095
PM
5249 tcg_temp_free_i32(a);
5250 tcg_temp_free_i32(b);
c6067f04 5251 }
ad69471c
PB
5252}
5253
39d5492a
PM
5254static void gen_neon_narrow_op(int op, int u, int size,
5255 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5256{
5257 if (op) {
5258 if (u) {
5259 gen_neon_unarrow_sats(size, dest, src);
5260 } else {
5261 gen_neon_narrow(size, dest, src);
5262 }
5263 } else {
5264 if (u) {
5265 gen_neon_narrow_satu(size, dest, src);
5266 } else {
5267 gen_neon_narrow_sats(size, dest, src);
5268 }
5269 }
5270}
5271
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* For each 3-reg-same op, bit n of the entry is set if element size n
 * is allowed; unallocated ops have no bits set and so always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5343
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5410
5411static int neon_2rm_is_float_op(int op)
5412{
5413 /* Return true if this neon 2reg-misc op is float-to-float */
5414 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5415 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5416 op == NEON_2RM_VRINTM ||
5417 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5418 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5419}
5420
fe8fcf3d
PM
5421static bool neon_2rm_is_v8_op(int op)
5422{
5423 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5424 switch (op) {
5425 case NEON_2RM_VRINTN:
5426 case NEON_2RM_VRINTA:
5427 case NEON_2RM_VRINTM:
5428 case NEON_2RM_VRINTP:
5429 case NEON_2RM_VRINTZ:
5430 case NEON_2RM_VRINTX:
5431 case NEON_2RM_VCVTAU:
5432 case NEON_2RM_VCVTAS:
5433 case NEON_2RM_VCVTNU:
5434 case NEON_2RM_VCVTNS:
5435 case NEON_2RM_VCVTPU:
5436 case NEON_2RM_VCVTPS:
5437 case NEON_2RM_VCVTMU:
5438 case NEON_2RM_VCVTMS:
5439 return true;
5440 default:
5441 return false;
5442 }
5443}
5444
600b828c
PM
5445/* Each entry in this array has bit n set if the insn allows
5446 * size value n (otherwise it will UNDEF). Since unallocated
5447 * op values will have no bits set they always UNDEF.
5448 */
5449static const uint8_t neon_2rm_sizes[] = {
5450 [NEON_2RM_VREV64] = 0x7,
5451 [NEON_2RM_VREV32] = 0x3,
5452 [NEON_2RM_VREV16] = 0x1,
5453 [NEON_2RM_VPADDL] = 0x7,
5454 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
5455 [NEON_2RM_AESE] = 0x1,
5456 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
5457 [NEON_2RM_VCLS] = 0x7,
5458 [NEON_2RM_VCLZ] = 0x7,
5459 [NEON_2RM_VCNT] = 0x1,
5460 [NEON_2RM_VMVN] = 0x1,
5461 [NEON_2RM_VPADAL] = 0x7,
5462 [NEON_2RM_VPADAL_U] = 0x7,
5463 [NEON_2RM_VQABS] = 0x7,
5464 [NEON_2RM_VQNEG] = 0x7,
5465 [NEON_2RM_VCGT0] = 0x7,
5466 [NEON_2RM_VCGE0] = 0x7,
5467 [NEON_2RM_VCEQ0] = 0x7,
5468 [NEON_2RM_VCLE0] = 0x7,
5469 [NEON_2RM_VCLT0] = 0x7,
f1ecb913 5470 [NEON_2RM_SHA1H] = 0x4,
600b828c
PM
5471 [NEON_2RM_VABS] = 0x7,
5472 [NEON_2RM_VNEG] = 0x7,
5473 [NEON_2RM_VCGT0_F] = 0x4,
5474 [NEON_2RM_VCGE0_F] = 0x4,
5475 [NEON_2RM_VCEQ0_F] = 0x4,
5476 [NEON_2RM_VCLE0_F] = 0x4,
5477 [NEON_2RM_VCLT0_F] = 0x4,
5478 [NEON_2RM_VABS_F] = 0x4,
5479 [NEON_2RM_VNEG_F] = 0x4,
5480 [NEON_2RM_VSWP] = 0x1,
5481 [NEON_2RM_VTRN] = 0x7,
5482 [NEON_2RM_VUZP] = 0x7,
5483 [NEON_2RM_VZIP] = 0x7,
5484 [NEON_2RM_VMOVN] = 0x7,
5485 [NEON_2RM_VQMOVN] = 0x7,
5486 [NEON_2RM_VSHLL] = 0x7,
f1ecb913 5487 [NEON_2RM_SHA1SU1] = 0x4,
34f7b0a2 5488 [NEON_2RM_VRINTN] = 0x4,
2ce70625 5489 [NEON_2RM_VRINTX] = 0x4,
34f7b0a2
WN
5490 [NEON_2RM_VRINTA] = 0x4,
5491 [NEON_2RM_VRINTZ] = 0x4,
600b828c 5492 [NEON_2RM_VCVT_F16_F32] = 0x2,
34f7b0a2 5493 [NEON_2RM_VRINTM] = 0x4,
600b828c 5494 [NEON_2RM_VCVT_F32_F16] = 0x2,
34f7b0a2 5495 [NEON_2RM_VRINTP] = 0x4,
901ad525
WN
5496 [NEON_2RM_VCVTAU] = 0x4,
5497 [NEON_2RM_VCVTAS] = 0x4,
5498 [NEON_2RM_VCVTNU] = 0x4,
5499 [NEON_2RM_VCVTNS] = 0x4,
5500 [NEON_2RM_VCVTPU] = 0x4,
5501 [NEON_2RM_VCVTPS] = 0x4,
5502 [NEON_2RM_VCVTMU] = 0x4,
5503 [NEON_2RM_VCVTMS] = 0x4,
600b828c
PM
5504 [NEON_2RM_VRECPE] = 0x4,
5505 [NEON_2RM_VRSQRTE] = 0x4,
5506 [NEON_2RM_VRECPE_F] = 0x4,
5507 [NEON_2RM_VRSQRTE_F] = 0x4,
5508 [NEON_2RM_VCVT_FS] = 0x4,
5509 [NEON_2RM_VCVT_FU] = 0x4,
5510 [NEON_2RM_VCVT_SF] = 0x4,
5511 [NEON_2RM_VCVT_UF] = 0x4,
5512};
5513
9ee6e8bb
PB
5514/* Translate a NEON data processing instruction. Return nonzero if the
5515 instruction is invalid.
ad69471c
PB
5516 We process data in a mixture of 32-bit and 64-bit chunks.
5517 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5518
7dcc1f89 5519static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5520{
5521 int op;
5522 int q;
5523 int rd, rn, rm;
5524 int size;
5525 int shift;
5526 int pass;
5527 int count;
5528 int pairwise;
5529 int u;
ca9a32e4 5530 uint32_t imm, mask;
39d5492a 5531 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5532 TCGv_i64 tmp64;
9ee6e8bb 5533
2c7ffc41
PM
5534 /* FIXME: this access check should not take precedence over UNDEF
5535 * for invalid encodings; we will generate incorrect syndrome information
5536 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5537 */
9dbbc748 5538 if (s->fp_excp_el) {
2c7ffc41 5539 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5540 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5541 return 0;
5542 }
5543
5df8bac1 5544 if (!s->vfp_enabled)
9ee6e8bb
PB
5545 return 1;
5546 q = (insn & (1 << 6)) != 0;
5547 u = (insn >> 24) & 1;
5548 VFP_DREG_D(rd, insn);
5549 VFP_DREG_N(rn, insn);
5550 VFP_DREG_M(rm, insn);
5551 size = (insn >> 20) & 3;
5552 if ((insn & (1 << 23)) == 0) {
5553 /* Three register same length. */
5554 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5555 /* Catch invalid op and bad size combinations: UNDEF */
5556 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5557 return 1;
5558 }
25f84f79
PM
5559 /* All insns of this form UNDEF for either this condition or the
5560 * superset of cases "Q==1"; we catch the latter later.
5561 */
5562 if (q && ((rd | rn | rm) & 1)) {
5563 return 1;
5564 }
f1ecb913
AB
5565 /*
5566 * The SHA-1/SHA-256 3-register instructions require special treatment
5567 * here, as their size field is overloaded as an op type selector, and
5568 * they all consume their input in a single pass.
5569 */
5570 if (op == NEON_3R_SHA) {
5571 if (!q) {
5572 return 1;
5573 }
5574 if (!u) { /* SHA-1 */
d614a513 5575 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5576 return 1;
5577 }
5578 tmp = tcg_const_i32(rd);
5579 tmp2 = tcg_const_i32(rn);
5580 tmp3 = tcg_const_i32(rm);
5581 tmp4 = tcg_const_i32(size);
5582 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5583 tcg_temp_free_i32(tmp4);
5584 } else { /* SHA-256 */
d614a513 5585 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5586 return 1;
5587 }
5588 tmp = tcg_const_i32(rd);
5589 tmp2 = tcg_const_i32(rn);
5590 tmp3 = tcg_const_i32(rm);
5591 switch (size) {
5592 case 0:
5593 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5594 break;
5595 case 1:
5596 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5597 break;
5598 case 2:
5599 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5600 break;
5601 }
5602 }
5603 tcg_temp_free_i32(tmp);
5604 tcg_temp_free_i32(tmp2);
5605 tcg_temp_free_i32(tmp3);
5606 return 0;
5607 }
62698be3
PM
5608 if (size == 3 && op != NEON_3R_LOGIC) {
5609 /* 64-bit element instructions. */
9ee6e8bb 5610 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5611 neon_load_reg64(cpu_V0, rn + pass);
5612 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5613 switch (op) {
62698be3 5614 case NEON_3R_VQADD:
9ee6e8bb 5615 if (u) {
02da0b2d
PM
5616 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5617 cpu_V0, cpu_V1);
2c0262af 5618 } else {
02da0b2d
PM
5619 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5620 cpu_V0, cpu_V1);
2c0262af 5621 }
9ee6e8bb 5622 break;
62698be3 5623 case NEON_3R_VQSUB:
9ee6e8bb 5624 if (u) {
02da0b2d
PM
5625 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5626 cpu_V0, cpu_V1);
ad69471c 5627 } else {
02da0b2d
PM
5628 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5629 cpu_V0, cpu_V1);
ad69471c
PB
5630 }
5631 break;
62698be3 5632 case NEON_3R_VSHL:
ad69471c
PB
5633 if (u) {
5634 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5635 } else {
5636 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5637 }
5638 break;
62698be3 5639 case NEON_3R_VQSHL:
ad69471c 5640 if (u) {
02da0b2d
PM
5641 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5642 cpu_V1, cpu_V0);
ad69471c 5643 } else {
02da0b2d
PM
5644 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5645 cpu_V1, cpu_V0);
ad69471c
PB
5646 }
5647 break;
62698be3 5648 case NEON_3R_VRSHL:
ad69471c
PB
5649 if (u) {
5650 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5651 } else {
ad69471c
PB
5652 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5653 }
5654 break;
62698be3 5655 case NEON_3R_VQRSHL:
ad69471c 5656 if (u) {
02da0b2d
PM
5657 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5658 cpu_V1, cpu_V0);
ad69471c 5659 } else {
02da0b2d
PM
5660 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5661 cpu_V1, cpu_V0);
1e8d4eec 5662 }
9ee6e8bb 5663 break;
62698be3 5664 case NEON_3R_VADD_VSUB:
9ee6e8bb 5665 if (u) {
ad69471c 5666 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5667 } else {
ad69471c 5668 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5669 }
5670 break;
5671 default:
5672 abort();
2c0262af 5673 }
ad69471c 5674 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5675 }
9ee6e8bb 5676 return 0;
2c0262af 5677 }
25f84f79 5678 pairwise = 0;
9ee6e8bb 5679 switch (op) {
62698be3
PM
5680 case NEON_3R_VSHL:
5681 case NEON_3R_VQSHL:
5682 case NEON_3R_VRSHL:
5683 case NEON_3R_VQRSHL:
9ee6e8bb 5684 {
ad69471c
PB
5685 int rtmp;
5686 /* Shift instruction operands are reversed. */
5687 rtmp = rn;
9ee6e8bb 5688 rn = rm;
ad69471c 5689 rm = rtmp;
9ee6e8bb 5690 }
2c0262af 5691 break;
25f84f79
PM
5692 case NEON_3R_VPADD:
5693 if (u) {
5694 return 1;
5695 }
5696 /* Fall through */
62698be3
PM
5697 case NEON_3R_VPMAX:
5698 case NEON_3R_VPMIN:
9ee6e8bb 5699 pairwise = 1;
2c0262af 5700 break;
25f84f79
PM
5701 case NEON_3R_FLOAT_ARITH:
5702 pairwise = (u && size < 2); /* if VPADD (float) */
5703 break;
5704 case NEON_3R_FLOAT_MINMAX:
5705 pairwise = u; /* if VPMIN/VPMAX (float) */
5706 break;
5707 case NEON_3R_FLOAT_CMP:
5708 if (!u && size) {
5709 /* no encoding for U=0 C=1x */
5710 return 1;
5711 }
5712 break;
5713 case NEON_3R_FLOAT_ACMP:
5714 if (!u) {
5715 return 1;
5716 }
5717 break;
505935fc
WN
5718 case NEON_3R_FLOAT_MISC:
5719 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5720 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5721 return 1;
5722 }
2c0262af 5723 break;
25f84f79
PM
5724 case NEON_3R_VMUL:
5725 if (u && (size != 0)) {
5726 /* UNDEF on invalid size for polynomial subcase */
5727 return 1;
5728 }
2c0262af 5729 break;
da97f52c 5730 case NEON_3R_VFM:
d614a513 5731 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5732 return 1;
5733 }
5734 break;
9ee6e8bb 5735 default:
2c0262af 5736 break;
9ee6e8bb 5737 }
dd8fbd78 5738
25f84f79
PM
5739 if (pairwise && q) {
5740 /* All the pairwise insns UNDEF if Q is set */
5741 return 1;
5742 }
5743
9ee6e8bb
PB
5744 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5745
5746 if (pairwise) {
5747 /* Pairwise. */
a5a14945
JR
5748 if (pass < 1) {
5749 tmp = neon_load_reg(rn, 0);
5750 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5751 } else {
a5a14945
JR
5752 tmp = neon_load_reg(rm, 0);
5753 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5754 }
5755 } else {
5756 /* Elementwise. */
dd8fbd78
FN
5757 tmp = neon_load_reg(rn, pass);
5758 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5759 }
5760 switch (op) {
62698be3 5761 case NEON_3R_VHADD:
9ee6e8bb
PB
5762 GEN_NEON_INTEGER_OP(hadd);
5763 break;
62698be3 5764 case NEON_3R_VQADD:
02da0b2d 5765 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5766 break;
62698be3 5767 case NEON_3R_VRHADD:
9ee6e8bb 5768 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5769 break;
62698be3 5770 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5771 switch ((u << 2) | size) {
5772 case 0: /* VAND */
dd8fbd78 5773 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5774 break;
5775 case 1: /* BIC */
f669df27 5776 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5777 break;
5778 case 2: /* VORR */
dd8fbd78 5779 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5780 break;
5781 case 3: /* VORN */
f669df27 5782 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5783 break;
5784 case 4: /* VEOR */
dd8fbd78 5785 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5786 break;
5787 case 5: /* VBSL */
dd8fbd78
FN
5788 tmp3 = neon_load_reg(rd, pass);
5789 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5790 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5791 break;
5792 case 6: /* VBIT */
dd8fbd78
FN
5793 tmp3 = neon_load_reg(rd, pass);
5794 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5795 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5796 break;
5797 case 7: /* VBIF */
dd8fbd78
FN
5798 tmp3 = neon_load_reg(rd, pass);
5799 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5800 tcg_temp_free_i32(tmp3);
9ee6e8bb 5801 break;
2c0262af
FB
5802 }
5803 break;
62698be3 5804 case NEON_3R_VHSUB:
9ee6e8bb
PB
5805 GEN_NEON_INTEGER_OP(hsub);
5806 break;
62698be3 5807 case NEON_3R_VQSUB:
02da0b2d 5808 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5809 break;
62698be3 5810 case NEON_3R_VCGT:
9ee6e8bb
PB
5811 GEN_NEON_INTEGER_OP(cgt);
5812 break;
62698be3 5813 case NEON_3R_VCGE:
9ee6e8bb
PB
5814 GEN_NEON_INTEGER_OP(cge);
5815 break;
62698be3 5816 case NEON_3R_VSHL:
ad69471c 5817 GEN_NEON_INTEGER_OP(shl);
2c0262af 5818 break;
62698be3 5819 case NEON_3R_VQSHL:
02da0b2d 5820 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5821 break;
62698be3 5822 case NEON_3R_VRSHL:
ad69471c 5823 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5824 break;
62698be3 5825 case NEON_3R_VQRSHL:
02da0b2d 5826 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5827 break;
62698be3 5828 case NEON_3R_VMAX:
9ee6e8bb
PB
5829 GEN_NEON_INTEGER_OP(max);
5830 break;
62698be3 5831 case NEON_3R_VMIN:
9ee6e8bb
PB
5832 GEN_NEON_INTEGER_OP(min);
5833 break;
62698be3 5834 case NEON_3R_VABD:
9ee6e8bb
PB
5835 GEN_NEON_INTEGER_OP(abd);
5836 break;
62698be3 5837 case NEON_3R_VABA:
9ee6e8bb 5838 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5839 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5840 tmp2 = neon_load_reg(rd, pass);
5841 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5842 break;
62698be3 5843 case NEON_3R_VADD_VSUB:
9ee6e8bb 5844 if (!u) { /* VADD */
62698be3 5845 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5846 } else { /* VSUB */
5847 switch (size) {
dd8fbd78
FN
5848 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5849 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5850 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5851 default: abort();
9ee6e8bb
PB
5852 }
5853 }
5854 break;
62698be3 5855 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5856 if (!u) { /* VTST */
5857 switch (size) {
dd8fbd78
FN
5858 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5859 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5860 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5861 default: abort();
9ee6e8bb
PB
5862 }
5863 } else { /* VCEQ */
5864 switch (size) {
dd8fbd78
FN
5865 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5866 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5867 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5868 default: abort();
9ee6e8bb
PB
5869 }
5870 }
5871 break;
62698be3 5872 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5873 switch (size) {
dd8fbd78
FN
5874 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5875 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5876 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5877 default: abort();
9ee6e8bb 5878 }
7d1b0095 5879 tcg_temp_free_i32(tmp2);
dd8fbd78 5880 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5881 if (u) { /* VMLS */
dd8fbd78 5882 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5883 } else { /* VMLA */
dd8fbd78 5884 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5885 }
5886 break;
62698be3 5887 case NEON_3R_VMUL:
9ee6e8bb 5888 if (u) { /* polynomial */
dd8fbd78 5889 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5890 } else { /* Integer */
5891 switch (size) {
dd8fbd78
FN
5892 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5893 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5894 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5895 default: abort();
9ee6e8bb
PB
5896 }
5897 }
5898 break;
62698be3 5899 case NEON_3R_VPMAX:
9ee6e8bb
PB
5900 GEN_NEON_INTEGER_OP(pmax);
5901 break;
62698be3 5902 case NEON_3R_VPMIN:
9ee6e8bb
PB
5903 GEN_NEON_INTEGER_OP(pmin);
5904 break;
62698be3 5905 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5906 if (!u) { /* VQDMULH */
5907 switch (size) {
02da0b2d
PM
5908 case 1:
5909 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5910 break;
5911 case 2:
5912 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5913 break;
62698be3 5914 default: abort();
9ee6e8bb 5915 }
62698be3 5916 } else { /* VQRDMULH */
9ee6e8bb 5917 switch (size) {
02da0b2d
PM
5918 case 1:
5919 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5920 break;
5921 case 2:
5922 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5923 break;
62698be3 5924 default: abort();
9ee6e8bb
PB
5925 }
5926 }
5927 break;
62698be3 5928 case NEON_3R_VPADD:
9ee6e8bb 5929 switch (size) {
dd8fbd78
FN
5930 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5931 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5932 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5933 default: abort();
9ee6e8bb
PB
5934 }
5935 break;
62698be3 5936 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5937 {
5938 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5939 switch ((u << 2) | size) {
5940 case 0: /* VADD */
aa47cfdd
PM
5941 case 4: /* VPADD */
5942 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5943 break;
5944 case 2: /* VSUB */
aa47cfdd 5945 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5946 break;
5947 case 6: /* VABD */
aa47cfdd 5948 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5949 break;
5950 default:
62698be3 5951 abort();
9ee6e8bb 5952 }
aa47cfdd 5953 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5954 break;
aa47cfdd 5955 }
62698be3 5956 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5957 {
5958 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5959 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5960 if (!u) {
7d1b0095 5961 tcg_temp_free_i32(tmp2);
dd8fbd78 5962 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5963 if (size == 0) {
aa47cfdd 5964 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5965 } else {
aa47cfdd 5966 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5967 }
5968 }
aa47cfdd 5969 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5970 break;
aa47cfdd 5971 }
62698be3 5972 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5973 {
5974 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5975 if (!u) {
aa47cfdd 5976 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5977 } else {
aa47cfdd
PM
5978 if (size == 0) {
5979 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5980 } else {
5981 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5982 }
b5ff1b31 5983 }
aa47cfdd 5984 tcg_temp_free_ptr(fpstatus);
2c0262af 5985 break;
aa47cfdd 5986 }
62698be3 5987 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5988 {
5989 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5990 if (size == 0) {
5991 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5992 } else {
5993 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5994 }
5995 tcg_temp_free_ptr(fpstatus);
2c0262af 5996 break;
aa47cfdd 5997 }
62698be3 5998 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5999 {
6000 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6001 if (size == 0) {
f71a2ae5 6002 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6003 } else {
f71a2ae5 6004 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6005 }
6006 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6007 break;
aa47cfdd 6008 }
505935fc
WN
6009 case NEON_3R_FLOAT_MISC:
6010 if (u) {
6011 /* VMAXNM/VMINNM */
6012 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6013 if (size == 0) {
f71a2ae5 6014 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6015 } else {
f71a2ae5 6016 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6017 }
6018 tcg_temp_free_ptr(fpstatus);
6019 } else {
6020 if (size == 0) {
6021 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6022 } else {
6023 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6024 }
6025 }
2c0262af 6026 break;
da97f52c
PM
6027 case NEON_3R_VFM:
6028 {
6029 /* VFMA, VFMS: fused multiply-add */
6030 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6031 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6032 if (size) {
6033 /* VFMS */
6034 gen_helper_vfp_negs(tmp, tmp);
6035 }
6036 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6037 tcg_temp_free_i32(tmp3);
6038 tcg_temp_free_ptr(fpstatus);
6039 break;
6040 }
9ee6e8bb
PB
6041 default:
6042 abort();
2c0262af 6043 }
7d1b0095 6044 tcg_temp_free_i32(tmp2);
dd8fbd78 6045
9ee6e8bb
PB
6046 /* Save the result. For elementwise operations we can put it
6047 straight into the destination register. For pairwise operations
6048 we have to be careful to avoid clobbering the source operands. */
6049 if (pairwise && rd == rm) {
dd8fbd78 6050 neon_store_scratch(pass, tmp);
9ee6e8bb 6051 } else {
dd8fbd78 6052 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6053 }
6054
6055 } /* for pass */
6056 if (pairwise && rd == rm) {
6057 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6058 tmp = neon_load_scratch(pass);
6059 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6060 }
6061 }
ad69471c 6062 /* End of 3 register same size operations. */
9ee6e8bb
PB
6063 } else if (insn & (1 << 4)) {
6064 if ((insn & 0x00380080) != 0) {
6065 /* Two registers and shift. */
6066 op = (insn >> 8) & 0xf;
6067 if (insn & (1 << 7)) {
cc13115b
PM
6068 /* 64-bit shift. */
6069 if (op > 7) {
6070 return 1;
6071 }
9ee6e8bb
PB
6072 size = 3;
6073 } else {
6074 size = 2;
6075 while ((insn & (1 << (size + 19))) == 0)
6076 size--;
6077 }
6078 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6079 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6080 by immediate using the variable shift operations. */
6081 if (op < 8) {
6082 /* Shift by immediate:
6083 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6084 if (q && ((rd | rm) & 1)) {
6085 return 1;
6086 }
6087 if (!u && (op == 4 || op == 6)) {
6088 return 1;
6089 }
9ee6e8bb
PB
6090 /* Right shifts are encoded as N - shift, where N is the
6091 element size in bits. */
6092 if (op <= 4)
6093 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6094 if (size == 3) {
6095 count = q + 1;
6096 } else {
6097 count = q ? 4: 2;
6098 }
6099 switch (size) {
6100 case 0:
6101 imm = (uint8_t) shift;
6102 imm |= imm << 8;
6103 imm |= imm << 16;
6104 break;
6105 case 1:
6106 imm = (uint16_t) shift;
6107 imm |= imm << 16;
6108 break;
6109 case 2:
6110 case 3:
6111 imm = shift;
6112 break;
6113 default:
6114 abort();
6115 }
6116
6117 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6118 if (size == 3) {
6119 neon_load_reg64(cpu_V0, rm + pass);
6120 tcg_gen_movi_i64(cpu_V1, imm);
6121 switch (op) {
6122 case 0: /* VSHR */
6123 case 1: /* VSRA */
6124 if (u)
6125 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6126 else
ad69471c 6127 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6128 break;
ad69471c
PB
6129 case 2: /* VRSHR */
6130 case 3: /* VRSRA */
6131 if (u)
6132 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6133 else
ad69471c 6134 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6135 break;
ad69471c 6136 case 4: /* VSRI */
ad69471c
PB
6137 case 5: /* VSHL, VSLI */
6138 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6139 break;
0322b26e 6140 case 6: /* VQSHLU */
02da0b2d
PM
6141 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6142 cpu_V0, cpu_V1);
ad69471c 6143 break;
0322b26e
PM
6144 case 7: /* VQSHL */
6145 if (u) {
02da0b2d 6146 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6147 cpu_V0, cpu_V1);
6148 } else {
02da0b2d 6149 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6150 cpu_V0, cpu_V1);
6151 }
9ee6e8bb 6152 break;
9ee6e8bb 6153 }
ad69471c
PB
6154 if (op == 1 || op == 3) {
6155 /* Accumulate. */
5371cb81 6156 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6157 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6158 } else if (op == 4 || (op == 5 && u)) {
6159 /* Insert */
923e6509
CL
6160 neon_load_reg64(cpu_V1, rd + pass);
6161 uint64_t mask;
6162 if (shift < -63 || shift > 63) {
6163 mask = 0;
6164 } else {
6165 if (op == 4) {
6166 mask = 0xffffffffffffffffull >> -shift;
6167 } else {
6168 mask = 0xffffffffffffffffull << shift;
6169 }
6170 }
6171 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6172 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6173 }
6174 neon_store_reg64(cpu_V0, rd + pass);
6175 } else { /* size < 3 */
6176 /* Operands in T0 and T1. */
dd8fbd78 6177 tmp = neon_load_reg(rm, pass);
7d1b0095 6178 tmp2 = tcg_temp_new_i32();
dd8fbd78 6179 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6180 switch (op) {
6181 case 0: /* VSHR */
6182 case 1: /* VSRA */
6183 GEN_NEON_INTEGER_OP(shl);
6184 break;
6185 case 2: /* VRSHR */
6186 case 3: /* VRSRA */
6187 GEN_NEON_INTEGER_OP(rshl);
6188 break;
6189 case 4: /* VSRI */
ad69471c
PB
6190 case 5: /* VSHL, VSLI */
6191 switch (size) {
dd8fbd78
FN
6192 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6193 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6194 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6195 default: abort();
ad69471c
PB
6196 }
6197 break;
0322b26e 6198 case 6: /* VQSHLU */
ad69471c 6199 switch (size) {
0322b26e 6200 case 0:
02da0b2d
PM
6201 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6202 tmp, tmp2);
0322b26e
PM
6203 break;
6204 case 1:
02da0b2d
PM
6205 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6206 tmp, tmp2);
0322b26e
PM
6207 break;
6208 case 2:
02da0b2d
PM
6209 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6210 tmp, tmp2);
0322b26e
PM
6211 break;
6212 default:
cc13115b 6213 abort();
ad69471c
PB
6214 }
6215 break;
0322b26e 6216 case 7: /* VQSHL */
02da0b2d 6217 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6218 break;
ad69471c 6219 }
7d1b0095 6220 tcg_temp_free_i32(tmp2);
ad69471c
PB
6221
6222 if (op == 1 || op == 3) {
6223 /* Accumulate. */
dd8fbd78 6224 tmp2 = neon_load_reg(rd, pass);
5371cb81 6225 gen_neon_add(size, tmp, tmp2);
7d1b0095 6226 tcg_temp_free_i32(tmp2);
ad69471c
PB
6227 } else if (op == 4 || (op == 5 && u)) {
6228 /* Insert */
6229 switch (size) {
6230 case 0:
6231 if (op == 4)
ca9a32e4 6232 mask = 0xff >> -shift;
ad69471c 6233 else
ca9a32e4
JR
6234 mask = (uint8_t)(0xff << shift);
6235 mask |= mask << 8;
6236 mask |= mask << 16;
ad69471c
PB
6237 break;
6238 case 1:
6239 if (op == 4)
ca9a32e4 6240 mask = 0xffff >> -shift;
ad69471c 6241 else
ca9a32e4
JR
6242 mask = (uint16_t)(0xffff << shift);
6243 mask |= mask << 16;
ad69471c
PB
6244 break;
6245 case 2:
ca9a32e4
JR
6246 if (shift < -31 || shift > 31) {
6247 mask = 0;
6248 } else {
6249 if (op == 4)
6250 mask = 0xffffffffu >> -shift;
6251 else
6252 mask = 0xffffffffu << shift;
6253 }
ad69471c
PB
6254 break;
6255 default:
6256 abort();
6257 }
dd8fbd78 6258 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6259 tcg_gen_andi_i32(tmp, tmp, mask);
6260 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6261 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6262 tcg_temp_free_i32(tmp2);
ad69471c 6263 }
dd8fbd78 6264 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6265 }
6266 } /* for pass */
6267 } else if (op < 10) {
ad69471c 6268 /* Shift by immediate and narrow:
9ee6e8bb 6269 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6270 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6271 if (rm & 1) {
6272 return 1;
6273 }
9ee6e8bb
PB
6274 shift = shift - (1 << (size + 3));
6275 size++;
92cdfaeb 6276 if (size == 3) {
a7812ae4 6277 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6278 neon_load_reg64(cpu_V0, rm);
6279 neon_load_reg64(cpu_V1, rm + 1);
6280 for (pass = 0; pass < 2; pass++) {
6281 TCGv_i64 in;
6282 if (pass == 0) {
6283 in = cpu_V0;
6284 } else {
6285 in = cpu_V1;
6286 }
ad69471c 6287 if (q) {
0b36f4cd 6288 if (input_unsigned) {
92cdfaeb 6289 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6290 } else {
92cdfaeb 6291 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6292 }
ad69471c 6293 } else {
0b36f4cd 6294 if (input_unsigned) {
92cdfaeb 6295 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6296 } else {
92cdfaeb 6297 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6298 }
ad69471c 6299 }
7d1b0095 6300 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6301 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6302 neon_store_reg(rd, pass, tmp);
6303 } /* for pass */
6304 tcg_temp_free_i64(tmp64);
6305 } else {
6306 if (size == 1) {
6307 imm = (uint16_t)shift;
6308 imm |= imm << 16;
2c0262af 6309 } else {
92cdfaeb
PM
6310 /* size == 2 */
6311 imm = (uint32_t)shift;
6312 }
6313 tmp2 = tcg_const_i32(imm);
6314 tmp4 = neon_load_reg(rm + 1, 0);
6315 tmp5 = neon_load_reg(rm + 1, 1);
6316 for (pass = 0; pass < 2; pass++) {
6317 if (pass == 0) {
6318 tmp = neon_load_reg(rm, 0);
6319 } else {
6320 tmp = tmp4;
6321 }
0b36f4cd
CL
6322 gen_neon_shift_narrow(size, tmp, tmp2, q,
6323 input_unsigned);
92cdfaeb
PM
6324 if (pass == 0) {
6325 tmp3 = neon_load_reg(rm, 1);
6326 } else {
6327 tmp3 = tmp5;
6328 }
0b36f4cd
CL
6329 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6330 input_unsigned);
36aa55dc 6331 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6332 tcg_temp_free_i32(tmp);
6333 tcg_temp_free_i32(tmp3);
6334 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6335 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6336 neon_store_reg(rd, pass, tmp);
6337 } /* for pass */
c6067f04 6338 tcg_temp_free_i32(tmp2);
b75263d6 6339 }
9ee6e8bb 6340 } else if (op == 10) {
cc13115b
PM
6341 /* VSHLL, VMOVL */
6342 if (q || (rd & 1)) {
9ee6e8bb 6343 return 1;
cc13115b 6344 }
ad69471c
PB
6345 tmp = neon_load_reg(rm, 0);
6346 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6347 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6348 if (pass == 1)
6349 tmp = tmp2;
6350
6351 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6352
9ee6e8bb
PB
6353 if (shift != 0) {
6354 /* The shift is less than the width of the source
ad69471c
PB
6355 type, so we can just shift the whole register. */
6356 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6357 /* Widen the result of shift: we need to clear
6358 * the potential overflow bits resulting from
6359 * left bits of the narrow input appearing as
6360 * right bits of left the neighbour narrow
6361 * input. */
ad69471c
PB
6362 if (size < 2 || !u) {
6363 uint64_t imm64;
6364 if (size == 0) {
6365 imm = (0xffu >> (8 - shift));
6366 imm |= imm << 16;
acdf01ef 6367 } else if (size == 1) {
ad69471c 6368 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6369 } else {
6370 /* size == 2 */
6371 imm = 0xffffffff >> (32 - shift);
6372 }
6373 if (size < 2) {
6374 imm64 = imm | (((uint64_t)imm) << 32);
6375 } else {
6376 imm64 = imm;
9ee6e8bb 6377 }
acdf01ef 6378 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6379 }
6380 }
ad69471c 6381 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6382 }
f73534a5 6383 } else if (op >= 14) {
9ee6e8bb 6384 /* VCVT fixed-point. */
cc13115b
PM
6385 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6386 return 1;
6387 }
f73534a5
PM
6388 /* We have already masked out the must-be-1 top bit of imm6,
6389 * hence this 32-shift where the ARM ARM has 64-imm6.
6390 */
6391 shift = 32 - shift;
9ee6e8bb 6392 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6393 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6394 if (!(op & 1)) {
9ee6e8bb 6395 if (u)
5500b06c 6396 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6397 else
5500b06c 6398 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6399 } else {
6400 if (u)
5500b06c 6401 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6402 else
5500b06c 6403 gen_vfp_tosl(0, shift, 1);
2c0262af 6404 }
4373f3ce 6405 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6406 }
6407 } else {
9ee6e8bb
PB
6408 return 1;
6409 }
6410 } else { /* (insn & 0x00380080) == 0 */
6411 int invert;
7d80fee5
PM
6412 if (q && (rd & 1)) {
6413 return 1;
6414 }
9ee6e8bb
PB
6415
6416 op = (insn >> 8) & 0xf;
6417 /* One register and immediate. */
6418 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6419 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6420 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6421 * We choose to not special-case this and will behave as if a
6422 * valid constant encoding of 0 had been given.
6423 */
9ee6e8bb
PB
6424 switch (op) {
6425 case 0: case 1:
6426 /* no-op */
6427 break;
6428 case 2: case 3:
6429 imm <<= 8;
6430 break;
6431 case 4: case 5:
6432 imm <<= 16;
6433 break;
6434 case 6: case 7:
6435 imm <<= 24;
6436 break;
6437 case 8: case 9:
6438 imm |= imm << 16;
6439 break;
6440 case 10: case 11:
6441 imm = (imm << 8) | (imm << 24);
6442 break;
6443 case 12:
8e31209e 6444 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6445 break;
6446 case 13:
6447 imm = (imm << 16) | 0xffff;
6448 break;
6449 case 14:
6450 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6451 if (invert)
6452 imm = ~imm;
6453 break;
6454 case 15:
7d80fee5
PM
6455 if (invert) {
6456 return 1;
6457 }
9ee6e8bb
PB
6458 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6459 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6460 break;
6461 }
6462 if (invert)
6463 imm = ~imm;
6464
9ee6e8bb
PB
6465 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6466 if (op & 1 && op < 12) {
ad69471c 6467 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6468 if (invert) {
6469 /* The immediate value has already been inverted, so
6470 BIC becomes AND. */
ad69471c 6471 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6472 } else {
ad69471c 6473 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6474 }
9ee6e8bb 6475 } else {
ad69471c 6476 /* VMOV, VMVN. */
7d1b0095 6477 tmp = tcg_temp_new_i32();
9ee6e8bb 6478 if (op == 14 && invert) {
a5a14945 6479 int n;
ad69471c
PB
6480 uint32_t val;
6481 val = 0;
9ee6e8bb
PB
6482 for (n = 0; n < 4; n++) {
6483 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6484 val |= 0xff << (n * 8);
9ee6e8bb 6485 }
ad69471c
PB
6486 tcg_gen_movi_i32(tmp, val);
6487 } else {
6488 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6489 }
9ee6e8bb 6490 }
ad69471c 6491 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6492 }
6493 }
e4b3861d 6494 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6495 if (size != 3) {
6496 op = (insn >> 8) & 0xf;
6497 if ((insn & (1 << 6)) == 0) {
6498 /* Three registers of different lengths. */
6499 int src1_wide;
6500 int src2_wide;
6501 int prewiden;
526d0096
PM
6502 /* undefreq: bit 0 : UNDEF if size == 0
6503 * bit 1 : UNDEF if size == 1
6504 * bit 2 : UNDEF if size == 2
6505 * bit 3 : UNDEF if U == 1
6506 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6507 */
6508 int undefreq;
6509 /* prewiden, src1_wide, src2_wide, undefreq */
6510 static const int neon_3reg_wide[16][4] = {
6511 {1, 0, 0, 0}, /* VADDL */
6512 {1, 1, 0, 0}, /* VADDW */
6513 {1, 0, 0, 0}, /* VSUBL */
6514 {1, 1, 0, 0}, /* VSUBW */
6515 {0, 1, 1, 0}, /* VADDHN */
6516 {0, 0, 0, 0}, /* VABAL */
6517 {0, 1, 1, 0}, /* VSUBHN */
6518 {0, 0, 0, 0}, /* VABDL */
6519 {0, 0, 0, 0}, /* VMLAL */
526d0096 6520 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6521 {0, 0, 0, 0}, /* VMLSL */
526d0096 6522 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6523 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6524 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6525 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6526 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6527 };
6528
6529 prewiden = neon_3reg_wide[op][0];
6530 src1_wide = neon_3reg_wide[op][1];
6531 src2_wide = neon_3reg_wide[op][2];
695272dc 6532 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6533
526d0096
PM
6534 if ((undefreq & (1 << size)) ||
6535 ((undefreq & 8) && u)) {
695272dc
PM
6536 return 1;
6537 }
6538 if ((src1_wide && (rn & 1)) ||
6539 (src2_wide && (rm & 1)) ||
6540 (!src2_wide && (rd & 1))) {
ad69471c 6541 return 1;
695272dc 6542 }
ad69471c 6543
4e624eda
PM
6544 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6545 * outside the loop below as it only performs a single pass.
6546 */
6547 if (op == 14 && size == 2) {
6548 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6549
d614a513 6550 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6551 return 1;
6552 }
6553 tcg_rn = tcg_temp_new_i64();
6554 tcg_rm = tcg_temp_new_i64();
6555 tcg_rd = tcg_temp_new_i64();
6556 neon_load_reg64(tcg_rn, rn);
6557 neon_load_reg64(tcg_rm, rm);
6558 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6559 neon_store_reg64(tcg_rd, rd);
6560 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6561 neon_store_reg64(tcg_rd, rd + 1);
6562 tcg_temp_free_i64(tcg_rn);
6563 tcg_temp_free_i64(tcg_rm);
6564 tcg_temp_free_i64(tcg_rd);
6565 return 0;
6566 }
6567
9ee6e8bb
PB
6568 /* Avoid overlapping operands. Wide source operands are
6569 always aligned so will never overlap with wide
6570 destinations in problematic ways. */
8f8e3aa4 6571 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6572 tmp = neon_load_reg(rm, 1);
6573 neon_store_scratch(2, tmp);
8f8e3aa4 6574 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6575 tmp = neon_load_reg(rn, 1);
6576 neon_store_scratch(2, tmp);
9ee6e8bb 6577 }
39d5492a 6578 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6579 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6580 if (src1_wide) {
6581 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6582 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6583 } else {
ad69471c 6584 if (pass == 1 && rd == rn) {
dd8fbd78 6585 tmp = neon_load_scratch(2);
9ee6e8bb 6586 } else {
ad69471c
PB
6587 tmp = neon_load_reg(rn, pass);
6588 }
6589 if (prewiden) {
6590 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6591 }
6592 }
ad69471c
PB
6593 if (src2_wide) {
6594 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6595 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6596 } else {
ad69471c 6597 if (pass == 1 && rd == rm) {
dd8fbd78 6598 tmp2 = neon_load_scratch(2);
9ee6e8bb 6599 } else {
ad69471c
PB
6600 tmp2 = neon_load_reg(rm, pass);
6601 }
6602 if (prewiden) {
6603 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6604 }
9ee6e8bb
PB
6605 }
6606 switch (op) {
6607 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6608 gen_neon_addl(size);
9ee6e8bb 6609 break;
79b0e534 6610 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6611 gen_neon_subl(size);
9ee6e8bb
PB
6612 break;
6613 case 5: case 7: /* VABAL, VABDL */
6614 switch ((size << 1) | u) {
ad69471c
PB
6615 case 0:
6616 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6617 break;
6618 case 1:
6619 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6620 break;
6621 case 2:
6622 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6623 break;
6624 case 3:
6625 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6626 break;
6627 case 4:
6628 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6629 break;
6630 case 5:
6631 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6632 break;
9ee6e8bb
PB
6633 default: abort();
6634 }
7d1b0095
PM
6635 tcg_temp_free_i32(tmp2);
6636 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6637 break;
6638 case 8: case 9: case 10: case 11: case 12: case 13:
6639 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6640 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6641 break;
6642 case 14: /* Polynomial VMULL */
e5ca24cb 6643 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6644 tcg_temp_free_i32(tmp2);
6645 tcg_temp_free_i32(tmp);
e5ca24cb 6646 break;
695272dc
PM
6647 default: /* 15 is RESERVED: caught earlier */
6648 abort();
9ee6e8bb 6649 }
ebcd88ce
PM
6650 if (op == 13) {
6651 /* VQDMULL */
6652 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6653 neon_store_reg64(cpu_V0, rd + pass);
6654 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6655 /* Accumulate. */
ebcd88ce 6656 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6657 switch (op) {
4dc064e6
PM
6658 case 10: /* VMLSL */
6659 gen_neon_negl(cpu_V0, size);
6660 /* Fall through */
6661 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6662 gen_neon_addl(size);
9ee6e8bb
PB
6663 break;
6664 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6665 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6666 if (op == 11) {
6667 gen_neon_negl(cpu_V0, size);
6668 }
ad69471c
PB
6669 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6670 break;
9ee6e8bb
PB
6671 default:
6672 abort();
6673 }
ad69471c 6674 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6675 } else if (op == 4 || op == 6) {
6676 /* Narrowing operation. */
7d1b0095 6677 tmp = tcg_temp_new_i32();
79b0e534 6678 if (!u) {
9ee6e8bb 6679 switch (size) {
ad69471c
PB
6680 case 0:
6681 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6682 break;
6683 case 1:
6684 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6685 break;
6686 case 2:
6687 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6688 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6689 break;
9ee6e8bb
PB
6690 default: abort();
6691 }
6692 } else {
6693 switch (size) {
ad69471c
PB
6694 case 0:
6695 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6696 break;
6697 case 1:
6698 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6699 break;
6700 case 2:
6701 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6702 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6703 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6704 break;
9ee6e8bb
PB
6705 default: abort();
6706 }
6707 }
ad69471c
PB
6708 if (pass == 0) {
6709 tmp3 = tmp;
6710 } else {
6711 neon_store_reg(rd, 0, tmp3);
6712 neon_store_reg(rd, 1, tmp);
6713 }
9ee6e8bb
PB
6714 } else {
6715 /* Write back the result. */
ad69471c 6716 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6717 }
6718 }
6719 } else {
3e3326df
PM
6720 /* Two registers and a scalar. NB that for ops of this form
6721 * the ARM ARM labels bit 24 as Q, but it is in our variable
6722 * 'u', not 'q'.
6723 */
6724 if (size == 0) {
6725 return 1;
6726 }
9ee6e8bb 6727 switch (op) {
9ee6e8bb 6728 case 1: /* Float VMLA scalar */
9ee6e8bb 6729 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6730 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6731 if (size == 1) {
6732 return 1;
6733 }
6734 /* fall through */
6735 case 0: /* Integer VMLA scalar */
6736 case 4: /* Integer VMLS scalar */
6737 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6738 case 12: /* VQDMULH scalar */
6739 case 13: /* VQRDMULH scalar */
3e3326df
PM
6740 if (u && ((rd | rn) & 1)) {
6741 return 1;
6742 }
dd8fbd78
FN
6743 tmp = neon_get_scalar(size, rm);
6744 neon_store_scratch(0, tmp);
9ee6e8bb 6745 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6746 tmp = neon_load_scratch(0);
6747 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6748 if (op == 12) {
6749 if (size == 1) {
02da0b2d 6750 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6751 } else {
02da0b2d 6752 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6753 }
6754 } else if (op == 13) {
6755 if (size == 1) {
02da0b2d 6756 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6757 } else {
02da0b2d 6758 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6759 }
6760 } else if (op & 1) {
aa47cfdd
PM
6761 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6762 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6763 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6764 } else {
6765 switch (size) {
dd8fbd78
FN
6766 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6767 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6768 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6769 default: abort();
9ee6e8bb
PB
6770 }
6771 }
7d1b0095 6772 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6773 if (op < 8) {
6774 /* Accumulate. */
dd8fbd78 6775 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6776 switch (op) {
6777 case 0:
dd8fbd78 6778 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6779 break;
6780 case 1:
aa47cfdd
PM
6781 {
6782 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6783 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6784 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6785 break;
aa47cfdd 6786 }
9ee6e8bb 6787 case 4:
dd8fbd78 6788 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6789 break;
6790 case 5:
aa47cfdd
PM
6791 {
6792 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6793 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6794 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6795 break;
aa47cfdd 6796 }
9ee6e8bb
PB
6797 default:
6798 abort();
6799 }
7d1b0095 6800 tcg_temp_free_i32(tmp2);
9ee6e8bb 6801 }
dd8fbd78 6802 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6803 }
6804 break;
9ee6e8bb 6805 case 3: /* VQDMLAL scalar */
9ee6e8bb 6806 case 7: /* VQDMLSL scalar */
9ee6e8bb 6807 case 11: /* VQDMULL scalar */
3e3326df 6808 if (u == 1) {
ad69471c 6809 return 1;
3e3326df
PM
6810 }
6811 /* fall through */
6812 case 2: /* VMLAL sclar */
6813 case 6: /* VMLSL scalar */
6814 case 10: /* VMULL scalar */
6815 if (rd & 1) {
6816 return 1;
6817 }
dd8fbd78 6818 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6819 /* We need a copy of tmp2 because gen_neon_mull
6820 * deletes it during pass 0. */
7d1b0095 6821 tmp4 = tcg_temp_new_i32();
c6067f04 6822 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6823 tmp3 = neon_load_reg(rn, 1);
ad69471c 6824
9ee6e8bb 6825 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6826 if (pass == 0) {
6827 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6828 } else {
dd8fbd78 6829 tmp = tmp3;
c6067f04 6830 tmp2 = tmp4;
9ee6e8bb 6831 }
ad69471c 6832 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6833 if (op != 11) {
6834 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6835 }
9ee6e8bb 6836 switch (op) {
4dc064e6
PM
6837 case 6:
6838 gen_neon_negl(cpu_V0, size);
6839 /* Fall through */
6840 case 2:
ad69471c 6841 gen_neon_addl(size);
9ee6e8bb
PB
6842 break;
6843 case 3: case 7:
ad69471c 6844 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6845 if (op == 7) {
6846 gen_neon_negl(cpu_V0, size);
6847 }
ad69471c 6848 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6849 break;
6850 case 10:
6851 /* no-op */
6852 break;
6853 case 11:
ad69471c 6854 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6855 break;
6856 default:
6857 abort();
6858 }
ad69471c 6859 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6860 }
dd8fbd78 6861
dd8fbd78 6862
9ee6e8bb
PB
6863 break;
6864 default: /* 14 and 15 are RESERVED */
6865 return 1;
6866 }
6867 }
6868 } else { /* size == 3 */
6869 if (!u) {
6870 /* Extract. */
9ee6e8bb 6871 imm = (insn >> 8) & 0xf;
ad69471c
PB
6872
6873 if (imm > 7 && !q)
6874 return 1;
6875
52579ea1
PM
6876 if (q && ((rd | rn | rm) & 1)) {
6877 return 1;
6878 }
6879
ad69471c
PB
6880 if (imm == 0) {
6881 neon_load_reg64(cpu_V0, rn);
6882 if (q) {
6883 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6884 }
ad69471c
PB
6885 } else if (imm == 8) {
6886 neon_load_reg64(cpu_V0, rn + 1);
6887 if (q) {
6888 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6889 }
ad69471c 6890 } else if (q) {
a7812ae4 6891 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6892 if (imm < 8) {
6893 neon_load_reg64(cpu_V0, rn);
a7812ae4 6894 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6895 } else {
6896 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6897 neon_load_reg64(tmp64, rm);
ad69471c
PB
6898 }
6899 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6900 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6901 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6902 if (imm < 8) {
6903 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6904 } else {
ad69471c
PB
6905 neon_load_reg64(cpu_V1, rm + 1);
6906 imm -= 8;
9ee6e8bb 6907 }
ad69471c 6908 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6909 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6910 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6911 tcg_temp_free_i64(tmp64);
ad69471c 6912 } else {
a7812ae4 6913 /* BUGFIX */
ad69471c 6914 neon_load_reg64(cpu_V0, rn);
a7812ae4 6915 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6916 neon_load_reg64(cpu_V1, rm);
a7812ae4 6917 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6918 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6919 }
6920 neon_store_reg64(cpu_V0, rd);
6921 if (q) {
6922 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6923 }
6924 } else if ((insn & (1 << 11)) == 0) {
6925 /* Two register misc. */
6926 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6927 size = (insn >> 18) & 3;
600b828c
PM
6928 /* UNDEF for unknown op values and bad op-size combinations */
6929 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6930 return 1;
6931 }
fe8fcf3d
PM
6932 if (neon_2rm_is_v8_op(op) &&
6933 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6934 return 1;
6935 }
fc2a9b37
PM
6936 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6937 q && ((rm | rd) & 1)) {
6938 return 1;
6939 }
9ee6e8bb 6940 switch (op) {
600b828c 6941 case NEON_2RM_VREV64:
9ee6e8bb 6942 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6943 tmp = neon_load_reg(rm, pass * 2);
6944 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6945 switch (size) {
dd8fbd78
FN
6946 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6947 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6948 case 2: /* no-op */ break;
6949 default: abort();
6950 }
dd8fbd78 6951 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6952 if (size == 2) {
dd8fbd78 6953 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6954 } else {
9ee6e8bb 6955 switch (size) {
dd8fbd78
FN
6956 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6957 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6958 default: abort();
6959 }
dd8fbd78 6960 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6961 }
6962 }
6963 break;
600b828c
PM
6964 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6965 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6966 for (pass = 0; pass < q + 1; pass++) {
6967 tmp = neon_load_reg(rm, pass * 2);
6968 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6969 tmp = neon_load_reg(rm, pass * 2 + 1);
6970 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6971 switch (size) {
6972 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6973 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6974 case 2: tcg_gen_add_i64(CPU_V001); break;
6975 default: abort();
6976 }
600b828c 6977 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6978 /* Accumulate. */
ad69471c
PB
6979 neon_load_reg64(cpu_V1, rd + pass);
6980 gen_neon_addl(size);
9ee6e8bb 6981 }
ad69471c 6982 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6983 }
6984 break;
600b828c 6985 case NEON_2RM_VTRN:
9ee6e8bb 6986 if (size == 2) {
a5a14945 6987 int n;
9ee6e8bb 6988 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6989 tmp = neon_load_reg(rm, n);
6990 tmp2 = neon_load_reg(rd, n + 1);
6991 neon_store_reg(rm, n, tmp2);
6992 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6993 }
6994 } else {
6995 goto elementwise;
6996 }
6997 break;
600b828c 6998 case NEON_2RM_VUZP:
02acedf9 6999 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7000 return 1;
9ee6e8bb
PB
7001 }
7002 break;
600b828c 7003 case NEON_2RM_VZIP:
d68a6f3a 7004 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7005 return 1;
9ee6e8bb
PB
7006 }
7007 break;
600b828c
PM
7008 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7009 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7010 if (rm & 1) {
7011 return 1;
7012 }
39d5492a 7013 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 7014 for (pass = 0; pass < 2; pass++) {
ad69471c 7015 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7016 tmp = tcg_temp_new_i32();
600b828c
PM
7017 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7018 tmp, cpu_V0);
ad69471c
PB
7019 if (pass == 0) {
7020 tmp2 = tmp;
7021 } else {
7022 neon_store_reg(rd, 0, tmp2);
7023 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7024 }
9ee6e8bb
PB
7025 }
7026 break;
600b828c 7027 case NEON_2RM_VSHLL:
fc2a9b37 7028 if (q || (rd & 1)) {
9ee6e8bb 7029 return 1;
600b828c 7030 }
ad69471c
PB
7031 tmp = neon_load_reg(rm, 0);
7032 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7033 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7034 if (pass == 1)
7035 tmp = tmp2;
7036 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7037 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7038 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7039 }
7040 break;
600b828c 7041 case NEON_2RM_VCVT_F16_F32:
d614a513 7042 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7043 q || (rm & 1)) {
7044 return 1;
7045 }
7d1b0095
PM
7046 tmp = tcg_temp_new_i32();
7047 tmp2 = tcg_temp_new_i32();
60011498 7048 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7049 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7050 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7051 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7052 tcg_gen_shli_i32(tmp2, tmp2, 16);
7053 tcg_gen_or_i32(tmp2, tmp2, tmp);
7054 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7055 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7056 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7057 neon_store_reg(rd, 0, tmp2);
7d1b0095 7058 tmp2 = tcg_temp_new_i32();
2d981da7 7059 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7060 tcg_gen_shli_i32(tmp2, tmp2, 16);
7061 tcg_gen_or_i32(tmp2, tmp2, tmp);
7062 neon_store_reg(rd, 1, tmp2);
7d1b0095 7063 tcg_temp_free_i32(tmp);
60011498 7064 break;
600b828c 7065 case NEON_2RM_VCVT_F32_F16:
d614a513 7066 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7067 q || (rd & 1)) {
7068 return 1;
7069 }
7d1b0095 7070 tmp3 = tcg_temp_new_i32();
60011498
PB
7071 tmp = neon_load_reg(rm, 0);
7072 tmp2 = neon_load_reg(rm, 1);
7073 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7074 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7075 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7076 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7077 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7078 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7079 tcg_temp_free_i32(tmp);
60011498 7080 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7081 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7082 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7083 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7084 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7085 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7086 tcg_temp_free_i32(tmp2);
7087 tcg_temp_free_i32(tmp3);
60011498 7088 break;
9d935509 7089 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7090 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7091 || ((rm | rd) & 1)) {
7092 return 1;
7093 }
7094 tmp = tcg_const_i32(rd);
7095 tmp2 = tcg_const_i32(rm);
7096
7097 /* Bit 6 is the lowest opcode bit; it distinguishes between
7098 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7099 */
7100 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7101
7102 if (op == NEON_2RM_AESE) {
7103 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7104 } else {
7105 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7106 }
7107 tcg_temp_free_i32(tmp);
7108 tcg_temp_free_i32(tmp2);
7109 tcg_temp_free_i32(tmp3);
7110 break;
f1ecb913 7111 case NEON_2RM_SHA1H:
d614a513 7112 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7113 || ((rm | rd) & 1)) {
7114 return 1;
7115 }
7116 tmp = tcg_const_i32(rd);
7117 tmp2 = tcg_const_i32(rm);
7118
7119 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7120
7121 tcg_temp_free_i32(tmp);
7122 tcg_temp_free_i32(tmp2);
7123 break;
7124 case NEON_2RM_SHA1SU1:
7125 if ((rm | rd) & 1) {
7126 return 1;
7127 }
7128 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7129 if (q) {
d614a513 7130 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7131 return 1;
7132 }
d614a513 7133 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7134 return 1;
7135 }
7136 tmp = tcg_const_i32(rd);
7137 tmp2 = tcg_const_i32(rm);
7138 if (q) {
7139 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7140 } else {
7141 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7142 }
7143 tcg_temp_free_i32(tmp);
7144 tcg_temp_free_i32(tmp2);
7145 break;
9ee6e8bb
PB
7146 default:
7147 elementwise:
7148 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7149 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7150 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7151 neon_reg_offset(rm, pass));
39d5492a 7152 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7153 } else {
dd8fbd78 7154 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7155 }
7156 switch (op) {
600b828c 7157 case NEON_2RM_VREV32:
9ee6e8bb 7158 switch (size) {
dd8fbd78
FN
7159 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7160 case 1: gen_swap_half(tmp); break;
600b828c 7161 default: abort();
9ee6e8bb
PB
7162 }
7163 break;
600b828c 7164 case NEON_2RM_VREV16:
dd8fbd78 7165 gen_rev16(tmp);
9ee6e8bb 7166 break;
600b828c 7167 case NEON_2RM_VCLS:
9ee6e8bb 7168 switch (size) {
dd8fbd78
FN
7169 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7170 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7171 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7172 default: abort();
9ee6e8bb
PB
7173 }
7174 break;
600b828c 7175 case NEON_2RM_VCLZ:
9ee6e8bb 7176 switch (size) {
dd8fbd78
FN
7177 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7178 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7179 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7180 default: abort();
9ee6e8bb
PB
7181 }
7182 break;
600b828c 7183 case NEON_2RM_VCNT:
dd8fbd78 7184 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7185 break;
600b828c 7186 case NEON_2RM_VMVN:
dd8fbd78 7187 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7188 break;
600b828c 7189 case NEON_2RM_VQABS:
9ee6e8bb 7190 switch (size) {
02da0b2d
PM
7191 case 0:
7192 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7193 break;
7194 case 1:
7195 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7196 break;
7197 case 2:
7198 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7199 break;
600b828c 7200 default: abort();
9ee6e8bb
PB
7201 }
7202 break;
600b828c 7203 case NEON_2RM_VQNEG:
9ee6e8bb 7204 switch (size) {
02da0b2d
PM
7205 case 0:
7206 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7207 break;
7208 case 1:
7209 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7210 break;
7211 case 2:
7212 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7213 break;
600b828c 7214 default: abort();
9ee6e8bb
PB
7215 }
7216 break;
600b828c 7217 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7218 tmp2 = tcg_const_i32(0);
9ee6e8bb 7219 switch(size) {
dd8fbd78
FN
7220 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7221 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7222 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7223 default: abort();
9ee6e8bb 7224 }
39d5492a 7225 tcg_temp_free_i32(tmp2);
600b828c 7226 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7227 tcg_gen_not_i32(tmp, tmp);
600b828c 7228 }
9ee6e8bb 7229 break;
600b828c 7230 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7231 tmp2 = tcg_const_i32(0);
9ee6e8bb 7232 switch(size) {
dd8fbd78
FN
7233 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7234 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7235 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7236 default: abort();
9ee6e8bb 7237 }
39d5492a 7238 tcg_temp_free_i32(tmp2);
600b828c 7239 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7240 tcg_gen_not_i32(tmp, tmp);
600b828c 7241 }
9ee6e8bb 7242 break;
600b828c 7243 case NEON_2RM_VCEQ0:
dd8fbd78 7244 tmp2 = tcg_const_i32(0);
9ee6e8bb 7245 switch(size) {
dd8fbd78
FN
7246 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7247 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7248 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7249 default: abort();
9ee6e8bb 7250 }
39d5492a 7251 tcg_temp_free_i32(tmp2);
9ee6e8bb 7252 break;
600b828c 7253 case NEON_2RM_VABS:
9ee6e8bb 7254 switch(size) {
dd8fbd78
FN
7255 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7256 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7257 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7258 default: abort();
9ee6e8bb
PB
7259 }
7260 break;
600b828c 7261 case NEON_2RM_VNEG:
dd8fbd78
FN
7262 tmp2 = tcg_const_i32(0);
7263 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7264 tcg_temp_free_i32(tmp2);
9ee6e8bb 7265 break;
600b828c 7266 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7267 {
7268 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7269 tmp2 = tcg_const_i32(0);
aa47cfdd 7270 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7271 tcg_temp_free_i32(tmp2);
aa47cfdd 7272 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7273 break;
aa47cfdd 7274 }
600b828c 7275 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7276 {
7277 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7278 tmp2 = tcg_const_i32(0);
aa47cfdd 7279 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7280 tcg_temp_free_i32(tmp2);
aa47cfdd 7281 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7282 break;
aa47cfdd 7283 }
600b828c 7284 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7285 {
7286 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7287 tmp2 = tcg_const_i32(0);
aa47cfdd 7288 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7289 tcg_temp_free_i32(tmp2);
aa47cfdd 7290 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7291 break;
aa47cfdd 7292 }
600b828c 7293 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7294 {
7295 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7296 tmp2 = tcg_const_i32(0);
aa47cfdd 7297 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7298 tcg_temp_free_i32(tmp2);
aa47cfdd 7299 tcg_temp_free_ptr(fpstatus);
0e326109 7300 break;
aa47cfdd 7301 }
600b828c 7302 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7303 {
7304 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7305 tmp2 = tcg_const_i32(0);
aa47cfdd 7306 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7307 tcg_temp_free_i32(tmp2);
aa47cfdd 7308 tcg_temp_free_ptr(fpstatus);
0e326109 7309 break;
aa47cfdd 7310 }
600b828c 7311 case NEON_2RM_VABS_F:
4373f3ce 7312 gen_vfp_abs(0);
9ee6e8bb 7313 break;
600b828c 7314 case NEON_2RM_VNEG_F:
4373f3ce 7315 gen_vfp_neg(0);
9ee6e8bb 7316 break;
600b828c 7317 case NEON_2RM_VSWP:
dd8fbd78
FN
7318 tmp2 = neon_load_reg(rd, pass);
7319 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7320 break;
600b828c 7321 case NEON_2RM_VTRN:
dd8fbd78 7322 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7323 switch (size) {
dd8fbd78
FN
7324 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7325 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7326 default: abort();
9ee6e8bb 7327 }
dd8fbd78 7328 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7329 break;
34f7b0a2
WN
7330 case NEON_2RM_VRINTN:
7331 case NEON_2RM_VRINTA:
7332 case NEON_2RM_VRINTM:
7333 case NEON_2RM_VRINTP:
7334 case NEON_2RM_VRINTZ:
7335 {
7336 TCGv_i32 tcg_rmode;
7337 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7338 int rmode;
7339
7340 if (op == NEON_2RM_VRINTZ) {
7341 rmode = FPROUNDING_ZERO;
7342 } else {
7343 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7344 }
7345
7346 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7347 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7348 cpu_env);
7349 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7350 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7351 cpu_env);
7352 tcg_temp_free_ptr(fpstatus);
7353 tcg_temp_free_i32(tcg_rmode);
7354 break;
7355 }
2ce70625
WN
7356 case NEON_2RM_VRINTX:
7357 {
7358 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7359 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7360 tcg_temp_free_ptr(fpstatus);
7361 break;
7362 }
901ad525
WN
7363 case NEON_2RM_VCVTAU:
7364 case NEON_2RM_VCVTAS:
7365 case NEON_2RM_VCVTNU:
7366 case NEON_2RM_VCVTNS:
7367 case NEON_2RM_VCVTPU:
7368 case NEON_2RM_VCVTPS:
7369 case NEON_2RM_VCVTMU:
7370 case NEON_2RM_VCVTMS:
7371 {
7372 bool is_signed = !extract32(insn, 7, 1);
7373 TCGv_ptr fpst = get_fpstatus_ptr(1);
7374 TCGv_i32 tcg_rmode, tcg_shift;
7375 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7376
7377 tcg_shift = tcg_const_i32(0);
7378 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7379 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7380 cpu_env);
7381
7382 if (is_signed) {
7383 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7384 tcg_shift, fpst);
7385 } else {
7386 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7387 tcg_shift, fpst);
7388 }
7389
7390 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7391 cpu_env);
7392 tcg_temp_free_i32(tcg_rmode);
7393 tcg_temp_free_i32(tcg_shift);
7394 tcg_temp_free_ptr(fpst);
7395 break;
7396 }
600b828c 7397 case NEON_2RM_VRECPE:
b6d4443a
AB
7398 {
7399 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7400 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7401 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7402 break;
b6d4443a 7403 }
600b828c 7404 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7405 {
7406 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7407 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7408 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7409 break;
c2fb418e 7410 }
600b828c 7411 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7412 {
7413 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7414 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7415 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7416 break;
b6d4443a 7417 }
600b828c 7418 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7419 {
7420 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7421 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7422 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7423 break;
c2fb418e 7424 }
600b828c 7425 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7426 gen_vfp_sito(0, 1);
9ee6e8bb 7427 break;
600b828c 7428 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7429 gen_vfp_uito(0, 1);
9ee6e8bb 7430 break;
600b828c 7431 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7432 gen_vfp_tosiz(0, 1);
9ee6e8bb 7433 break;
600b828c 7434 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7435 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7436 break;
7437 default:
600b828c
PM
7438 /* Reserved op values were caught by the
7439 * neon_2rm_sizes[] check earlier.
7440 */
7441 abort();
9ee6e8bb 7442 }
600b828c 7443 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7444 tcg_gen_st_f32(cpu_F0s, cpu_env,
7445 neon_reg_offset(rd, pass));
9ee6e8bb 7446 } else {
dd8fbd78 7447 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7448 }
7449 }
7450 break;
7451 }
7452 } else if ((insn & (1 << 10)) == 0) {
7453 /* VTBL, VTBX. */
56907d77
PM
7454 int n = ((insn >> 8) & 3) + 1;
7455 if ((rn + n) > 32) {
7456 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7457 * helper function running off the end of the register file.
7458 */
7459 return 1;
7460 }
7461 n <<= 3;
9ee6e8bb 7462 if (insn & (1 << 6)) {
8f8e3aa4 7463 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7464 } else {
7d1b0095 7465 tmp = tcg_temp_new_i32();
8f8e3aa4 7466 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7467 }
8f8e3aa4 7468 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7469 tmp4 = tcg_const_i32(rn);
7470 tmp5 = tcg_const_i32(n);
9ef39277 7471 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7472 tcg_temp_free_i32(tmp);
9ee6e8bb 7473 if (insn & (1 << 6)) {
8f8e3aa4 7474 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7475 } else {
7d1b0095 7476 tmp = tcg_temp_new_i32();
8f8e3aa4 7477 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7478 }
8f8e3aa4 7479 tmp3 = neon_load_reg(rm, 1);
9ef39277 7480 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7481 tcg_temp_free_i32(tmp5);
7482 tcg_temp_free_i32(tmp4);
8f8e3aa4 7483 neon_store_reg(rd, 0, tmp2);
3018f259 7484 neon_store_reg(rd, 1, tmp3);
7d1b0095 7485 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7486 } else if ((insn & 0x380) == 0) {
7487 /* VDUP */
133da6aa
JR
7488 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7489 return 1;
7490 }
9ee6e8bb 7491 if (insn & (1 << 19)) {
dd8fbd78 7492 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7493 } else {
dd8fbd78 7494 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7495 }
7496 if (insn & (1 << 16)) {
dd8fbd78 7497 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7498 } else if (insn & (1 << 17)) {
7499 if ((insn >> 18) & 1)
dd8fbd78 7500 gen_neon_dup_high16(tmp);
9ee6e8bb 7501 else
dd8fbd78 7502 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7503 }
7504 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7505 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7506 tcg_gen_mov_i32(tmp2, tmp);
7507 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7508 }
7d1b0095 7509 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7510 } else {
7511 return 1;
7512 }
7513 }
7514 }
7515 return 0;
7516}
7517
/* Decode and translate a coprocessor instruction (MCR/MRC/MCRR/MRRC,
 * or the XScale/iwMMXt coprocessor encodings in cp0/cp1).
 *
 * Returns 0 if code was generated for the instruction, or nonzero if it
 * is not recognised or not permitted (the caller will then UNDEF).
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* A clear bit in the cached CPAR value means access to this
         * coprocessor is disabled: UNDEF.
         */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;  /* two-register (MCRR/MRRC) form */
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Decode the register-specifier fields. The 64-bit forms have no
     * crn/opc2 and carry a second transfer register rt2 instead.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions (static, translation-time check) */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Make the CPU state (condexec bits, PC) consistent before
             * calling the helper, since it may raise an exception.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* In icount mode, accesses to I/O registers must be bracketed by
         * gen_io_start()/gen_io_end() (and end the TB; see below).
         */
        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value across rt (low) and rt2 (high) */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7769
5e3f878a
PB
7770
7771/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7772static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7773{
39d5492a 7774 TCGv_i32 tmp;
7d1b0095 7775 tmp = tcg_temp_new_i32();
ecc7b3aa 7776 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7777 store_reg(s, rlow, tmp);
7d1b0095 7778 tmp = tcg_temp_new_i32();
5e3f878a 7779 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7780 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7781 store_reg(s, rhigh, tmp);
7782}
7783
7784/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7785static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7786{
a7812ae4 7787 TCGv_i64 tmp;
39d5492a 7788 TCGv_i32 tmp2;
5e3f878a 7789
36aa55dc 7790 /* Load value and extend to 64 bits. */
a7812ae4 7791 tmp = tcg_temp_new_i64();
5e3f878a
PB
7792 tmp2 = load_reg(s, rlow);
7793 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7794 tcg_temp_free_i32(tmp2);
5e3f878a 7795 tcg_gen_add_i64(val, val, tmp);
b75263d6 7796 tcg_temp_free_i64(tmp);
5e3f878a
PB
7797}
7798
7799/* load and add a 64-bit value from a register pair. */
a7812ae4 7800static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7801{
a7812ae4 7802 TCGv_i64 tmp;
39d5492a
PM
7803 TCGv_i32 tmpl;
7804 TCGv_i32 tmph;
5e3f878a
PB
7805
7806 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7807 tmpl = load_reg(s, rlow);
7808 tmph = load_reg(s, rhigh);
a7812ae4 7809 tmp = tcg_temp_new_i64();
36aa55dc 7810 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7811 tcg_temp_free_i32(tmpl);
7812 tcg_temp_free_i32(tmph);
5e3f878a 7813 tcg_gen_add_i64(val, val, tmp);
b75263d6 7814 tcg_temp_free_i64(tmp);
5e3f878a
PB
7815}
7816
/* Set N and Z flags from hi|lo. */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    /* N is the sign of the 64-bit result, i.e. bit 31 of the high word;
     * cpu_NF holds the flag in its MSB, so a plain copy suffices.
     */
    tcg_gen_mov_i32(cpu_NF, hi);
    /* cpu_ZF is "result is zero iff this is zero", so OR-ing both
     * halves gives the correct 64-bit Z flag.
     */
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
7823
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Memory op: log2 access size, natural alignment, target endianness */
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        /* 64-bit exclusive load: one aligned doubleword access whose
         * halves are split into rt (low) and rt2 (high).
         */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
        /* Remember the loaded value for the matching store-exclusive */
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        /* Remember the (zero-extended) loaded value for the store-exclusive */
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Remember the (zero-extended) address: this marks the monitor as set,
     * since a real zero-extended address can never equal the -1 sentinel
     * written by gen_clrex()/gen_store_exclusive().
     */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7856
/* Generate code for CLREX: clear the local exclusive monitor. */
static void gen_clrex(DisasContext *s)
{
    /* -1 is the "no outstanding exclusive access" sentinel: a real
     * monitored address is a zero-extended 32-bit value (see
     * gen_load_exclusive()) and so can never be all-ones.
     */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7861
/* Generate code for a store-exclusive: store rt (and rt2, for size == 3)
 * to [addr] iff the exclusive monitor still matches, writing 0 to rd on
 * success and 1 on failure. See the comment above gen_load_exclusive()
 * for the overall scheme.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address comparison must be done manually (the value comparison is
     * folded into the cmpxchg below); widen addr to 64 bits to compare
     * against the remembered cpu_exclusive_addr.
     */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        /* 64-bit case: cmpxchg the concatenated pair rt2:rt as one
         * doubleword against the remembered cpu_exclusive_val.
         */
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        tcg_gen_concat_i32_i64(n64, t1, t2);
        tcg_temp_free_i32(t2);
        /* NOTE(review): gen_aa32_frob64() presumably adjusts the value
         * for the guest data endianness before/after the memory op —
         * confirm against its definition.
         */
        gen_aa32_frob64(s, n64);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        gen_aa32_frob64(s, o64);
        /* Success iff the old memory value matched the remembered one */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        /* t0 = 0 on success (old value matched), 1 on failure */
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way, the exclusive monitor is now cleared (-1 sentinel) */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 7924
81465888
PM
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: store LR and SPSR
 * to the banked SP of @mode, optionally writing back the adjusted SP.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap to EL3 (exception level 3 passed explicitly) */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the first (lower-addressed) word relative to the banked SP */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Second adjustment: combined with the first offset and the +4
         * between the two stores, this leaves the written-back SP at
         * base-8 for the descending modes (DA/DB) and base+8 for the
         * ascending ones (IA/IB).
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->is_jmp = DISAS_UPDATE;
}
8051
f4df2210 8052static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8053{
f4df2210 8054 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8055 TCGv_i32 tmp;
8056 TCGv_i32 tmp2;
8057 TCGv_i32 tmp3;
8058 TCGv_i32 addr;
a7812ae4 8059 TCGv_i64 tmp64;
9ee6e8bb 8060
e13886e3
PM
8061 /* M variants do not implement ARM mode; this must raise the INVSTATE
8062 * UsageFault exception.
8063 */
b53d8923 8064 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8065 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8066 default_exception_el(s));
8067 return;
b53d8923 8068 }
9ee6e8bb
PB
8069 cond = insn >> 28;
8070 if (cond == 0xf){
be5e7a76
DES
8071 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8072 * choose to UNDEF. In ARMv5 and above the space is used
8073 * for miscellaneous unconditional instructions.
8074 */
8075 ARCH(5);
8076
9ee6e8bb
PB
8077 /* Unconditional instructions. */
8078 if (((insn >> 25) & 7) == 1) {
8079 /* NEON Data processing. */
d614a513 8080 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8081 goto illegal_op;
d614a513 8082 }
9ee6e8bb 8083
7dcc1f89 8084 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8085 goto illegal_op;
7dcc1f89 8086 }
9ee6e8bb
PB
8087 return;
8088 }
8089 if ((insn & 0x0f100000) == 0x04000000) {
8090 /* NEON load/store. */
d614a513 8091 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8092 goto illegal_op;
d614a513 8093 }
9ee6e8bb 8094
7dcc1f89 8095 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8096 goto illegal_op;
7dcc1f89 8097 }
9ee6e8bb
PB
8098 return;
8099 }
6a57f3eb
WN
8100 if ((insn & 0x0f000e10) == 0x0e000a00) {
8101 /* VFP. */
7dcc1f89 8102 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8103 goto illegal_op;
8104 }
8105 return;
8106 }
3d185e5d
PM
8107 if (((insn & 0x0f30f000) == 0x0510f000) ||
8108 ((insn & 0x0f30f010) == 0x0710f000)) {
8109 if ((insn & (1 << 22)) == 0) {
8110 /* PLDW; v7MP */
d614a513 8111 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8112 goto illegal_op;
8113 }
8114 }
8115 /* Otherwise PLD; v5TE+ */
be5e7a76 8116 ARCH(5TE);
3d185e5d
PM
8117 return;
8118 }
8119 if (((insn & 0x0f70f000) == 0x0450f000) ||
8120 ((insn & 0x0f70f010) == 0x0650f000)) {
8121 ARCH(7);
8122 return; /* PLI; V7 */
8123 }
8124 if (((insn & 0x0f700000) == 0x04100000) ||
8125 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8126 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8127 goto illegal_op;
8128 }
8129 return; /* v7MP: Unallocated memory hint: must NOP */
8130 }
8131
8132 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8133 ARCH(6);
8134 /* setend */
9886ecdf
PB
8135 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8136 gen_helper_setend(cpu_env);
8137 s->is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8138 }
8139 return;
8140 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8141 switch ((insn >> 4) & 0xf) {
8142 case 1: /* clrex */
8143 ARCH(6K);
426f5abc 8144 gen_clrex(s);
9ee6e8bb
PB
8145 return;
8146 case 4: /* dsb */
8147 case 5: /* dmb */
9ee6e8bb 8148 ARCH(7);
61e4c432 8149 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8150 return;
6df99dec
SS
8151 case 6: /* isb */
8152 /* We need to break the TB after this insn to execute
8153 * self-modifying code correctly and also to take
8154 * any pending interrupts immediately.
8155 */
8156 gen_lookup_tb(s);
8157 return;
9ee6e8bb
PB
8158 default:
8159 goto illegal_op;
8160 }
8161 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8162 /* srs */
81465888
PM
8163 ARCH(6);
8164 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8165 return;
ea825eee 8166 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8167 /* rfe */
c67b6b71 8168 int32_t offset;
9ee6e8bb
PB
8169 if (IS_USER(s))
8170 goto illegal_op;
8171 ARCH(6);
8172 rn = (insn >> 16) & 0xf;
b0109805 8173 addr = load_reg(s, rn);
9ee6e8bb
PB
8174 i = (insn >> 23) & 3;
8175 switch (i) {
b0109805 8176 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8177 case 1: offset = 0; break; /* IA */
8178 case 2: offset = -8; break; /* DB */
b0109805 8179 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8180 default: abort();
8181 }
8182 if (offset)
b0109805
PB
8183 tcg_gen_addi_i32(addr, addr, offset);
8184 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8185 tmp = tcg_temp_new_i32();
12dcc321 8186 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8187 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8188 tmp2 = tcg_temp_new_i32();
12dcc321 8189 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8190 if (insn & (1 << 21)) {
8191 /* Base writeback. */
8192 switch (i) {
b0109805 8193 case 0: offset = -8; break;
c67b6b71
FN
8194 case 1: offset = 4; break;
8195 case 2: offset = -4; break;
b0109805 8196 case 3: offset = 0; break;
9ee6e8bb
PB
8197 default: abort();
8198 }
8199 if (offset)
b0109805
PB
8200 tcg_gen_addi_i32(addr, addr, offset);
8201 store_reg(s, rn, addr);
8202 } else {
7d1b0095 8203 tcg_temp_free_i32(addr);
9ee6e8bb 8204 }
b0109805 8205 gen_rfe(s, tmp, tmp2);
c67b6b71 8206 return;
9ee6e8bb
PB
8207 } else if ((insn & 0x0e000000) == 0x0a000000) {
8208 /* branch link and change to thumb (blx <offset>) */
8209 int32_t offset;
8210
8211 val = (uint32_t)s->pc;
7d1b0095 8212 tmp = tcg_temp_new_i32();
d9ba4830
PB
8213 tcg_gen_movi_i32(tmp, val);
8214 store_reg(s, 14, tmp);
9ee6e8bb
PB
8215 /* Sign-extend the 24-bit offset */
8216 offset = (((int32_t)insn) << 8) >> 8;
8217 /* offset * 4 + bit24 * 2 + (thumb bit) */
8218 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8219 /* pipeline offset */
8220 val += 4;
be5e7a76 8221 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8222 gen_bx_im(s, val);
9ee6e8bb
PB
8223 return;
8224 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8225 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8226 /* iWMMXt register transfer. */
c0f4af17 8227 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8228 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8229 return;
c0f4af17
PM
8230 }
8231 }
9ee6e8bb
PB
8232 }
8233 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8234 /* Coprocessor double register transfer. */
be5e7a76 8235 ARCH(5TE);
9ee6e8bb
PB
8236 } else if ((insn & 0x0f000010) == 0x0e000010) {
8237 /* Additional coprocessor register transfer. */
7997d92f 8238 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8239 uint32_t mask;
8240 uint32_t val;
8241 /* cps (privileged) */
8242 if (IS_USER(s))
8243 return;
8244 mask = val = 0;
8245 if (insn & (1 << 19)) {
8246 if (insn & (1 << 8))
8247 mask |= CPSR_A;
8248 if (insn & (1 << 7))
8249 mask |= CPSR_I;
8250 if (insn & (1 << 6))
8251 mask |= CPSR_F;
8252 if (insn & (1 << 18))
8253 val |= mask;
8254 }
7997d92f 8255 if (insn & (1 << 17)) {
9ee6e8bb
PB
8256 mask |= CPSR_M;
8257 val |= (insn & 0x1f);
8258 }
8259 if (mask) {
2fbac54b 8260 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8261 }
8262 return;
8263 }
8264 goto illegal_op;
8265 }
8266 if (cond != 0xe) {
8267 /* if not always execute, we generate a conditional jump to
8268 next instruction */
8269 s->condlabel = gen_new_label();
39fb730a 8270 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8271 s->condjmp = 1;
8272 }
8273 if ((insn & 0x0f900000) == 0x03000000) {
8274 if ((insn & (1 << 21)) == 0) {
8275 ARCH(6T2);
8276 rd = (insn >> 12) & 0xf;
8277 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8278 if ((insn & (1 << 22)) == 0) {
8279 /* MOVW */
7d1b0095 8280 tmp = tcg_temp_new_i32();
5e3f878a 8281 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8282 } else {
8283 /* MOVT */
5e3f878a 8284 tmp = load_reg(s, rd);
86831435 8285 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8286 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8287 }
5e3f878a 8288 store_reg(s, rd, tmp);
9ee6e8bb
PB
8289 } else {
8290 if (((insn >> 12) & 0xf) != 0xf)
8291 goto illegal_op;
8292 if (((insn >> 16) & 0xf) == 0) {
8293 gen_nop_hint(s, insn & 0xff);
8294 } else {
8295 /* CPSR = immediate */
8296 val = insn & 0xff;
8297 shift = ((insn >> 8) & 0xf) * 2;
8298 if (shift)
8299 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8300 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8301 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8302 i, val)) {
9ee6e8bb 8303 goto illegal_op;
7dcc1f89 8304 }
9ee6e8bb
PB
8305 }
8306 }
8307 } else if ((insn & 0x0f900000) == 0x01000000
8308 && (insn & 0x00000090) != 0x00000090) {
8309 /* miscellaneous instructions */
8310 op1 = (insn >> 21) & 3;
8311 sh = (insn >> 4) & 0xf;
8312 rm = insn & 0xf;
8313 switch (sh) {
8bfd0550
PM
8314 case 0x0: /* MSR, MRS */
8315 if (insn & (1 << 9)) {
8316 /* MSR (banked) and MRS (banked) */
8317 int sysm = extract32(insn, 16, 4) |
8318 (extract32(insn, 8, 1) << 4);
8319 int r = extract32(insn, 22, 1);
8320
8321 if (op1 & 1) {
8322 /* MSR (banked) */
8323 gen_msr_banked(s, r, sysm, rm);
8324 } else {
8325 /* MRS (banked) */
8326 int rd = extract32(insn, 12, 4);
8327
8328 gen_mrs_banked(s, r, sysm, rd);
8329 }
8330 break;
8331 }
8332
8333 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8334 if (op1 & 1) {
8335 /* PSR = reg */
2fbac54b 8336 tmp = load_reg(s, rm);
9ee6e8bb 8337 i = ((op1 & 2) != 0);
7dcc1f89 8338 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8339 goto illegal_op;
8340 } else {
8341 /* reg = PSR */
8342 rd = (insn >> 12) & 0xf;
8343 if (op1 & 2) {
8344 if (IS_USER(s))
8345 goto illegal_op;
d9ba4830 8346 tmp = load_cpu_field(spsr);
9ee6e8bb 8347 } else {
7d1b0095 8348 tmp = tcg_temp_new_i32();
9ef39277 8349 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8350 }
d9ba4830 8351 store_reg(s, rd, tmp);
9ee6e8bb
PB
8352 }
8353 break;
8354 case 0x1:
8355 if (op1 == 1) {
8356 /* branch/exchange thumb (bx). */
be5e7a76 8357 ARCH(4T);
d9ba4830
PB
8358 tmp = load_reg(s, rm);
8359 gen_bx(s, tmp);
9ee6e8bb
PB
8360 } else if (op1 == 3) {
8361 /* clz */
be5e7a76 8362 ARCH(5);
9ee6e8bb 8363 rd = (insn >> 12) & 0xf;
1497c961 8364 tmp = load_reg(s, rm);
7539a012 8365 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8366 store_reg(s, rd, tmp);
9ee6e8bb
PB
8367 } else {
8368 goto illegal_op;
8369 }
8370 break;
8371 case 0x2:
8372 if (op1 == 1) {
8373 ARCH(5J); /* bxj */
8374 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8375 tmp = load_reg(s, rm);
8376 gen_bx(s, tmp);
9ee6e8bb
PB
8377 } else {
8378 goto illegal_op;
8379 }
8380 break;
8381 case 0x3:
8382 if (op1 != 1)
8383 goto illegal_op;
8384
be5e7a76 8385 ARCH(5);
9ee6e8bb 8386 /* branch link/exchange thumb (blx) */
d9ba4830 8387 tmp = load_reg(s, rm);
7d1b0095 8388 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8389 tcg_gen_movi_i32(tmp2, s->pc);
8390 store_reg(s, 14, tmp2);
8391 gen_bx(s, tmp);
9ee6e8bb 8392 break;
eb0ecd5a
WN
8393 case 0x4:
8394 {
8395 /* crc32/crc32c */
8396 uint32_t c = extract32(insn, 8, 4);
8397
8398 /* Check this CPU supports ARMv8 CRC instructions.
8399 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8400 * Bits 8, 10 and 11 should be zero.
8401 */
d614a513 8402 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8403 (c & 0xd) != 0) {
8404 goto illegal_op;
8405 }
8406
8407 rn = extract32(insn, 16, 4);
8408 rd = extract32(insn, 12, 4);
8409
8410 tmp = load_reg(s, rn);
8411 tmp2 = load_reg(s, rm);
aa633469
PM
8412 if (op1 == 0) {
8413 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8414 } else if (op1 == 1) {
8415 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8416 }
eb0ecd5a
WN
8417 tmp3 = tcg_const_i32(1 << op1);
8418 if (c & 0x2) {
8419 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8420 } else {
8421 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8422 }
8423 tcg_temp_free_i32(tmp2);
8424 tcg_temp_free_i32(tmp3);
8425 store_reg(s, rd, tmp);
8426 break;
8427 }
9ee6e8bb 8428 case 0x5: /* saturating add/subtract */
be5e7a76 8429 ARCH(5TE);
9ee6e8bb
PB
8430 rd = (insn >> 12) & 0xf;
8431 rn = (insn >> 16) & 0xf;
b40d0353 8432 tmp = load_reg(s, rm);
5e3f878a 8433 tmp2 = load_reg(s, rn);
9ee6e8bb 8434 if (op1 & 2)
9ef39277 8435 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8436 if (op1 & 1)
9ef39277 8437 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8438 else
9ef39277 8439 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8440 tcg_temp_free_i32(tmp2);
5e3f878a 8441 store_reg(s, rd, tmp);
9ee6e8bb 8442 break;
49e14940 8443 case 7:
d4a2dc67
PM
8444 {
8445 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8446 switch (op1) {
19a6e31c
PM
8447 case 0:
8448 /* HLT */
8449 gen_hlt(s, imm16);
8450 break;
37e6456e
PM
8451 case 1:
8452 /* bkpt */
8453 ARCH(5);
8454 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8455 syn_aa32_bkpt(imm16, false),
8456 default_exception_el(s));
37e6456e
PM
8457 break;
8458 case 2:
8459 /* Hypervisor call (v7) */
8460 ARCH(7);
8461 if (IS_USER(s)) {
8462 goto illegal_op;
8463 }
8464 gen_hvc(s, imm16);
8465 break;
8466 case 3:
8467 /* Secure monitor call (v6+) */
8468 ARCH(6K);
8469 if (IS_USER(s)) {
8470 goto illegal_op;
8471 }
8472 gen_smc(s);
8473 break;
8474 default:
19a6e31c 8475 g_assert_not_reached();
49e14940 8476 }
9ee6e8bb 8477 break;
d4a2dc67 8478 }
9ee6e8bb
PB
8479 case 0x8: /* signed multiply */
8480 case 0xa:
8481 case 0xc:
8482 case 0xe:
be5e7a76 8483 ARCH(5TE);
9ee6e8bb
PB
8484 rs = (insn >> 8) & 0xf;
8485 rn = (insn >> 12) & 0xf;
8486 rd = (insn >> 16) & 0xf;
8487 if (op1 == 1) {
8488 /* (32 * 16) >> 16 */
5e3f878a
PB
8489 tmp = load_reg(s, rm);
8490 tmp2 = load_reg(s, rs);
9ee6e8bb 8491 if (sh & 4)
5e3f878a 8492 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8493 else
5e3f878a 8494 gen_sxth(tmp2);
a7812ae4
PB
8495 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8496 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8497 tmp = tcg_temp_new_i32();
ecc7b3aa 8498 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8499 tcg_temp_free_i64(tmp64);
9ee6e8bb 8500 if ((sh & 2) == 0) {
5e3f878a 8501 tmp2 = load_reg(s, rn);
9ef39277 8502 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8503 tcg_temp_free_i32(tmp2);
9ee6e8bb 8504 }
5e3f878a 8505 store_reg(s, rd, tmp);
9ee6e8bb
PB
8506 } else {
8507 /* 16 * 16 */
5e3f878a
PB
8508 tmp = load_reg(s, rm);
8509 tmp2 = load_reg(s, rs);
8510 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8511 tcg_temp_free_i32(tmp2);
9ee6e8bb 8512 if (op1 == 2) {
a7812ae4
PB
8513 tmp64 = tcg_temp_new_i64();
8514 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8515 tcg_temp_free_i32(tmp);
a7812ae4
PB
8516 gen_addq(s, tmp64, rn, rd);
8517 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8518 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8519 } else {
8520 if (op1 == 0) {
5e3f878a 8521 tmp2 = load_reg(s, rn);
9ef39277 8522 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8523 tcg_temp_free_i32(tmp2);
9ee6e8bb 8524 }
5e3f878a 8525 store_reg(s, rd, tmp);
9ee6e8bb
PB
8526 }
8527 }
8528 break;
8529 default:
8530 goto illegal_op;
8531 }
8532 } else if (((insn & 0x0e000000) == 0 &&
8533 (insn & 0x00000090) != 0x90) ||
8534 ((insn & 0x0e000000) == (1 << 25))) {
8535 int set_cc, logic_cc, shiftop;
8536
8537 op1 = (insn >> 21) & 0xf;
8538 set_cc = (insn >> 20) & 1;
8539 logic_cc = table_logic_cc[op1] & set_cc;
8540
8541 /* data processing instruction */
8542 if (insn & (1 << 25)) {
8543 /* immediate operand */
8544 val = insn & 0xff;
8545 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8546 if (shift) {
9ee6e8bb 8547 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8548 }
7d1b0095 8549 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8550 tcg_gen_movi_i32(tmp2, val);
8551 if (logic_cc && shift) {
8552 gen_set_CF_bit31(tmp2);
8553 }
9ee6e8bb
PB
8554 } else {
8555 /* register */
8556 rm = (insn) & 0xf;
e9bb4aa9 8557 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8558 shiftop = (insn >> 5) & 3;
8559 if (!(insn & (1 << 4))) {
8560 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8561 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8562 } else {
8563 rs = (insn >> 8) & 0xf;
8984bd2e 8564 tmp = load_reg(s, rs);
e9bb4aa9 8565 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8566 }
8567 }
8568 if (op1 != 0x0f && op1 != 0x0d) {
8569 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8570 tmp = load_reg(s, rn);
8571 } else {
39d5492a 8572 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8573 }
8574 rd = (insn >> 12) & 0xf;
8575 switch(op1) {
8576 case 0x00:
e9bb4aa9
JR
8577 tcg_gen_and_i32(tmp, tmp, tmp2);
8578 if (logic_cc) {
8579 gen_logic_CC(tmp);
8580 }
7dcc1f89 8581 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8582 break;
8583 case 0x01:
e9bb4aa9
JR
8584 tcg_gen_xor_i32(tmp, tmp, tmp2);
8585 if (logic_cc) {
8586 gen_logic_CC(tmp);
8587 }
7dcc1f89 8588 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8589 break;
8590 case 0x02:
8591 if (set_cc && rd == 15) {
8592 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8593 if (IS_USER(s)) {
9ee6e8bb 8594 goto illegal_op;
e9bb4aa9 8595 }
72485ec4 8596 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8597 gen_exception_return(s, tmp);
9ee6e8bb 8598 } else {
e9bb4aa9 8599 if (set_cc) {
72485ec4 8600 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8601 } else {
8602 tcg_gen_sub_i32(tmp, tmp, tmp2);
8603 }
7dcc1f89 8604 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8605 }
8606 break;
8607 case 0x03:
e9bb4aa9 8608 if (set_cc) {
72485ec4 8609 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8610 } else {
8611 tcg_gen_sub_i32(tmp, tmp2, tmp);
8612 }
7dcc1f89 8613 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8614 break;
8615 case 0x04:
e9bb4aa9 8616 if (set_cc) {
72485ec4 8617 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8618 } else {
8619 tcg_gen_add_i32(tmp, tmp, tmp2);
8620 }
7dcc1f89 8621 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8622 break;
8623 case 0x05:
e9bb4aa9 8624 if (set_cc) {
49b4c31e 8625 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8626 } else {
8627 gen_add_carry(tmp, tmp, tmp2);
8628 }
7dcc1f89 8629 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8630 break;
8631 case 0x06:
e9bb4aa9 8632 if (set_cc) {
2de68a49 8633 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8634 } else {
8635 gen_sub_carry(tmp, tmp, tmp2);
8636 }
7dcc1f89 8637 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8638 break;
8639 case 0x07:
e9bb4aa9 8640 if (set_cc) {
2de68a49 8641 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8642 } else {
8643 gen_sub_carry(tmp, tmp2, tmp);
8644 }
7dcc1f89 8645 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8646 break;
8647 case 0x08:
8648 if (set_cc) {
e9bb4aa9
JR
8649 tcg_gen_and_i32(tmp, tmp, tmp2);
8650 gen_logic_CC(tmp);
9ee6e8bb 8651 }
7d1b0095 8652 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8653 break;
8654 case 0x09:
8655 if (set_cc) {
e9bb4aa9
JR
8656 tcg_gen_xor_i32(tmp, tmp, tmp2);
8657 gen_logic_CC(tmp);
9ee6e8bb 8658 }
7d1b0095 8659 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8660 break;
8661 case 0x0a:
8662 if (set_cc) {
72485ec4 8663 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8664 }
7d1b0095 8665 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8666 break;
8667 case 0x0b:
8668 if (set_cc) {
72485ec4 8669 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8670 }
7d1b0095 8671 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8672 break;
8673 case 0x0c:
e9bb4aa9
JR
8674 tcg_gen_or_i32(tmp, tmp, tmp2);
8675 if (logic_cc) {
8676 gen_logic_CC(tmp);
8677 }
7dcc1f89 8678 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8679 break;
8680 case 0x0d:
8681 if (logic_cc && rd == 15) {
8682 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8683 if (IS_USER(s)) {
9ee6e8bb 8684 goto illegal_op;
e9bb4aa9
JR
8685 }
8686 gen_exception_return(s, tmp2);
9ee6e8bb 8687 } else {
e9bb4aa9
JR
8688 if (logic_cc) {
8689 gen_logic_CC(tmp2);
8690 }
7dcc1f89 8691 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8692 }
8693 break;
8694 case 0x0e:
f669df27 8695 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8696 if (logic_cc) {
8697 gen_logic_CC(tmp);
8698 }
7dcc1f89 8699 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8700 break;
8701 default:
8702 case 0x0f:
e9bb4aa9
JR
8703 tcg_gen_not_i32(tmp2, tmp2);
8704 if (logic_cc) {
8705 gen_logic_CC(tmp2);
8706 }
7dcc1f89 8707 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8708 break;
8709 }
e9bb4aa9 8710 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8711 tcg_temp_free_i32(tmp2);
e9bb4aa9 8712 }
9ee6e8bb
PB
8713 } else {
8714 /* other instructions */
8715 op1 = (insn >> 24) & 0xf;
8716 switch(op1) {
8717 case 0x0:
8718 case 0x1:
8719 /* multiplies, extra load/stores */
8720 sh = (insn >> 5) & 3;
8721 if (sh == 0) {
8722 if (op1 == 0x0) {
8723 rd = (insn >> 16) & 0xf;
8724 rn = (insn >> 12) & 0xf;
8725 rs = (insn >> 8) & 0xf;
8726 rm = (insn) & 0xf;
8727 op1 = (insn >> 20) & 0xf;
8728 switch (op1) {
8729 case 0: case 1: case 2: case 3: case 6:
8730 /* 32 bit mul */
5e3f878a
PB
8731 tmp = load_reg(s, rs);
8732 tmp2 = load_reg(s, rm);
8733 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8734 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8735 if (insn & (1 << 22)) {
8736 /* Subtract (mls) */
8737 ARCH(6T2);
5e3f878a
PB
8738 tmp2 = load_reg(s, rn);
8739 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8740 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8741 } else if (insn & (1 << 21)) {
8742 /* Add */
5e3f878a
PB
8743 tmp2 = load_reg(s, rn);
8744 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8745 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8746 }
8747 if (insn & (1 << 20))
5e3f878a
PB
8748 gen_logic_CC(tmp);
8749 store_reg(s, rd, tmp);
9ee6e8bb 8750 break;
8aac08b1
AJ
8751 case 4:
8752 /* 64 bit mul double accumulate (UMAAL) */
8753 ARCH(6);
8754 tmp = load_reg(s, rs);
8755 tmp2 = load_reg(s, rm);
8756 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8757 gen_addq_lo(s, tmp64, rn);
8758 gen_addq_lo(s, tmp64, rd);
8759 gen_storeq_reg(s, rn, rd, tmp64);
8760 tcg_temp_free_i64(tmp64);
8761 break;
8762 case 8: case 9: case 10: case 11:
8763 case 12: case 13: case 14: case 15:
8764 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8765 tmp = load_reg(s, rs);
8766 tmp2 = load_reg(s, rm);
8aac08b1 8767 if (insn & (1 << 22)) {
c9f10124 8768 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8769 } else {
c9f10124 8770 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8771 }
8772 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8773 TCGv_i32 al = load_reg(s, rn);
8774 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8775 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8776 tcg_temp_free_i32(al);
8777 tcg_temp_free_i32(ah);
9ee6e8bb 8778 }
8aac08b1 8779 if (insn & (1 << 20)) {
c9f10124 8780 gen_logicq_cc(tmp, tmp2);
8aac08b1 8781 }
c9f10124
RH
8782 store_reg(s, rn, tmp);
8783 store_reg(s, rd, tmp2);
9ee6e8bb 8784 break;
8aac08b1
AJ
8785 default:
8786 goto illegal_op;
9ee6e8bb
PB
8787 }
8788 } else {
8789 rn = (insn >> 16) & 0xf;
8790 rd = (insn >> 12) & 0xf;
8791 if (insn & (1 << 23)) {
8792 /* load/store exclusive */
2359bf80 8793 int op2 = (insn >> 8) & 3;
86753403 8794 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8795
8796 switch (op2) {
8797 case 0: /* lda/stl */
8798 if (op1 == 1) {
8799 goto illegal_op;
8800 }
8801 ARCH(8);
8802 break;
8803 case 1: /* reserved */
8804 goto illegal_op;
8805 case 2: /* ldaex/stlex */
8806 ARCH(8);
8807 break;
8808 case 3: /* ldrex/strex */
8809 if (op1) {
8810 ARCH(6K);
8811 } else {
8812 ARCH(6);
8813 }
8814 break;
8815 }
8816
3174f8e9 8817 addr = tcg_temp_local_new_i32();
98a46317 8818 load_reg_var(s, addr, rn);
2359bf80
MR
8819
8820 /* Since the emulation does not have barriers,
8821 the acquire/release semantics need no special
8822 handling */
8823 if (op2 == 0) {
8824 if (insn & (1 << 20)) {
8825 tmp = tcg_temp_new_i32();
8826 switch (op1) {
8827 case 0: /* lda */
9bb6558a
PM
8828 gen_aa32_ld32u_iss(s, tmp, addr,
8829 get_mem_index(s),
8830 rd | ISSIsAcqRel);
2359bf80
MR
8831 break;
8832 case 2: /* ldab */
9bb6558a
PM
8833 gen_aa32_ld8u_iss(s, tmp, addr,
8834 get_mem_index(s),
8835 rd | ISSIsAcqRel);
2359bf80
MR
8836 break;
8837 case 3: /* ldah */
9bb6558a
PM
8838 gen_aa32_ld16u_iss(s, tmp, addr,
8839 get_mem_index(s),
8840 rd | ISSIsAcqRel);
2359bf80
MR
8841 break;
8842 default:
8843 abort();
8844 }
8845 store_reg(s, rd, tmp);
8846 } else {
8847 rm = insn & 0xf;
8848 tmp = load_reg(s, rm);
8849 switch (op1) {
8850 case 0: /* stl */
9bb6558a
PM
8851 gen_aa32_st32_iss(s, tmp, addr,
8852 get_mem_index(s),
8853 rm | ISSIsAcqRel);
2359bf80
MR
8854 break;
8855 case 2: /* stlb */
9bb6558a
PM
8856 gen_aa32_st8_iss(s, tmp, addr,
8857 get_mem_index(s),
8858 rm | ISSIsAcqRel);
2359bf80
MR
8859 break;
8860 case 3: /* stlh */
9bb6558a
PM
8861 gen_aa32_st16_iss(s, tmp, addr,
8862 get_mem_index(s),
8863 rm | ISSIsAcqRel);
2359bf80
MR
8864 break;
8865 default:
8866 abort();
8867 }
8868 tcg_temp_free_i32(tmp);
8869 }
8870 } else if (insn & (1 << 20)) {
86753403
PB
8871 switch (op1) {
8872 case 0: /* ldrex */
426f5abc 8873 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8874 break;
8875 case 1: /* ldrexd */
426f5abc 8876 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8877 break;
8878 case 2: /* ldrexb */
426f5abc 8879 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8880 break;
8881 case 3: /* ldrexh */
426f5abc 8882 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8883 break;
8884 default:
8885 abort();
8886 }
9ee6e8bb
PB
8887 } else {
8888 rm = insn & 0xf;
86753403
PB
8889 switch (op1) {
8890 case 0: /* strex */
426f5abc 8891 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8892 break;
8893 case 1: /* strexd */
502e64fe 8894 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8895 break;
8896 case 2: /* strexb */
426f5abc 8897 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8898 break;
8899 case 3: /* strexh */
426f5abc 8900 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8901 break;
8902 default:
8903 abort();
8904 }
9ee6e8bb 8905 }
39d5492a 8906 tcg_temp_free_i32(addr);
9ee6e8bb 8907 } else {
cf12bce0
EC
8908 TCGv taddr;
8909 TCGMemOp opc = s->be_data;
8910
9ee6e8bb
PB
8911 /* SWP instruction */
8912 rm = (insn) & 0xf;
8913
9ee6e8bb 8914 if (insn & (1 << 22)) {
cf12bce0 8915 opc |= MO_UB;
9ee6e8bb 8916 } else {
cf12bce0 8917 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8918 }
cf12bce0
EC
8919
8920 addr = load_reg(s, rn);
8921 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8922 tcg_temp_free_i32(addr);
cf12bce0
EC
8923
8924 tmp = load_reg(s, rm);
8925 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8926 get_mem_index(s), opc);
8927 tcg_temp_free(taddr);
8928 store_reg(s, rd, tmp);
9ee6e8bb
PB
8929 }
8930 }
8931 } else {
8932 int address_offset;
3960c336 8933 bool load = insn & (1 << 20);
63f26fcf
PM
8934 bool wbit = insn & (1 << 21);
8935 bool pbit = insn & (1 << 24);
3960c336 8936 bool doubleword = false;
9bb6558a
PM
8937 ISSInfo issinfo;
8938
9ee6e8bb
PB
8939 /* Misc load/store */
8940 rn = (insn >> 16) & 0xf;
8941 rd = (insn >> 12) & 0xf;
3960c336 8942
9bb6558a
PM
8943 /* ISS not valid if writeback */
8944 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8945
3960c336
PM
8946 if (!load && (sh & 2)) {
8947 /* doubleword */
8948 ARCH(5TE);
8949 if (rd & 1) {
8950 /* UNPREDICTABLE; we choose to UNDEF */
8951 goto illegal_op;
8952 }
8953 load = (sh & 1) == 0;
8954 doubleword = true;
8955 }
8956
b0109805 8957 addr = load_reg(s, rn);
63f26fcf 8958 if (pbit) {
b0109805 8959 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 8960 }
9ee6e8bb 8961 address_offset = 0;
3960c336
PM
8962
8963 if (doubleword) {
8964 if (!load) {
9ee6e8bb 8965 /* store */
b0109805 8966 tmp = load_reg(s, rd);
12dcc321 8967 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8968 tcg_temp_free_i32(tmp);
b0109805
PB
8969 tcg_gen_addi_i32(addr, addr, 4);
8970 tmp = load_reg(s, rd + 1);
12dcc321 8971 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8972 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8973 } else {
8974 /* load */
5a839c0d 8975 tmp = tcg_temp_new_i32();
12dcc321 8976 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8977 store_reg(s, rd, tmp);
8978 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8979 tmp = tcg_temp_new_i32();
12dcc321 8980 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8981 rd++;
9ee6e8bb
PB
8982 }
8983 address_offset = -4;
3960c336
PM
8984 } else if (load) {
8985 /* load */
8986 tmp = tcg_temp_new_i32();
8987 switch (sh) {
8988 case 1:
9bb6558a
PM
8989 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8990 issinfo);
3960c336
PM
8991 break;
8992 case 2:
9bb6558a
PM
8993 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8994 issinfo);
3960c336
PM
8995 break;
8996 default:
8997 case 3:
9bb6558a
PM
8998 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8999 issinfo);
3960c336
PM
9000 break;
9001 }
9ee6e8bb
PB
9002 } else {
9003 /* store */
b0109805 9004 tmp = load_reg(s, rd);
9bb6558a 9005 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9006 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9007 }
9008 /* Perform base writeback before the loaded value to
9009 ensure correct behavior with overlapping index registers.
b6af0975 9010 ldrd with base writeback is undefined if the
9ee6e8bb 9011 destination and index registers overlap. */
63f26fcf 9012 if (!pbit) {
b0109805
PB
9013 gen_add_datah_offset(s, insn, address_offset, addr);
9014 store_reg(s, rn, addr);
63f26fcf 9015 } else if (wbit) {
9ee6e8bb 9016 if (address_offset)
b0109805
PB
9017 tcg_gen_addi_i32(addr, addr, address_offset);
9018 store_reg(s, rn, addr);
9019 } else {
7d1b0095 9020 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9021 }
9022 if (load) {
9023 /* Complete the load. */
b0109805 9024 store_reg(s, rd, tmp);
9ee6e8bb
PB
9025 }
9026 }
9027 break;
9028 case 0x4:
9029 case 0x5:
9030 goto do_ldst;
9031 case 0x6:
9032 case 0x7:
9033 if (insn & (1 << 4)) {
9034 ARCH(6);
9035 /* Armv6 Media instructions. */
9036 rm = insn & 0xf;
9037 rn = (insn >> 16) & 0xf;
2c0262af 9038 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9039 rs = (insn >> 8) & 0xf;
9040 switch ((insn >> 23) & 3) {
9041 case 0: /* Parallel add/subtract. */
9042 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9043 tmp = load_reg(s, rn);
9044 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9045 sh = (insn >> 5) & 7;
9046 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9047 goto illegal_op;
6ddbc6e4 9048 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9049 tcg_temp_free_i32(tmp2);
6ddbc6e4 9050 store_reg(s, rd, tmp);
9ee6e8bb
PB
9051 break;
9052 case 1:
9053 if ((insn & 0x00700020) == 0) {
6c95676b 9054 /* Halfword pack. */
3670669c
PB
9055 tmp = load_reg(s, rn);
9056 tmp2 = load_reg(s, rm);
9ee6e8bb 9057 shift = (insn >> 7) & 0x1f;
3670669c
PB
9058 if (insn & (1 << 6)) {
9059 /* pkhtb */
22478e79
AZ
9060 if (shift == 0)
9061 shift = 31;
9062 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9063 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9064 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9065 } else {
9066 /* pkhbt */
22478e79
AZ
9067 if (shift)
9068 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9069 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9070 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9071 }
9072 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9073 tcg_temp_free_i32(tmp2);
3670669c 9074 store_reg(s, rd, tmp);
9ee6e8bb
PB
9075 } else if ((insn & 0x00200020) == 0x00200000) {
9076 /* [us]sat */
6ddbc6e4 9077 tmp = load_reg(s, rm);
9ee6e8bb
PB
9078 shift = (insn >> 7) & 0x1f;
9079 if (insn & (1 << 6)) {
9080 if (shift == 0)
9081 shift = 31;
6ddbc6e4 9082 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9083 } else {
6ddbc6e4 9084 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9085 }
9086 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9087 tmp2 = tcg_const_i32(sh);
9088 if (insn & (1 << 22))
9ef39277 9089 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9090 else
9ef39277 9091 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9092 tcg_temp_free_i32(tmp2);
6ddbc6e4 9093 store_reg(s, rd, tmp);
9ee6e8bb
PB
9094 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9095 /* [us]sat16 */
6ddbc6e4 9096 tmp = load_reg(s, rm);
9ee6e8bb 9097 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9098 tmp2 = tcg_const_i32(sh);
9099 if (insn & (1 << 22))
9ef39277 9100 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9101 else
9ef39277 9102 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9103 tcg_temp_free_i32(tmp2);
6ddbc6e4 9104 store_reg(s, rd, tmp);
9ee6e8bb
PB
9105 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9106 /* Select bytes. */
6ddbc6e4
PB
9107 tmp = load_reg(s, rn);
9108 tmp2 = load_reg(s, rm);
7d1b0095 9109 tmp3 = tcg_temp_new_i32();
0ecb72a5 9110 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9111 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9112 tcg_temp_free_i32(tmp3);
9113 tcg_temp_free_i32(tmp2);
6ddbc6e4 9114 store_reg(s, rd, tmp);
9ee6e8bb 9115 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9116 tmp = load_reg(s, rm);
9ee6e8bb 9117 shift = (insn >> 10) & 3;
1301f322 9118 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9119 rotate, a shift is sufficient. */
9120 if (shift != 0)
f669df27 9121 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9122 op1 = (insn >> 20) & 7;
9123 switch (op1) {
5e3f878a
PB
9124 case 0: gen_sxtb16(tmp); break;
9125 case 2: gen_sxtb(tmp); break;
9126 case 3: gen_sxth(tmp); break;
9127 case 4: gen_uxtb16(tmp); break;
9128 case 6: gen_uxtb(tmp); break;
9129 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9130 default: goto illegal_op;
9131 }
9132 if (rn != 15) {
5e3f878a 9133 tmp2 = load_reg(s, rn);
9ee6e8bb 9134 if ((op1 & 3) == 0) {
5e3f878a 9135 gen_add16(tmp, tmp2);
9ee6e8bb 9136 } else {
5e3f878a 9137 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9138 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9139 }
9140 }
6c95676b 9141 store_reg(s, rd, tmp);
9ee6e8bb
PB
9142 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9143 /* rev */
b0109805 9144 tmp = load_reg(s, rm);
9ee6e8bb
PB
9145 if (insn & (1 << 22)) {
9146 if (insn & (1 << 7)) {
b0109805 9147 gen_revsh(tmp);
9ee6e8bb
PB
9148 } else {
9149 ARCH(6T2);
b0109805 9150 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9151 }
9152 } else {
9153 if (insn & (1 << 7))
b0109805 9154 gen_rev16(tmp);
9ee6e8bb 9155 else
66896cb8 9156 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9157 }
b0109805 9158 store_reg(s, rd, tmp);
9ee6e8bb
PB
9159 } else {
9160 goto illegal_op;
9161 }
9162 break;
9163 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9164 switch ((insn >> 20) & 0x7) {
9165 case 5:
9166 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9167 /* op2 not 00x or 11x : UNDEF */
9168 goto illegal_op;
9169 }
838fa72d
AJ
9170 /* Signed multiply most significant [accumulate].
9171 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9172 tmp = load_reg(s, rm);
9173 tmp2 = load_reg(s, rs);
a7812ae4 9174 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9175
955a7dd5 9176 if (rd != 15) {
838fa72d 9177 tmp = load_reg(s, rd);
9ee6e8bb 9178 if (insn & (1 << 6)) {
838fa72d 9179 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9180 } else {
838fa72d 9181 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9182 }
9183 }
838fa72d
AJ
9184 if (insn & (1 << 5)) {
9185 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9186 }
9187 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9188 tmp = tcg_temp_new_i32();
ecc7b3aa 9189 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9190 tcg_temp_free_i64(tmp64);
955a7dd5 9191 store_reg(s, rn, tmp);
41e9564d
PM
9192 break;
9193 case 0:
9194 case 4:
9195 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9196 if (insn & (1 << 7)) {
9197 goto illegal_op;
9198 }
9199 tmp = load_reg(s, rm);
9200 tmp2 = load_reg(s, rs);
9ee6e8bb 9201 if (insn & (1 << 5))
5e3f878a
PB
9202 gen_swap_half(tmp2);
9203 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9204 if (insn & (1 << 22)) {
5e3f878a 9205 /* smlald, smlsld */
33bbd75a
PC
9206 TCGv_i64 tmp64_2;
9207
a7812ae4 9208 tmp64 = tcg_temp_new_i64();
33bbd75a 9209 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9210 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9211 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9212 tcg_temp_free_i32(tmp);
33bbd75a
PC
9213 tcg_temp_free_i32(tmp2);
9214 if (insn & (1 << 6)) {
9215 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9216 } else {
9217 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9218 }
9219 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9220 gen_addq(s, tmp64, rd, rn);
9221 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9222 tcg_temp_free_i64(tmp64);
9ee6e8bb 9223 } else {
5e3f878a 9224 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9225 if (insn & (1 << 6)) {
9226 /* This subtraction cannot overflow. */
9227 tcg_gen_sub_i32(tmp, tmp, tmp2);
9228 } else {
9229 /* This addition cannot overflow 32 bits;
9230 * however it may overflow considered as a
9231 * signed operation, in which case we must set
9232 * the Q flag.
9233 */
9234 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9235 }
9236 tcg_temp_free_i32(tmp2);
22478e79 9237 if (rd != 15)
9ee6e8bb 9238 {
22478e79 9239 tmp2 = load_reg(s, rd);
9ef39277 9240 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9241 tcg_temp_free_i32(tmp2);
9ee6e8bb 9242 }
22478e79 9243 store_reg(s, rn, tmp);
9ee6e8bb 9244 }
41e9564d 9245 break;
b8b8ea05
PM
9246 case 1:
9247 case 3:
9248 /* SDIV, UDIV */
d614a513 9249 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9250 goto illegal_op;
9251 }
9252 if (((insn >> 5) & 7) || (rd != 15)) {
9253 goto illegal_op;
9254 }
9255 tmp = load_reg(s, rm);
9256 tmp2 = load_reg(s, rs);
9257 if (insn & (1 << 21)) {
9258 gen_helper_udiv(tmp, tmp, tmp2);
9259 } else {
9260 gen_helper_sdiv(tmp, tmp, tmp2);
9261 }
9262 tcg_temp_free_i32(tmp2);
9263 store_reg(s, rn, tmp);
9264 break;
41e9564d
PM
9265 default:
9266 goto illegal_op;
9ee6e8bb
PB
9267 }
9268 break;
9269 case 3:
9270 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9271 switch (op1) {
9272 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9273 ARCH(6);
9274 tmp = load_reg(s, rm);
9275 tmp2 = load_reg(s, rs);
9276 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9277 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9278 if (rd != 15) {
9279 tmp2 = load_reg(s, rd);
6ddbc6e4 9280 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9281 tcg_temp_free_i32(tmp2);
9ee6e8bb 9282 }
ded9d295 9283 store_reg(s, rn, tmp);
9ee6e8bb
PB
9284 break;
9285 case 0x20: case 0x24: case 0x28: case 0x2c:
9286 /* Bitfield insert/clear. */
9287 ARCH(6T2);
9288 shift = (insn >> 7) & 0x1f;
9289 i = (insn >> 16) & 0x1f;
45140a57
KB
9290 if (i < shift) {
9291 /* UNPREDICTABLE; we choose to UNDEF */
9292 goto illegal_op;
9293 }
9ee6e8bb
PB
9294 i = i + 1 - shift;
9295 if (rm == 15) {
7d1b0095 9296 tmp = tcg_temp_new_i32();
5e3f878a 9297 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9298 } else {
5e3f878a 9299 tmp = load_reg(s, rm);
9ee6e8bb
PB
9300 }
9301 if (i != 32) {
5e3f878a 9302 tmp2 = load_reg(s, rd);
d593c48e 9303 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9304 tcg_temp_free_i32(tmp2);
9ee6e8bb 9305 }
5e3f878a 9306 store_reg(s, rd, tmp);
9ee6e8bb
PB
9307 break;
9308 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9309 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9310 ARCH(6T2);
5e3f878a 9311 tmp = load_reg(s, rm);
9ee6e8bb
PB
9312 shift = (insn >> 7) & 0x1f;
9313 i = ((insn >> 16) & 0x1f) + 1;
9314 if (shift + i > 32)
9315 goto illegal_op;
9316 if (i < 32) {
9317 if (op1 & 0x20) {
59a71b4c 9318 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9319 } else {
59a71b4c 9320 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9321 }
9322 }
5e3f878a 9323 store_reg(s, rd, tmp);
9ee6e8bb
PB
9324 break;
9325 default:
9326 goto illegal_op;
9327 }
9328 break;
9329 }
9330 break;
9331 }
9332 do_ldst:
9333 /* Check for undefined extension instructions
9334 * per the ARM Bible IE:
9335 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9336 */
9337 sh = (0xf << 20) | (0xf << 4);
9338 if (op1 == 0x7 && ((insn & sh) == sh))
9339 {
9340 goto illegal_op;
9341 }
9342 /* load/store byte/word */
9343 rn = (insn >> 16) & 0xf;
9344 rd = (insn >> 12) & 0xf;
b0109805 9345 tmp2 = load_reg(s, rn);
a99caa48
PM
9346 if ((insn & 0x01200000) == 0x00200000) {
9347 /* ldrt/strt */
579d21cc 9348 i = get_a32_user_mem_index(s);
a99caa48
PM
9349 } else {
9350 i = get_mem_index(s);
9351 }
9ee6e8bb 9352 if (insn & (1 << 24))
b0109805 9353 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9354 if (insn & (1 << 20)) {
9355 /* load */
5a839c0d 9356 tmp = tcg_temp_new_i32();
9ee6e8bb 9357 if (insn & (1 << 22)) {
9bb6558a 9358 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9359 } else {
9bb6558a 9360 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9361 }
9ee6e8bb
PB
9362 } else {
9363 /* store */
b0109805 9364 tmp = load_reg(s, rd);
5a839c0d 9365 if (insn & (1 << 22)) {
9bb6558a 9366 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9367 } else {
9bb6558a 9368 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9369 }
9370 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9371 }
9372 if (!(insn & (1 << 24))) {
b0109805
PB
9373 gen_add_data_offset(s, insn, tmp2);
9374 store_reg(s, rn, tmp2);
9375 } else if (insn & (1 << 21)) {
9376 store_reg(s, rn, tmp2);
9377 } else {
7d1b0095 9378 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9379 }
9380 if (insn & (1 << 20)) {
9381 /* Complete the load. */
7dcc1f89 9382 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9383 }
9384 break;
9385 case 0x08:
9386 case 0x09:
9387 {
da3e53dd
PM
9388 int j, n, loaded_base;
9389 bool exc_return = false;
9390 bool is_load = extract32(insn, 20, 1);
9391 bool user = false;
39d5492a 9392 TCGv_i32 loaded_var;
9ee6e8bb
PB
9393 /* load/store multiple words */
9394 /* XXX: store correct base if write back */
9ee6e8bb 9395 if (insn & (1 << 22)) {
da3e53dd 9396 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9397 if (IS_USER(s))
9398 goto illegal_op; /* only usable in supervisor mode */
9399
da3e53dd
PM
9400 if (is_load && extract32(insn, 15, 1)) {
9401 exc_return = true;
9402 } else {
9403 user = true;
9404 }
9ee6e8bb
PB
9405 }
9406 rn = (insn >> 16) & 0xf;
b0109805 9407 addr = load_reg(s, rn);
9ee6e8bb
PB
9408
9409 /* compute total size */
9410 loaded_base = 0;
39d5492a 9411 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9412 n = 0;
9413 for(i=0;i<16;i++) {
9414 if (insn & (1 << i))
9415 n++;
9416 }
9417 /* XXX: test invalid n == 0 case ? */
9418 if (insn & (1 << 23)) {
9419 if (insn & (1 << 24)) {
9420 /* pre increment */
b0109805 9421 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9422 } else {
9423 /* post increment */
9424 }
9425 } else {
9426 if (insn & (1 << 24)) {
9427 /* pre decrement */
b0109805 9428 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9429 } else {
9430 /* post decrement */
9431 if (n != 1)
b0109805 9432 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9433 }
9434 }
9435 j = 0;
9436 for(i=0;i<16;i++) {
9437 if (insn & (1 << i)) {
da3e53dd 9438 if (is_load) {
9ee6e8bb 9439 /* load */
5a839c0d 9440 tmp = tcg_temp_new_i32();
12dcc321 9441 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9442 if (user) {
b75263d6 9443 tmp2 = tcg_const_i32(i);
1ce94f81 9444 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9445 tcg_temp_free_i32(tmp2);
7d1b0095 9446 tcg_temp_free_i32(tmp);
9ee6e8bb 9447 } else if (i == rn) {
b0109805 9448 loaded_var = tmp;
9ee6e8bb 9449 loaded_base = 1;
fb0e8e79
PM
9450 } else if (rn == 15 && exc_return) {
9451 store_pc_exc_ret(s, tmp);
9ee6e8bb 9452 } else {
7dcc1f89 9453 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9454 }
9455 } else {
9456 /* store */
9457 if (i == 15) {
9458 /* special case: r15 = PC + 8 */
9459 val = (long)s->pc + 4;
7d1b0095 9460 tmp = tcg_temp_new_i32();
b0109805 9461 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9462 } else if (user) {
7d1b0095 9463 tmp = tcg_temp_new_i32();
b75263d6 9464 tmp2 = tcg_const_i32(i);
9ef39277 9465 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9466 tcg_temp_free_i32(tmp2);
9ee6e8bb 9467 } else {
b0109805 9468 tmp = load_reg(s, i);
9ee6e8bb 9469 }
12dcc321 9470 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9471 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9472 }
9473 j++;
9474 /* no need to add after the last transfer */
9475 if (j != n)
b0109805 9476 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9477 }
9478 }
9479 if (insn & (1 << 21)) {
9480 /* write back */
9481 if (insn & (1 << 23)) {
9482 if (insn & (1 << 24)) {
9483 /* pre increment */
9484 } else {
9485 /* post increment */
b0109805 9486 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9487 }
9488 } else {
9489 if (insn & (1 << 24)) {
9490 /* pre decrement */
9491 if (n != 1)
b0109805 9492 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9493 } else {
9494 /* post decrement */
b0109805 9495 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9496 }
9497 }
b0109805
PB
9498 store_reg(s, rn, addr);
9499 } else {
7d1b0095 9500 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9501 }
9502 if (loaded_base) {
b0109805 9503 store_reg(s, rn, loaded_var);
9ee6e8bb 9504 }
da3e53dd 9505 if (exc_return) {
9ee6e8bb 9506 /* Restore CPSR from SPSR. */
d9ba4830 9507 tmp = load_cpu_field(spsr);
235ea1f5 9508 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9509 tcg_temp_free_i32(tmp);
577bf808 9510 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9511 }
9512 }
9513 break;
9514 case 0xa:
9515 case 0xb:
9516 {
9517 int32_t offset;
9518
9519 /* branch (and link) */
9520 val = (int32_t)s->pc;
9521 if (insn & (1 << 24)) {
7d1b0095 9522 tmp = tcg_temp_new_i32();
5e3f878a
PB
9523 tcg_gen_movi_i32(tmp, val);
9524 store_reg(s, 14, tmp);
9ee6e8bb 9525 }
534df156
PM
9526 offset = sextract32(insn << 2, 0, 26);
9527 val += offset + 4;
9ee6e8bb
PB
9528 gen_jmp(s, val);
9529 }
9530 break;
9531 case 0xc:
9532 case 0xd:
9533 case 0xe:
6a57f3eb
WN
9534 if (((insn >> 8) & 0xe) == 10) {
9535 /* VFP. */
7dcc1f89 9536 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9537 goto illegal_op;
9538 }
7dcc1f89 9539 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9540 /* Coprocessor. */
9ee6e8bb 9541 goto illegal_op;
6a57f3eb 9542 }
9ee6e8bb
PB
9543 break;
9544 case 0xf:
9545 /* swi */
eaed129d 9546 gen_set_pc_im(s, s->pc);
d4a2dc67 9547 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9548 s->is_jmp = DISAS_SWI;
9549 break;
9550 default:
9551 illegal_op:
73710361
GB
9552 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9553 default_exception_el(s));
9ee6e8bb
PB
9554 break;
9555 }
9556 }
9557}
9558
/* Predicate: Thumb-2 data-processing opcodes 0-7 (AND, BIC, ORR, ORN,
 * EOR, ...) are the logical operations; 8 and above are arithmetic.
 * Returns nonzero (1) for a logical op, 0 otherwise.
 */
static int
thumb2_logic_op(int op)
{
    if (op < 8) {
        return 1;
    }
    return 0;
}
9565
9566/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9567 then set condition code flags based on the result of the operation.
9568 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9569 to the high bit of T1.
9570 Returns zero if the opcode is valid. */
9571
9572static int
39d5492a
PM
9573gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9574 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9575{
9576 int logic_cc;
9577
9578 logic_cc = 0;
9579 switch (op) {
9580 case 0: /* and */
396e467c 9581 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9582 logic_cc = conds;
9583 break;
9584 case 1: /* bic */
f669df27 9585 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9586 logic_cc = conds;
9587 break;
9588 case 2: /* orr */
396e467c 9589 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9590 logic_cc = conds;
9591 break;
9592 case 3: /* orn */
29501f1b 9593 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9594 logic_cc = conds;
9595 break;
9596 case 4: /* eor */
396e467c 9597 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9598 logic_cc = conds;
9599 break;
9600 case 8: /* add */
9601 if (conds)
72485ec4 9602 gen_add_CC(t0, t0, t1);
9ee6e8bb 9603 else
396e467c 9604 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9605 break;
9606 case 10: /* adc */
9607 if (conds)
49b4c31e 9608 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9609 else
396e467c 9610 gen_adc(t0, t1);
9ee6e8bb
PB
9611 break;
9612 case 11: /* sbc */
2de68a49
RH
9613 if (conds) {
9614 gen_sbc_CC(t0, t0, t1);
9615 } else {
396e467c 9616 gen_sub_carry(t0, t0, t1);
2de68a49 9617 }
9ee6e8bb
PB
9618 break;
9619 case 13: /* sub */
9620 if (conds)
72485ec4 9621 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9622 else
396e467c 9623 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9624 break;
9625 case 14: /* rsb */
9626 if (conds)
72485ec4 9627 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9628 else
396e467c 9629 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9630 break;
9631 default: /* 5, 6, 7, 9, 12, 15. */
9632 return 1;
9633 }
9634 if (logic_cc) {
396e467c 9635 gen_logic_CC(t0);
9ee6e8bb 9636 if (shifter_out)
396e467c 9637 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9638 }
9639 return 0;
9640}
9641
9642/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9643 is not legal. */
0ecb72a5 9644static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9645{
b0109805 9646 uint32_t insn, imm, shift, offset;
9ee6e8bb 9647 uint32_t rd, rn, rm, rs;
39d5492a
PM
9648 TCGv_i32 tmp;
9649 TCGv_i32 tmp2;
9650 TCGv_i32 tmp3;
9651 TCGv_i32 addr;
a7812ae4 9652 TCGv_i64 tmp64;
9ee6e8bb
PB
9653 int op;
9654 int shiftop;
9655 int conds;
9656 int logic_cc;
9657
d614a513
PM
9658 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9659 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9660 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9661 16-bit instructions to get correct prefetch abort behavior. */
9662 insn = insn_hw1;
9663 if ((insn & (1 << 12)) == 0) {
be5e7a76 9664 ARCH(5);
9ee6e8bb
PB
9665 /* Second half of blx. */
9666 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9667 tmp = load_reg(s, 14);
9668 tcg_gen_addi_i32(tmp, tmp, offset);
9669 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9670
7d1b0095 9671 tmp2 = tcg_temp_new_i32();
b0109805 9672 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9673 store_reg(s, 14, tmp2);
9674 gen_bx(s, tmp);
9ee6e8bb
PB
9675 return 0;
9676 }
9677 if (insn & (1 << 11)) {
9678 /* Second half of bl. */
9679 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9680 tmp = load_reg(s, 14);
6a0d8a1d 9681 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9682
7d1b0095 9683 tmp2 = tcg_temp_new_i32();
b0109805 9684 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9685 store_reg(s, 14, tmp2);
9686 gen_bx(s, tmp);
9ee6e8bb
PB
9687 return 0;
9688 }
9689 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9690 /* Instruction spans a page boundary. Implement it as two
9691 16-bit instructions in case the second half causes an
9692 prefetch abort. */
9693 offset = ((int32_t)insn << 21) >> 9;
396e467c 9694 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9695 return 0;
9696 }
9697 /* Fall through to 32-bit decode. */
9698 }
9699
f9fd40eb 9700 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9701 s->pc += 2;
9702 insn |= (uint32_t)insn_hw1 << 16;
9703
9704 if ((insn & 0xf800e800) != 0xf000e800) {
9705 ARCH(6T2);
9706 }
9707
9708 rn = (insn >> 16) & 0xf;
9709 rs = (insn >> 12) & 0xf;
9710 rd = (insn >> 8) & 0xf;
9711 rm = insn & 0xf;
9712 switch ((insn >> 25) & 0xf) {
9713 case 0: case 1: case 2: case 3:
9714 /* 16-bit instructions. Should never happen. */
9715 abort();
9716 case 4:
9717 if (insn & (1 << 22)) {
9718 /* Other load/store, table branch. */
9719 if (insn & 0x01200000) {
9720 /* Load/store doubleword. */
9721 if (rn == 15) {
7d1b0095 9722 addr = tcg_temp_new_i32();
b0109805 9723 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9724 } else {
b0109805 9725 addr = load_reg(s, rn);
9ee6e8bb
PB
9726 }
9727 offset = (insn & 0xff) * 4;
9728 if ((insn & (1 << 23)) == 0)
9729 offset = -offset;
9730 if (insn & (1 << 24)) {
b0109805 9731 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9732 offset = 0;
9733 }
9734 if (insn & (1 << 20)) {
9735 /* ldrd */
e2592fad 9736 tmp = tcg_temp_new_i32();
12dcc321 9737 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9738 store_reg(s, rs, tmp);
9739 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9740 tmp = tcg_temp_new_i32();
12dcc321 9741 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9742 store_reg(s, rd, tmp);
9ee6e8bb
PB
9743 } else {
9744 /* strd */
b0109805 9745 tmp = load_reg(s, rs);
12dcc321 9746 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9747 tcg_temp_free_i32(tmp);
b0109805
PB
9748 tcg_gen_addi_i32(addr, addr, 4);
9749 tmp = load_reg(s, rd);
12dcc321 9750 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9751 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9752 }
9753 if (insn & (1 << 21)) {
9754 /* Base writeback. */
9755 if (rn == 15)
9756 goto illegal_op;
b0109805
PB
9757 tcg_gen_addi_i32(addr, addr, offset - 4);
9758 store_reg(s, rn, addr);
9759 } else {
7d1b0095 9760 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9761 }
9762 } else if ((insn & (1 << 23)) == 0) {
9763 /* Load/store exclusive word. */
39d5492a 9764 addr = tcg_temp_local_new_i32();
98a46317 9765 load_reg_var(s, addr, rn);
426f5abc 9766 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9767 if (insn & (1 << 20)) {
426f5abc 9768 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9769 } else {
426f5abc 9770 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9771 }
39d5492a 9772 tcg_temp_free_i32(addr);
2359bf80 9773 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9774 /* Table Branch. */
9775 if (rn == 15) {
7d1b0095 9776 addr = tcg_temp_new_i32();
b0109805 9777 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9778 } else {
b0109805 9779 addr = load_reg(s, rn);
9ee6e8bb 9780 }
b26eefb6 9781 tmp = load_reg(s, rm);
b0109805 9782 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9783 if (insn & (1 << 4)) {
9784 /* tbh */
b0109805 9785 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9786 tcg_temp_free_i32(tmp);
e2592fad 9787 tmp = tcg_temp_new_i32();
12dcc321 9788 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9789 } else { /* tbb */
7d1b0095 9790 tcg_temp_free_i32(tmp);
e2592fad 9791 tmp = tcg_temp_new_i32();
12dcc321 9792 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9793 }
7d1b0095 9794 tcg_temp_free_i32(addr);
b0109805
PB
9795 tcg_gen_shli_i32(tmp, tmp, 1);
9796 tcg_gen_addi_i32(tmp, tmp, s->pc);
9797 store_reg(s, 15, tmp);
9ee6e8bb 9798 } else {
2359bf80 9799 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9800 op = (insn >> 4) & 0x3;
2359bf80
MR
9801 switch (op2) {
9802 case 0:
426f5abc 9803 goto illegal_op;
2359bf80
MR
9804 case 1:
9805 /* Load/store exclusive byte/halfword/doubleword */
9806 if (op == 2) {
9807 goto illegal_op;
9808 }
9809 ARCH(7);
9810 break;
9811 case 2:
9812 /* Load-acquire/store-release */
9813 if (op == 3) {
9814 goto illegal_op;
9815 }
9816 /* Fall through */
9817 case 3:
9818 /* Load-acquire/store-release exclusive */
9819 ARCH(8);
9820 break;
426f5abc 9821 }
39d5492a 9822 addr = tcg_temp_local_new_i32();
98a46317 9823 load_reg_var(s, addr, rn);
2359bf80
MR
9824 if (!(op2 & 1)) {
9825 if (insn & (1 << 20)) {
9826 tmp = tcg_temp_new_i32();
9827 switch (op) {
9828 case 0: /* ldab */
9bb6558a
PM
9829 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9830 rs | ISSIsAcqRel);
2359bf80
MR
9831 break;
9832 case 1: /* ldah */
9bb6558a
PM
9833 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9834 rs | ISSIsAcqRel);
2359bf80
MR
9835 break;
9836 case 2: /* lda */
9bb6558a
PM
9837 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9838 rs | ISSIsAcqRel);
2359bf80
MR
9839 break;
9840 default:
9841 abort();
9842 }
9843 store_reg(s, rs, tmp);
9844 } else {
9845 tmp = load_reg(s, rs);
9846 switch (op) {
9847 case 0: /* stlb */
9bb6558a
PM
9848 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9849 rs | ISSIsAcqRel);
2359bf80
MR
9850 break;
9851 case 1: /* stlh */
9bb6558a
PM
9852 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9853 rs | ISSIsAcqRel);
2359bf80
MR
9854 break;
9855 case 2: /* stl */
9bb6558a
PM
9856 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9857 rs | ISSIsAcqRel);
2359bf80
MR
9858 break;
9859 default:
9860 abort();
9861 }
9862 tcg_temp_free_i32(tmp);
9863 }
9864 } else if (insn & (1 << 20)) {
426f5abc 9865 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9866 } else {
426f5abc 9867 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9868 }
39d5492a 9869 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9870 }
9871 } else {
9872 /* Load/store multiple, RFE, SRS. */
9873 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9874 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9875 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9876 goto illegal_op;
00115976 9877 }
9ee6e8bb
PB
9878 if (insn & (1 << 20)) {
9879 /* rfe */
b0109805
PB
9880 addr = load_reg(s, rn);
9881 if ((insn & (1 << 24)) == 0)
9882 tcg_gen_addi_i32(addr, addr, -8);
9883 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9884 tmp = tcg_temp_new_i32();
12dcc321 9885 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9886 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9887 tmp2 = tcg_temp_new_i32();
12dcc321 9888 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9889 if (insn & (1 << 21)) {
9890 /* Base writeback. */
b0109805
PB
9891 if (insn & (1 << 24)) {
9892 tcg_gen_addi_i32(addr, addr, 4);
9893 } else {
9894 tcg_gen_addi_i32(addr, addr, -4);
9895 }
9896 store_reg(s, rn, addr);
9897 } else {
7d1b0095 9898 tcg_temp_free_i32(addr);
9ee6e8bb 9899 }
b0109805 9900 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9901 } else {
9902 /* srs */
81465888
PM
9903 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9904 insn & (1 << 21));
9ee6e8bb
PB
9905 }
9906 } else {
5856d44e 9907 int i, loaded_base = 0;
39d5492a 9908 TCGv_i32 loaded_var;
9ee6e8bb 9909 /* Load/store multiple. */
b0109805 9910 addr = load_reg(s, rn);
9ee6e8bb
PB
9911 offset = 0;
9912 for (i = 0; i < 16; i++) {
9913 if (insn & (1 << i))
9914 offset += 4;
9915 }
9916 if (insn & (1 << 24)) {
b0109805 9917 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9918 }
9919
39d5492a 9920 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9921 for (i = 0; i < 16; i++) {
9922 if ((insn & (1 << i)) == 0)
9923 continue;
9924 if (insn & (1 << 20)) {
9925 /* Load. */
e2592fad 9926 tmp = tcg_temp_new_i32();
12dcc321 9927 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9928 if (i == 15) {
3bb8a96f 9929 gen_bx_excret(s, tmp);
5856d44e
YO
9930 } else if (i == rn) {
9931 loaded_var = tmp;
9932 loaded_base = 1;
9ee6e8bb 9933 } else {
b0109805 9934 store_reg(s, i, tmp);
9ee6e8bb
PB
9935 }
9936 } else {
9937 /* Store. */
b0109805 9938 tmp = load_reg(s, i);
12dcc321 9939 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9940 tcg_temp_free_i32(tmp);
9ee6e8bb 9941 }
b0109805 9942 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9943 }
5856d44e
YO
9944 if (loaded_base) {
9945 store_reg(s, rn, loaded_var);
9946 }
9ee6e8bb
PB
9947 if (insn & (1 << 21)) {
9948 /* Base register writeback. */
9949 if (insn & (1 << 24)) {
b0109805 9950 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9951 }
9952 /* Fault if writeback register is in register list. */
9953 if (insn & (1 << rn))
9954 goto illegal_op;
b0109805
PB
9955 store_reg(s, rn, addr);
9956 } else {
7d1b0095 9957 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9958 }
9959 }
9960 }
9961 break;
2af9ab77
JB
9962 case 5:
9963
9ee6e8bb 9964 op = (insn >> 21) & 0xf;
2af9ab77 9965 if (op == 6) {
62b44f05
AR
9966 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9967 goto illegal_op;
9968 }
2af9ab77
JB
9969 /* Halfword pack. */
9970 tmp = load_reg(s, rn);
9971 tmp2 = load_reg(s, rm);
9972 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9973 if (insn & (1 << 5)) {
9974 /* pkhtb */
9975 if (shift == 0)
9976 shift = 31;
9977 tcg_gen_sari_i32(tmp2, tmp2, shift);
9978 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9979 tcg_gen_ext16u_i32(tmp2, tmp2);
9980 } else {
9981 /* pkhbt */
9982 if (shift)
9983 tcg_gen_shli_i32(tmp2, tmp2, shift);
9984 tcg_gen_ext16u_i32(tmp, tmp);
9985 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9986 }
9987 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9988 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9989 store_reg(s, rd, tmp);
9990 } else {
2af9ab77
JB
9991 /* Data processing register constant shift. */
9992 if (rn == 15) {
7d1b0095 9993 tmp = tcg_temp_new_i32();
2af9ab77
JB
9994 tcg_gen_movi_i32(tmp, 0);
9995 } else {
9996 tmp = load_reg(s, rn);
9997 }
9998 tmp2 = load_reg(s, rm);
9999
10000 shiftop = (insn >> 4) & 3;
10001 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10002 conds = (insn & (1 << 20)) != 0;
10003 logic_cc = (conds && thumb2_logic_op(op));
10004 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10005 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10006 goto illegal_op;
7d1b0095 10007 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10008 if (rd != 15) {
10009 store_reg(s, rd, tmp);
10010 } else {
7d1b0095 10011 tcg_temp_free_i32(tmp);
2af9ab77 10012 }
3174f8e9 10013 }
9ee6e8bb
PB
10014 break;
10015 case 13: /* Misc data processing. */
10016 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10017 if (op < 4 && (insn & 0xf000) != 0xf000)
10018 goto illegal_op;
10019 switch (op) {
10020 case 0: /* Register controlled shift. */
8984bd2e
PB
10021 tmp = load_reg(s, rn);
10022 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10023 if ((insn & 0x70) != 0)
10024 goto illegal_op;
10025 op = (insn >> 21) & 3;
8984bd2e
PB
10026 logic_cc = (insn & (1 << 20)) != 0;
10027 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10028 if (logic_cc)
10029 gen_logic_CC(tmp);
bedb8a6b 10030 store_reg(s, rd, tmp);
9ee6e8bb
PB
10031 break;
10032 case 1: /* Sign/zero extend. */
62b44f05
AR
10033 op = (insn >> 20) & 7;
10034 switch (op) {
10035 case 0: /* SXTAH, SXTH */
10036 case 1: /* UXTAH, UXTH */
10037 case 4: /* SXTAB, SXTB */
10038 case 5: /* UXTAB, UXTB */
10039 break;
10040 case 2: /* SXTAB16, SXTB16 */
10041 case 3: /* UXTAB16, UXTB16 */
10042 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10043 goto illegal_op;
10044 }
10045 break;
10046 default:
10047 goto illegal_op;
10048 }
10049 if (rn != 15) {
10050 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10051 goto illegal_op;
10052 }
10053 }
5e3f878a 10054 tmp = load_reg(s, rm);
9ee6e8bb 10055 shift = (insn >> 4) & 3;
1301f322 10056 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10057 rotate, a shift is sufficient. */
10058 if (shift != 0)
f669df27 10059 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10060 op = (insn >> 20) & 7;
10061 switch (op) {
5e3f878a
PB
10062 case 0: gen_sxth(tmp); break;
10063 case 1: gen_uxth(tmp); break;
10064 case 2: gen_sxtb16(tmp); break;
10065 case 3: gen_uxtb16(tmp); break;
10066 case 4: gen_sxtb(tmp); break;
10067 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10068 default:
10069 g_assert_not_reached();
9ee6e8bb
PB
10070 }
10071 if (rn != 15) {
5e3f878a 10072 tmp2 = load_reg(s, rn);
9ee6e8bb 10073 if ((op >> 1) == 1) {
5e3f878a 10074 gen_add16(tmp, tmp2);
9ee6e8bb 10075 } else {
5e3f878a 10076 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10077 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10078 }
10079 }
5e3f878a 10080 store_reg(s, rd, tmp);
9ee6e8bb
PB
10081 break;
10082 case 2: /* SIMD add/subtract. */
62b44f05
AR
10083 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10084 goto illegal_op;
10085 }
9ee6e8bb
PB
10086 op = (insn >> 20) & 7;
10087 shift = (insn >> 4) & 7;
10088 if ((op & 3) == 3 || (shift & 3) == 3)
10089 goto illegal_op;
6ddbc6e4
PB
10090 tmp = load_reg(s, rn);
10091 tmp2 = load_reg(s, rm);
10092 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10093 tcg_temp_free_i32(tmp2);
6ddbc6e4 10094 store_reg(s, rd, tmp);
9ee6e8bb
PB
10095 break;
10096 case 3: /* Other data processing. */
10097 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10098 if (op < 4) {
10099 /* Saturating add/subtract. */
62b44f05
AR
10100 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10101 goto illegal_op;
10102 }
d9ba4830
PB
10103 tmp = load_reg(s, rn);
10104 tmp2 = load_reg(s, rm);
9ee6e8bb 10105 if (op & 1)
9ef39277 10106 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10107 if (op & 2)
9ef39277 10108 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10109 else
9ef39277 10110 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10111 tcg_temp_free_i32(tmp2);
9ee6e8bb 10112 } else {
62b44f05
AR
10113 switch (op) {
10114 case 0x0a: /* rbit */
10115 case 0x08: /* rev */
10116 case 0x09: /* rev16 */
10117 case 0x0b: /* revsh */
10118 case 0x18: /* clz */
10119 break;
10120 case 0x10: /* sel */
10121 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10122 goto illegal_op;
10123 }
10124 break;
10125 case 0x20: /* crc32/crc32c */
10126 case 0x21:
10127 case 0x22:
10128 case 0x28:
10129 case 0x29:
10130 case 0x2a:
10131 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10132 goto illegal_op;
10133 }
10134 break;
10135 default:
10136 goto illegal_op;
10137 }
d9ba4830 10138 tmp = load_reg(s, rn);
9ee6e8bb
PB
10139 switch (op) {
10140 case 0x0a: /* rbit */
d9ba4830 10141 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10142 break;
10143 case 0x08: /* rev */
66896cb8 10144 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10145 break;
10146 case 0x09: /* rev16 */
d9ba4830 10147 gen_rev16(tmp);
9ee6e8bb
PB
10148 break;
10149 case 0x0b: /* revsh */
d9ba4830 10150 gen_revsh(tmp);
9ee6e8bb
PB
10151 break;
10152 case 0x10: /* sel */
d9ba4830 10153 tmp2 = load_reg(s, rm);
7d1b0095 10154 tmp3 = tcg_temp_new_i32();
0ecb72a5 10155 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10156 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10157 tcg_temp_free_i32(tmp3);
10158 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10159 break;
10160 case 0x18: /* clz */
7539a012 10161 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10162 break;
eb0ecd5a
WN
10163 case 0x20:
10164 case 0x21:
10165 case 0x22:
10166 case 0x28:
10167 case 0x29:
10168 case 0x2a:
10169 {
10170 /* crc32/crc32c */
10171 uint32_t sz = op & 0x3;
10172 uint32_t c = op & 0x8;
10173
eb0ecd5a 10174 tmp2 = load_reg(s, rm);
aa633469
PM
10175 if (sz == 0) {
10176 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10177 } else if (sz == 1) {
10178 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10179 }
eb0ecd5a
WN
10180 tmp3 = tcg_const_i32(1 << sz);
10181 if (c) {
10182 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10183 } else {
10184 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10185 }
10186 tcg_temp_free_i32(tmp2);
10187 tcg_temp_free_i32(tmp3);
10188 break;
10189 }
9ee6e8bb 10190 default:
62b44f05 10191 g_assert_not_reached();
9ee6e8bb
PB
10192 }
10193 }
d9ba4830 10194 store_reg(s, rd, tmp);
9ee6e8bb
PB
10195 break;
10196 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10197 switch ((insn >> 20) & 7) {
10198 case 0: /* 32 x 32 -> 32 */
10199 case 7: /* Unsigned sum of absolute differences. */
10200 break;
10201 case 1: /* 16 x 16 -> 32 */
10202 case 2: /* Dual multiply add. */
10203 case 3: /* 32 * 16 -> 32msb */
10204 case 4: /* Dual multiply subtract. */
10205 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10206 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10207 goto illegal_op;
10208 }
10209 break;
10210 }
9ee6e8bb 10211 op = (insn >> 4) & 0xf;
d9ba4830
PB
10212 tmp = load_reg(s, rn);
10213 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10214 switch ((insn >> 20) & 7) {
10215 case 0: /* 32 x 32 -> 32 */
d9ba4830 10216 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10217 tcg_temp_free_i32(tmp2);
9ee6e8bb 10218 if (rs != 15) {
d9ba4830 10219 tmp2 = load_reg(s, rs);
9ee6e8bb 10220 if (op)
d9ba4830 10221 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10222 else
d9ba4830 10223 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10224 tcg_temp_free_i32(tmp2);
9ee6e8bb 10225 }
9ee6e8bb
PB
10226 break;
10227 case 1: /* 16 x 16 -> 32 */
d9ba4830 10228 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10229 tcg_temp_free_i32(tmp2);
9ee6e8bb 10230 if (rs != 15) {
d9ba4830 10231 tmp2 = load_reg(s, rs);
9ef39277 10232 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10233 tcg_temp_free_i32(tmp2);
9ee6e8bb 10234 }
9ee6e8bb
PB
10235 break;
10236 case 2: /* Dual multiply add. */
10237 case 4: /* Dual multiply subtract. */
10238 if (op)
d9ba4830
PB
10239 gen_swap_half(tmp2);
10240 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10241 if (insn & (1 << 22)) {
e1d177b9 10242 /* This subtraction cannot overflow. */
d9ba4830 10243 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10244 } else {
e1d177b9
PM
10245 /* This addition cannot overflow 32 bits;
10246 * however it may overflow considered as a signed
10247 * operation, in which case we must set the Q flag.
10248 */
9ef39277 10249 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10250 }
7d1b0095 10251 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10252 if (rs != 15)
10253 {
d9ba4830 10254 tmp2 = load_reg(s, rs);
9ef39277 10255 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10256 tcg_temp_free_i32(tmp2);
9ee6e8bb 10257 }
9ee6e8bb
PB
10258 break;
10259 case 3: /* 32 * 16 -> 32msb */
10260 if (op)
d9ba4830 10261 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10262 else
d9ba4830 10263 gen_sxth(tmp2);
a7812ae4
PB
10264 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10265 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10266 tmp = tcg_temp_new_i32();
ecc7b3aa 10267 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10268 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10269 if (rs != 15)
10270 {
d9ba4830 10271 tmp2 = load_reg(s, rs);
9ef39277 10272 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10273 tcg_temp_free_i32(tmp2);
9ee6e8bb 10274 }
9ee6e8bb 10275 break;
838fa72d
AJ
10276 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10277 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10278 if (rs != 15) {
838fa72d
AJ
10279 tmp = load_reg(s, rs);
10280 if (insn & (1 << 20)) {
10281 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10282 } else {
838fa72d 10283 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10284 }
2c0262af 10285 }
838fa72d
AJ
10286 if (insn & (1 << 4)) {
10287 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10288 }
10289 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10290 tmp = tcg_temp_new_i32();
ecc7b3aa 10291 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10292 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10293 break;
10294 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10295 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10296 tcg_temp_free_i32(tmp2);
9ee6e8bb 10297 if (rs != 15) {
d9ba4830
PB
10298 tmp2 = load_reg(s, rs);
10299 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10300 tcg_temp_free_i32(tmp2);
5fd46862 10301 }
9ee6e8bb 10302 break;
2c0262af 10303 }
d9ba4830 10304 store_reg(s, rd, tmp);
2c0262af 10305 break;
9ee6e8bb
PB
10306 case 6: case 7: /* 64-bit multiply, Divide. */
10307 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10308 tmp = load_reg(s, rn);
10309 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10310 if ((op & 0x50) == 0x10) {
10311 /* sdiv, udiv */
d614a513 10312 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10313 goto illegal_op;
47789990 10314 }
9ee6e8bb 10315 if (op & 0x20)
5e3f878a 10316 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10317 else
5e3f878a 10318 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10319 tcg_temp_free_i32(tmp2);
5e3f878a 10320 store_reg(s, rd, tmp);
9ee6e8bb
PB
10321 } else if ((op & 0xe) == 0xc) {
10322 /* Dual multiply accumulate long. */
62b44f05
AR
10323 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10324 tcg_temp_free_i32(tmp);
10325 tcg_temp_free_i32(tmp2);
10326 goto illegal_op;
10327 }
9ee6e8bb 10328 if (op & 1)
5e3f878a
PB
10329 gen_swap_half(tmp2);
10330 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10331 if (op & 0x10) {
5e3f878a 10332 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10333 } else {
5e3f878a 10334 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10335 }
7d1b0095 10336 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10337 /* BUGFIX */
10338 tmp64 = tcg_temp_new_i64();
10339 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10340 tcg_temp_free_i32(tmp);
a7812ae4
PB
10341 gen_addq(s, tmp64, rs, rd);
10342 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10343 tcg_temp_free_i64(tmp64);
2c0262af 10344 } else {
9ee6e8bb
PB
10345 if (op & 0x20) {
10346 /* Unsigned 64-bit multiply */
a7812ae4 10347 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10348 } else {
9ee6e8bb
PB
10349 if (op & 8) {
10350 /* smlalxy */
62b44f05
AR
10351 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10352 tcg_temp_free_i32(tmp2);
10353 tcg_temp_free_i32(tmp);
10354 goto illegal_op;
10355 }
5e3f878a 10356 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10357 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10358 tmp64 = tcg_temp_new_i64();
10359 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10360 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10361 } else {
10362 /* Signed 64-bit multiply */
a7812ae4 10363 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10364 }
b5ff1b31 10365 }
9ee6e8bb
PB
10366 if (op & 4) {
10367 /* umaal */
62b44f05
AR
10368 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10369 tcg_temp_free_i64(tmp64);
10370 goto illegal_op;
10371 }
a7812ae4
PB
10372 gen_addq_lo(s, tmp64, rs);
10373 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10374 } else if (op & 0x40) {
10375 /* 64-bit accumulate. */
a7812ae4 10376 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10377 }
a7812ae4 10378 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10379 tcg_temp_free_i64(tmp64);
5fd46862 10380 }
2c0262af 10381 break;
9ee6e8bb
PB
10382 }
10383 break;
10384 case 6: case 7: case 14: case 15:
10385 /* Coprocessor. */
7517748e
PM
10386 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10387 /* We don't currently implement M profile FP support,
10388 * so this entire space should give a NOCP fault.
10389 */
10390 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10391 default_exception_el(s));
10392 break;
10393 }
9ee6e8bb
PB
10394 if (((insn >> 24) & 3) == 3) {
10395 /* Translate into the equivalent ARM encoding. */
f06053e3 10396 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10397 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10398 goto illegal_op;
7dcc1f89 10399 }
6a57f3eb 10400 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10401 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10402 goto illegal_op;
10403 }
9ee6e8bb
PB
10404 } else {
10405 if (insn & (1 << 28))
10406 goto illegal_op;
7dcc1f89 10407 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10408 goto illegal_op;
7dcc1f89 10409 }
9ee6e8bb
PB
10410 }
10411 break;
10412 case 8: case 9: case 10: case 11:
10413 if (insn & (1 << 15)) {
10414 /* Branches, misc control. */
10415 if (insn & 0x5000) {
10416 /* Unconditional branch. */
10417 /* signextend(hw1[10:0]) -> offset[:12]. */
10418 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10419 /* hw1[10:0] -> offset[11:1]. */
10420 offset |= (insn & 0x7ff) << 1;
10421 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10422 offset[24:22] already have the same value because of the
10423 sign extension above. */
10424 offset ^= ((~insn) & (1 << 13)) << 10;
10425 offset ^= ((~insn) & (1 << 11)) << 11;
10426
9ee6e8bb
PB
10427 if (insn & (1 << 14)) {
10428 /* Branch and link. */
3174f8e9 10429 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10430 }
3b46e624 10431
b0109805 10432 offset += s->pc;
9ee6e8bb
PB
10433 if (insn & (1 << 12)) {
10434 /* b/bl */
b0109805 10435 gen_jmp(s, offset);
9ee6e8bb
PB
10436 } else {
10437 /* blx */
b0109805 10438 offset &= ~(uint32_t)2;
be5e7a76 10439 /* thumb2 bx, no need to check */
b0109805 10440 gen_bx_im(s, offset);
2c0262af 10441 }
9ee6e8bb
PB
10442 } else if (((insn >> 23) & 7) == 7) {
10443 /* Misc control */
10444 if (insn & (1 << 13))
10445 goto illegal_op;
10446
10447 if (insn & (1 << 26)) {
001b3cab
PM
10448 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10449 goto illegal_op;
10450 }
37e6456e
PM
10451 if (!(insn & (1 << 20))) {
10452 /* Hypervisor call (v7) */
10453 int imm16 = extract32(insn, 16, 4) << 12
10454 | extract32(insn, 0, 12);
10455 ARCH(7);
10456 if (IS_USER(s)) {
10457 goto illegal_op;
10458 }
10459 gen_hvc(s, imm16);
10460 } else {
10461 /* Secure monitor call (v6+) */
10462 ARCH(6K);
10463 if (IS_USER(s)) {
10464 goto illegal_op;
10465 }
10466 gen_smc(s);
10467 }
2c0262af 10468 } else {
9ee6e8bb
PB
10469 op = (insn >> 20) & 7;
10470 switch (op) {
10471 case 0: /* msr cpsr. */
b53d8923 10472 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10473 tmp = load_reg(s, rn);
b28b3377
PM
10474 /* the constant is the mask and SYSm fields */
10475 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10476 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10477 tcg_temp_free_i32(addr);
7d1b0095 10478 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10479 gen_lookup_tb(s);
10480 break;
10481 }
10482 /* fall through */
10483 case 1: /* msr spsr. */
b53d8923 10484 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10485 goto illegal_op;
b53d8923 10486 }
8bfd0550
PM
10487
10488 if (extract32(insn, 5, 1)) {
10489 /* MSR (banked) */
10490 int sysm = extract32(insn, 8, 4) |
10491 (extract32(insn, 4, 1) << 4);
10492 int r = op & 1;
10493
10494 gen_msr_banked(s, r, sysm, rm);
10495 break;
10496 }
10497
10498 /* MSR (for PSRs) */
2fbac54b
FN
10499 tmp = load_reg(s, rn);
10500 if (gen_set_psr(s,
7dcc1f89 10501 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10502 op == 1, tmp))
9ee6e8bb
PB
10503 goto illegal_op;
10504 break;
10505 case 2: /* cps, nop-hint. */
10506 if (((insn >> 8) & 7) == 0) {
10507 gen_nop_hint(s, insn & 0xff);
10508 }
10509 /* Implemented as NOP in user mode. */
10510 if (IS_USER(s))
10511 break;
10512 offset = 0;
10513 imm = 0;
10514 if (insn & (1 << 10)) {
10515 if (insn & (1 << 7))
10516 offset |= CPSR_A;
10517 if (insn & (1 << 6))
10518 offset |= CPSR_I;
10519 if (insn & (1 << 5))
10520 offset |= CPSR_F;
10521 if (insn & (1 << 9))
10522 imm = CPSR_A | CPSR_I | CPSR_F;
10523 }
10524 if (insn & (1 << 8)) {
10525 offset |= 0x1f;
10526 imm |= (insn & 0x1f);
10527 }
10528 if (offset) {
2fbac54b 10529 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10530 }
10531 break;
10532 case 3: /* Special control operations. */
426f5abc 10533 ARCH(7);
9ee6e8bb
PB
10534 op = (insn >> 4) & 0xf;
10535 switch (op) {
10536 case 2: /* clrex */
426f5abc 10537 gen_clrex(s);
9ee6e8bb
PB
10538 break;
10539 case 4: /* dsb */
10540 case 5: /* dmb */
61e4c432 10541 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10542 break;
6df99dec
SS
10543 case 6: /* isb */
10544 /* We need to break the TB after this insn
10545 * to execute self-modifying code correctly
10546 * and also to take any pending interrupts
10547 * immediately.
10548 */
10549 gen_lookup_tb(s);
10550 break;
9ee6e8bb
PB
10551 default:
10552 goto illegal_op;
10553 }
10554 break;
10555 case 4: /* bxj */
9d7c59c8
PM
10556 /* Trivial implementation equivalent to bx.
10557 * This instruction doesn't exist at all for M-profile.
10558 */
10559 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10560 goto illegal_op;
10561 }
d9ba4830
PB
10562 tmp = load_reg(s, rn);
10563 gen_bx(s, tmp);
9ee6e8bb
PB
10564 break;
10565 case 5: /* Exception return. */
b8b45b68
RV
10566 if (IS_USER(s)) {
10567 goto illegal_op;
10568 }
10569 if (rn != 14 || rd != 15) {
10570 goto illegal_op;
10571 }
10572 tmp = load_reg(s, rn);
10573 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10574 gen_exception_return(s, tmp);
10575 break;
8bfd0550 10576 case 6: /* MRS */
43ac6574
PM
10577 if (extract32(insn, 5, 1) &&
10578 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10579 /* MRS (banked) */
10580 int sysm = extract32(insn, 16, 4) |
10581 (extract32(insn, 4, 1) << 4);
10582
10583 gen_mrs_banked(s, 0, sysm, rd);
10584 break;
10585 }
10586
3d54026f
PM
10587 if (extract32(insn, 16, 4) != 0xf) {
10588 goto illegal_op;
10589 }
10590 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10591 extract32(insn, 0, 8) != 0) {
10592 goto illegal_op;
10593 }
10594
8bfd0550 10595 /* mrs cpsr */
7d1b0095 10596 tmp = tcg_temp_new_i32();
b53d8923 10597 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10598 addr = tcg_const_i32(insn & 0xff);
10599 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10600 tcg_temp_free_i32(addr);
9ee6e8bb 10601 } else {
9ef39277 10602 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10603 }
8984bd2e 10604 store_reg(s, rd, tmp);
9ee6e8bb 10605 break;
8bfd0550 10606 case 7: /* MRS */
43ac6574
PM
10607 if (extract32(insn, 5, 1) &&
10608 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10609 /* MRS (banked) */
10610 int sysm = extract32(insn, 16, 4) |
10611 (extract32(insn, 4, 1) << 4);
10612
10613 gen_mrs_banked(s, 1, sysm, rd);
10614 break;
10615 }
10616
10617 /* mrs spsr. */
9ee6e8bb 10618 /* Not accessible in user mode. */
b53d8923 10619 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10620 goto illegal_op;
b53d8923 10621 }
3d54026f
PM
10622
10623 if (extract32(insn, 16, 4) != 0xf ||
10624 extract32(insn, 0, 8) != 0) {
10625 goto illegal_op;
10626 }
10627
d9ba4830
PB
10628 tmp = load_cpu_field(spsr);
10629 store_reg(s, rd, tmp);
9ee6e8bb 10630 break;
2c0262af
FB
10631 }
10632 }
9ee6e8bb
PB
10633 } else {
10634 /* Conditional branch. */
10635 op = (insn >> 22) & 0xf;
10636 /* Generate a conditional jump to next instruction. */
10637 s->condlabel = gen_new_label();
39fb730a 10638 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10639 s->condjmp = 1;
10640
10641 /* offset[11:1] = insn[10:0] */
10642 offset = (insn & 0x7ff) << 1;
10643 /* offset[17:12] = insn[21:16]. */
10644 offset |= (insn & 0x003f0000) >> 4;
10645 /* offset[31:20] = insn[26]. */
10646 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10647 /* offset[18] = insn[13]. */
10648 offset |= (insn & (1 << 13)) << 5;
10649 /* offset[19] = insn[11]. */
10650 offset |= (insn & (1 << 11)) << 8;
10651
10652 /* jump to the offset */
b0109805 10653 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10654 }
10655 } else {
10656 /* Data processing immediate. */
10657 if (insn & (1 << 25)) {
10658 if (insn & (1 << 24)) {
10659 if (insn & (1 << 20))
10660 goto illegal_op;
10661 /* Bitfield/Saturate. */
10662 op = (insn >> 21) & 7;
10663 imm = insn & 0x1f;
10664 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10665 if (rn == 15) {
7d1b0095 10666 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10667 tcg_gen_movi_i32(tmp, 0);
10668 } else {
10669 tmp = load_reg(s, rn);
10670 }
9ee6e8bb
PB
10671 switch (op) {
10672 case 2: /* Signed bitfield extract. */
10673 imm++;
10674 if (shift + imm > 32)
10675 goto illegal_op;
59a71b4c
RH
10676 if (imm < 32) {
10677 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10678 }
9ee6e8bb
PB
10679 break;
10680 case 6: /* Unsigned bitfield extract. */
10681 imm++;
10682 if (shift + imm > 32)
10683 goto illegal_op;
59a71b4c
RH
10684 if (imm < 32) {
10685 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10686 }
9ee6e8bb
PB
10687 break;
10688 case 3: /* Bitfield insert/clear. */
10689 if (imm < shift)
10690 goto illegal_op;
10691 imm = imm + 1 - shift;
10692 if (imm != 32) {
6ddbc6e4 10693 tmp2 = load_reg(s, rd);
d593c48e 10694 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10695 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10696 }
10697 break;
10698 case 7:
10699 goto illegal_op;
10700 default: /* Saturate. */
9ee6e8bb
PB
10701 if (shift) {
10702 if (op & 1)
6ddbc6e4 10703 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10704 else
6ddbc6e4 10705 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10706 }
6ddbc6e4 10707 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10708 if (op & 4) {
10709 /* Unsigned. */
62b44f05
AR
10710 if ((op & 1) && shift == 0) {
10711 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10712 tcg_temp_free_i32(tmp);
10713 tcg_temp_free_i32(tmp2);
10714 goto illegal_op;
10715 }
9ef39277 10716 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10717 } else {
9ef39277 10718 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10719 }
2c0262af 10720 } else {
9ee6e8bb 10721 /* Signed. */
62b44f05
AR
10722 if ((op & 1) && shift == 0) {
10723 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10724 tcg_temp_free_i32(tmp);
10725 tcg_temp_free_i32(tmp2);
10726 goto illegal_op;
10727 }
9ef39277 10728 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10729 } else {
9ef39277 10730 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10731 }
2c0262af 10732 }
b75263d6 10733 tcg_temp_free_i32(tmp2);
9ee6e8bb 10734 break;
2c0262af 10735 }
6ddbc6e4 10736 store_reg(s, rd, tmp);
9ee6e8bb
PB
10737 } else {
10738 imm = ((insn & 0x04000000) >> 15)
10739 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10740 if (insn & (1 << 22)) {
10741 /* 16-bit immediate. */
10742 imm |= (insn >> 4) & 0xf000;
10743 if (insn & (1 << 23)) {
10744 /* movt */
5e3f878a 10745 tmp = load_reg(s, rd);
86831435 10746 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10747 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10748 } else {
9ee6e8bb 10749 /* movw */
7d1b0095 10750 tmp = tcg_temp_new_i32();
5e3f878a 10751 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10752 }
10753 } else {
9ee6e8bb
PB
10754 /* Add/sub 12-bit immediate. */
10755 if (rn == 15) {
b0109805 10756 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10757 if (insn & (1 << 23))
b0109805 10758 offset -= imm;
9ee6e8bb 10759 else
b0109805 10760 offset += imm;
7d1b0095 10761 tmp = tcg_temp_new_i32();
5e3f878a 10762 tcg_gen_movi_i32(tmp, offset);
2c0262af 10763 } else {
5e3f878a 10764 tmp = load_reg(s, rn);
9ee6e8bb 10765 if (insn & (1 << 23))
5e3f878a 10766 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10767 else
5e3f878a 10768 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10769 }
9ee6e8bb 10770 }
5e3f878a 10771 store_reg(s, rd, tmp);
191abaa2 10772 }
9ee6e8bb
PB
10773 } else {
10774 int shifter_out = 0;
10775 /* modified 12-bit immediate. */
10776 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10777 imm = (insn & 0xff);
10778 switch (shift) {
10779 case 0: /* XY */
10780 /* Nothing to do. */
10781 break;
10782 case 1: /* 00XY00XY */
10783 imm |= imm << 16;
10784 break;
10785 case 2: /* XY00XY00 */
10786 imm |= imm << 16;
10787 imm <<= 8;
10788 break;
10789 case 3: /* XYXYXYXY */
10790 imm |= imm << 16;
10791 imm |= imm << 8;
10792 break;
10793 default: /* Rotated constant. */
10794 shift = (shift << 1) | (imm >> 7);
10795 imm |= 0x80;
10796 imm = imm << (32 - shift);
10797 shifter_out = 1;
10798 break;
b5ff1b31 10799 }
7d1b0095 10800 tmp2 = tcg_temp_new_i32();
3174f8e9 10801 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10802 rn = (insn >> 16) & 0xf;
3174f8e9 10803 if (rn == 15) {
7d1b0095 10804 tmp = tcg_temp_new_i32();
3174f8e9
FN
10805 tcg_gen_movi_i32(tmp, 0);
10806 } else {
10807 tmp = load_reg(s, rn);
10808 }
9ee6e8bb
PB
10809 op = (insn >> 21) & 0xf;
10810 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10811 shifter_out, tmp, tmp2))
9ee6e8bb 10812 goto illegal_op;
7d1b0095 10813 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10814 rd = (insn >> 8) & 0xf;
10815 if (rd != 15) {
3174f8e9
FN
10816 store_reg(s, rd, tmp);
10817 } else {
7d1b0095 10818 tcg_temp_free_i32(tmp);
2c0262af 10819 }
2c0262af 10820 }
9ee6e8bb
PB
10821 }
10822 break;
10823 case 12: /* Load/store single data item. */
10824 {
10825 int postinc = 0;
10826 int writeback = 0;
a99caa48 10827 int memidx;
9bb6558a
PM
10828 ISSInfo issinfo;
10829
9ee6e8bb 10830 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10831 if (disas_neon_ls_insn(s, insn)) {
c1713132 10832 goto illegal_op;
7dcc1f89 10833 }
9ee6e8bb
PB
10834 break;
10835 }
a2fdc890
PM
10836 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10837 if (rs == 15) {
10838 if (!(insn & (1 << 20))) {
10839 goto illegal_op;
10840 }
10841 if (op != 2) {
10842 /* Byte or halfword load space with dest == r15 : memory hints.
10843 * Catch them early so we don't emit pointless addressing code.
10844 * This space is a mix of:
10845 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10846 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10847 * cores)
10848 * unallocated hints, which must be treated as NOPs
10849 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10850 * which is easiest for the decoding logic
10851 * Some space which must UNDEF
10852 */
10853 int op1 = (insn >> 23) & 3;
10854 int op2 = (insn >> 6) & 0x3f;
10855 if (op & 2) {
10856 goto illegal_op;
10857 }
10858 if (rn == 15) {
02afbf64
PM
10859 /* UNPREDICTABLE, unallocated hint or
10860 * PLD/PLDW/PLI (literal)
10861 */
a2fdc890
PM
10862 return 0;
10863 }
10864 if (op1 & 1) {
02afbf64 10865 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10866 }
10867 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10868 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10869 }
10870 /* UNDEF space, or an UNPREDICTABLE */
10871 return 1;
10872 }
10873 }
a99caa48 10874 memidx = get_mem_index(s);
9ee6e8bb 10875 if (rn == 15) {
7d1b0095 10876 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10877 /* PC relative. */
10878 /* s->pc has already been incremented by 4. */
10879 imm = s->pc & 0xfffffffc;
10880 if (insn & (1 << 23))
10881 imm += insn & 0xfff;
10882 else
10883 imm -= insn & 0xfff;
b0109805 10884 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10885 } else {
b0109805 10886 addr = load_reg(s, rn);
9ee6e8bb
PB
10887 if (insn & (1 << 23)) {
10888 /* Positive offset. */
10889 imm = insn & 0xfff;
b0109805 10890 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10891 } else {
9ee6e8bb 10892 imm = insn & 0xff;
2a0308c5
PM
10893 switch ((insn >> 8) & 0xf) {
10894 case 0x0: /* Shifted Register. */
9ee6e8bb 10895 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10896 if (shift > 3) {
10897 tcg_temp_free_i32(addr);
18c9b560 10898 goto illegal_op;
2a0308c5 10899 }
b26eefb6 10900 tmp = load_reg(s, rm);
9ee6e8bb 10901 if (shift)
b26eefb6 10902 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10903 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10904 tcg_temp_free_i32(tmp);
9ee6e8bb 10905 break;
2a0308c5 10906 case 0xc: /* Negative offset. */
b0109805 10907 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10908 break;
2a0308c5 10909 case 0xe: /* User privilege. */
b0109805 10910 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10911 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10912 break;
2a0308c5 10913 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10914 imm = -imm;
10915 /* Fall through. */
2a0308c5 10916 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10917 postinc = 1;
10918 writeback = 1;
10919 break;
2a0308c5 10920 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10921 imm = -imm;
10922 /* Fall through. */
2a0308c5 10923 case 0xf: /* Pre-increment. */
b0109805 10924 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10925 writeback = 1;
10926 break;
10927 default:
2a0308c5 10928 tcg_temp_free_i32(addr);
b7bcbe95 10929 goto illegal_op;
9ee6e8bb
PB
10930 }
10931 }
10932 }
9bb6558a
PM
10933
10934 issinfo = writeback ? ISSInvalid : rs;
10935
9ee6e8bb
PB
10936 if (insn & (1 << 20)) {
10937 /* Load. */
5a839c0d 10938 tmp = tcg_temp_new_i32();
a2fdc890 10939 switch (op) {
5a839c0d 10940 case 0:
9bb6558a 10941 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10942 break;
10943 case 4:
9bb6558a 10944 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10945 break;
10946 case 1:
9bb6558a 10947 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10948 break;
10949 case 5:
9bb6558a 10950 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10951 break;
10952 case 2:
9bb6558a 10953 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10954 break;
2a0308c5 10955 default:
5a839c0d 10956 tcg_temp_free_i32(tmp);
2a0308c5
PM
10957 tcg_temp_free_i32(addr);
10958 goto illegal_op;
a2fdc890
PM
10959 }
10960 if (rs == 15) {
3bb8a96f 10961 gen_bx_excret(s, tmp);
9ee6e8bb 10962 } else {
a2fdc890 10963 store_reg(s, rs, tmp);
9ee6e8bb
PB
10964 }
10965 } else {
10966 /* Store. */
b0109805 10967 tmp = load_reg(s, rs);
9ee6e8bb 10968 switch (op) {
5a839c0d 10969 case 0:
9bb6558a 10970 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10971 break;
10972 case 1:
9bb6558a 10973 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10974 break;
10975 case 2:
9bb6558a 10976 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10977 break;
2a0308c5 10978 default:
5a839c0d 10979 tcg_temp_free_i32(tmp);
2a0308c5
PM
10980 tcg_temp_free_i32(addr);
10981 goto illegal_op;
b7bcbe95 10982 }
5a839c0d 10983 tcg_temp_free_i32(tmp);
2c0262af 10984 }
9ee6e8bb 10985 if (postinc)
b0109805
PB
10986 tcg_gen_addi_i32(addr, addr, imm);
10987 if (writeback) {
10988 store_reg(s, rn, addr);
10989 } else {
7d1b0095 10990 tcg_temp_free_i32(addr);
b0109805 10991 }
9ee6e8bb
PB
10992 }
10993 break;
10994 default:
10995 goto illegal_op;
2c0262af 10996 }
9ee6e8bb
PB
10997 return 0;
10998illegal_op:
10999 return 1;
2c0262af
FB
11000}
11001
0ecb72a5 11002static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
11003{
11004 uint32_t val, insn, op, rm, rn, rd, shift, cond;
11005 int32_t offset;
11006 int i;
39d5492a
PM
11007 TCGv_i32 tmp;
11008 TCGv_i32 tmp2;
11009 TCGv_i32 addr;
99c475ab 11010
9ee6e8bb
PB
11011 if (s->condexec_mask) {
11012 cond = s->condexec_cond;
bedd2912
JB
11013 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
11014 s->condlabel = gen_new_label();
39fb730a 11015 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
11016 s->condjmp = 1;
11017 }
9ee6e8bb
PB
11018 }
11019
f9fd40eb 11020 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 11021 s->pc += 2;
b5ff1b31 11022
99c475ab
FB
11023 switch (insn >> 12) {
11024 case 0: case 1:
396e467c 11025
99c475ab
FB
11026 rd = insn & 7;
11027 op = (insn >> 11) & 3;
11028 if (op == 3) {
11029 /* add/subtract */
11030 rn = (insn >> 3) & 7;
396e467c 11031 tmp = load_reg(s, rn);
99c475ab
FB
11032 if (insn & (1 << 10)) {
11033 /* immediate */
7d1b0095 11034 tmp2 = tcg_temp_new_i32();
396e467c 11035 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11036 } else {
11037 /* reg */
11038 rm = (insn >> 6) & 7;
396e467c 11039 tmp2 = load_reg(s, rm);
99c475ab 11040 }
9ee6e8bb
PB
11041 if (insn & (1 << 9)) {
11042 if (s->condexec_mask)
396e467c 11043 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11044 else
72485ec4 11045 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11046 } else {
11047 if (s->condexec_mask)
396e467c 11048 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11049 else
72485ec4 11050 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11051 }
7d1b0095 11052 tcg_temp_free_i32(tmp2);
396e467c 11053 store_reg(s, rd, tmp);
99c475ab
FB
11054 } else {
11055 /* shift immediate */
11056 rm = (insn >> 3) & 7;
11057 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11058 tmp = load_reg(s, rm);
11059 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11060 if (!s->condexec_mask)
11061 gen_logic_CC(tmp);
11062 store_reg(s, rd, tmp);
99c475ab
FB
11063 }
11064 break;
11065 case 2: case 3:
11066 /* arithmetic large immediate */
11067 op = (insn >> 11) & 3;
11068 rd = (insn >> 8) & 0x7;
396e467c 11069 if (op == 0) { /* mov */
7d1b0095 11070 tmp = tcg_temp_new_i32();
396e467c 11071 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11072 if (!s->condexec_mask)
396e467c
FN
11073 gen_logic_CC(tmp);
11074 store_reg(s, rd, tmp);
11075 } else {
11076 tmp = load_reg(s, rd);
7d1b0095 11077 tmp2 = tcg_temp_new_i32();
396e467c
FN
11078 tcg_gen_movi_i32(tmp2, insn & 0xff);
11079 switch (op) {
11080 case 1: /* cmp */
72485ec4 11081 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11082 tcg_temp_free_i32(tmp);
11083 tcg_temp_free_i32(tmp2);
396e467c
FN
11084 break;
11085 case 2: /* add */
11086 if (s->condexec_mask)
11087 tcg_gen_add_i32(tmp, tmp, tmp2);
11088 else
72485ec4 11089 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11090 tcg_temp_free_i32(tmp2);
396e467c
FN
11091 store_reg(s, rd, tmp);
11092 break;
11093 case 3: /* sub */
11094 if (s->condexec_mask)
11095 tcg_gen_sub_i32(tmp, tmp, tmp2);
11096 else
72485ec4 11097 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11098 tcg_temp_free_i32(tmp2);
396e467c
FN
11099 store_reg(s, rd, tmp);
11100 break;
11101 }
99c475ab 11102 }
99c475ab
FB
11103 break;
11104 case 4:
11105 if (insn & (1 << 11)) {
11106 rd = (insn >> 8) & 7;
5899f386
FB
11107 /* load pc-relative. Bit 1 of PC is ignored. */
11108 val = s->pc + 2 + ((insn & 0xff) * 4);
11109 val &= ~(uint32_t)2;
7d1b0095 11110 addr = tcg_temp_new_i32();
b0109805 11111 tcg_gen_movi_i32(addr, val);
c40c8556 11112 tmp = tcg_temp_new_i32();
9bb6558a
PM
11113 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11114 rd | ISSIs16Bit);
7d1b0095 11115 tcg_temp_free_i32(addr);
b0109805 11116 store_reg(s, rd, tmp);
99c475ab
FB
11117 break;
11118 }
11119 if (insn & (1 << 10)) {
11120 /* data processing extended or blx */
11121 rd = (insn & 7) | ((insn >> 4) & 8);
11122 rm = (insn >> 3) & 0xf;
11123 op = (insn >> 8) & 3;
11124 switch (op) {
11125 case 0: /* add */
396e467c
FN
11126 tmp = load_reg(s, rd);
11127 tmp2 = load_reg(s, rm);
11128 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11129 tcg_temp_free_i32(tmp2);
396e467c 11130 store_reg(s, rd, tmp);
99c475ab
FB
11131 break;
11132 case 1: /* cmp */
396e467c
FN
11133 tmp = load_reg(s, rd);
11134 tmp2 = load_reg(s, rm);
72485ec4 11135 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11136 tcg_temp_free_i32(tmp2);
11137 tcg_temp_free_i32(tmp);
99c475ab
FB
11138 break;
11139 case 2: /* mov/cpy */
396e467c
FN
11140 tmp = load_reg(s, rm);
11141 store_reg(s, rd, tmp);
99c475ab
FB
11142 break;
11143 case 3:/* branch [and link] exchange thumb register */
b0109805 11144 tmp = load_reg(s, rm);
99c475ab 11145 if (insn & (1 << 7)) {
be5e7a76 11146 ARCH(5);
99c475ab 11147 val = (uint32_t)s->pc | 1;
7d1b0095 11148 tmp2 = tcg_temp_new_i32();
b0109805
PB
11149 tcg_gen_movi_i32(tmp2, val);
11150 store_reg(s, 14, tmp2);
3bb8a96f
PM
11151 gen_bx(s, tmp);
11152 } else {
11153 /* Only BX works as exception-return, not BLX */
11154 gen_bx_excret(s, tmp);
99c475ab 11155 }
99c475ab
FB
11156 break;
11157 }
11158 break;
11159 }
11160
11161 /* data processing register */
11162 rd = insn & 7;
11163 rm = (insn >> 3) & 7;
11164 op = (insn >> 6) & 0xf;
11165 if (op == 2 || op == 3 || op == 4 || op == 7) {
11166 /* the shift/rotate ops want the operands backwards */
11167 val = rm;
11168 rm = rd;
11169 rd = val;
11170 val = 1;
11171 } else {
11172 val = 0;
11173 }
11174
396e467c 11175 if (op == 9) { /* neg */
7d1b0095 11176 tmp = tcg_temp_new_i32();
396e467c
FN
11177 tcg_gen_movi_i32(tmp, 0);
11178 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11179 tmp = load_reg(s, rd);
11180 } else {
39d5492a 11181 TCGV_UNUSED_I32(tmp);
396e467c 11182 }
99c475ab 11183
396e467c 11184 tmp2 = load_reg(s, rm);
5899f386 11185 switch (op) {
99c475ab 11186 case 0x0: /* and */
396e467c 11187 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11188 if (!s->condexec_mask)
396e467c 11189 gen_logic_CC(tmp);
99c475ab
FB
11190 break;
11191 case 0x1: /* eor */
396e467c 11192 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11193 if (!s->condexec_mask)
396e467c 11194 gen_logic_CC(tmp);
99c475ab
FB
11195 break;
11196 case 0x2: /* lsl */
9ee6e8bb 11197 if (s->condexec_mask) {
365af80e 11198 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11199 } else {
9ef39277 11200 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11201 gen_logic_CC(tmp2);
9ee6e8bb 11202 }
99c475ab
FB
11203 break;
11204 case 0x3: /* lsr */
9ee6e8bb 11205 if (s->condexec_mask) {
365af80e 11206 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11207 } else {
9ef39277 11208 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11209 gen_logic_CC(tmp2);
9ee6e8bb 11210 }
99c475ab
FB
11211 break;
11212 case 0x4: /* asr */
9ee6e8bb 11213 if (s->condexec_mask) {
365af80e 11214 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11215 } else {
9ef39277 11216 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11217 gen_logic_CC(tmp2);
9ee6e8bb 11218 }
99c475ab
FB
11219 break;
11220 case 0x5: /* adc */
49b4c31e 11221 if (s->condexec_mask) {
396e467c 11222 gen_adc(tmp, tmp2);
49b4c31e
RH
11223 } else {
11224 gen_adc_CC(tmp, tmp, tmp2);
11225 }
99c475ab
FB
11226 break;
11227 case 0x6: /* sbc */
2de68a49 11228 if (s->condexec_mask) {
396e467c 11229 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11230 } else {
11231 gen_sbc_CC(tmp, tmp, tmp2);
11232 }
99c475ab
FB
11233 break;
11234 case 0x7: /* ror */
9ee6e8bb 11235 if (s->condexec_mask) {
f669df27
AJ
11236 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11237 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11238 } else {
9ef39277 11239 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11240 gen_logic_CC(tmp2);
9ee6e8bb 11241 }
99c475ab
FB
11242 break;
11243 case 0x8: /* tst */
396e467c
FN
11244 tcg_gen_and_i32(tmp, tmp, tmp2);
11245 gen_logic_CC(tmp);
99c475ab 11246 rd = 16;
5899f386 11247 break;
99c475ab 11248 case 0x9: /* neg */
9ee6e8bb 11249 if (s->condexec_mask)
396e467c 11250 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11251 else
72485ec4 11252 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11253 break;
11254 case 0xa: /* cmp */
72485ec4 11255 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11256 rd = 16;
11257 break;
11258 case 0xb: /* cmn */
72485ec4 11259 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11260 rd = 16;
11261 break;
11262 case 0xc: /* orr */
396e467c 11263 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11264 if (!s->condexec_mask)
396e467c 11265 gen_logic_CC(tmp);
99c475ab
FB
11266 break;
11267 case 0xd: /* mul */
7b2919a0 11268 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11269 if (!s->condexec_mask)
396e467c 11270 gen_logic_CC(tmp);
99c475ab
FB
11271 break;
11272 case 0xe: /* bic */
f669df27 11273 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11274 if (!s->condexec_mask)
396e467c 11275 gen_logic_CC(tmp);
99c475ab
FB
11276 break;
11277 case 0xf: /* mvn */
396e467c 11278 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11279 if (!s->condexec_mask)
396e467c 11280 gen_logic_CC(tmp2);
99c475ab 11281 val = 1;
5899f386 11282 rm = rd;
99c475ab
FB
11283 break;
11284 }
11285 if (rd != 16) {
396e467c
FN
11286 if (val) {
11287 store_reg(s, rm, tmp2);
11288 if (op != 0xf)
7d1b0095 11289 tcg_temp_free_i32(tmp);
396e467c
FN
11290 } else {
11291 store_reg(s, rd, tmp);
7d1b0095 11292 tcg_temp_free_i32(tmp2);
396e467c
FN
11293 }
11294 } else {
7d1b0095
PM
11295 tcg_temp_free_i32(tmp);
11296 tcg_temp_free_i32(tmp2);
99c475ab
FB
11297 }
11298 break;
11299
11300 case 5:
11301 /* load/store register offset. */
11302 rd = insn & 7;
11303 rn = (insn >> 3) & 7;
11304 rm = (insn >> 6) & 7;
11305 op = (insn >> 9) & 7;
b0109805 11306 addr = load_reg(s, rn);
b26eefb6 11307 tmp = load_reg(s, rm);
b0109805 11308 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11309 tcg_temp_free_i32(tmp);
99c475ab 11310
c40c8556 11311 if (op < 3) { /* store */
b0109805 11312 tmp = load_reg(s, rd);
c40c8556
PM
11313 } else {
11314 tmp = tcg_temp_new_i32();
11315 }
99c475ab
FB
11316
11317 switch (op) {
11318 case 0: /* str */
9bb6558a 11319 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11320 break;
11321 case 1: /* strh */
9bb6558a 11322 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11323 break;
11324 case 2: /* strb */
9bb6558a 11325 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11326 break;
11327 case 3: /* ldrsb */
9bb6558a 11328 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11329 break;
11330 case 4: /* ldr */
9bb6558a 11331 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11332 break;
11333 case 5: /* ldrh */
9bb6558a 11334 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11335 break;
11336 case 6: /* ldrb */
9bb6558a 11337 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11338 break;
11339 case 7: /* ldrsh */
9bb6558a 11340 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11341 break;
11342 }
c40c8556 11343 if (op >= 3) { /* load */
b0109805 11344 store_reg(s, rd, tmp);
c40c8556
PM
11345 } else {
11346 tcg_temp_free_i32(tmp);
11347 }
7d1b0095 11348 tcg_temp_free_i32(addr);
99c475ab
FB
11349 break;
11350
11351 case 6:
11352 /* load/store word immediate offset */
11353 rd = insn & 7;
11354 rn = (insn >> 3) & 7;
b0109805 11355 addr = load_reg(s, rn);
99c475ab 11356 val = (insn >> 4) & 0x7c;
b0109805 11357 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11358
11359 if (insn & (1 << 11)) {
11360 /* load */
c40c8556 11361 tmp = tcg_temp_new_i32();
12dcc321 11362 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11363 store_reg(s, rd, tmp);
99c475ab
FB
11364 } else {
11365 /* store */
b0109805 11366 tmp = load_reg(s, rd);
12dcc321 11367 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11368 tcg_temp_free_i32(tmp);
99c475ab 11369 }
7d1b0095 11370 tcg_temp_free_i32(addr);
99c475ab
FB
11371 break;
11372
11373 case 7:
11374 /* load/store byte immediate offset */
11375 rd = insn & 7;
11376 rn = (insn >> 3) & 7;
b0109805 11377 addr = load_reg(s, rn);
99c475ab 11378 val = (insn >> 6) & 0x1f;
b0109805 11379 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11380
11381 if (insn & (1 << 11)) {
11382 /* load */
c40c8556 11383 tmp = tcg_temp_new_i32();
9bb6558a 11384 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11385 store_reg(s, rd, tmp);
99c475ab
FB
11386 } else {
11387 /* store */
b0109805 11388 tmp = load_reg(s, rd);
9bb6558a 11389 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11390 tcg_temp_free_i32(tmp);
99c475ab 11391 }
7d1b0095 11392 tcg_temp_free_i32(addr);
99c475ab
FB
11393 break;
11394
11395 case 8:
11396 /* load/store halfword immediate offset */
11397 rd = insn & 7;
11398 rn = (insn >> 3) & 7;
b0109805 11399 addr = load_reg(s, rn);
99c475ab 11400 val = (insn >> 5) & 0x3e;
b0109805 11401 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11402
11403 if (insn & (1 << 11)) {
11404 /* load */
c40c8556 11405 tmp = tcg_temp_new_i32();
9bb6558a 11406 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11407 store_reg(s, rd, tmp);
99c475ab
FB
11408 } else {
11409 /* store */
b0109805 11410 tmp = load_reg(s, rd);
9bb6558a 11411 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11412 tcg_temp_free_i32(tmp);
99c475ab 11413 }
7d1b0095 11414 tcg_temp_free_i32(addr);
99c475ab
FB
11415 break;
11416
11417 case 9:
11418 /* load/store from stack */
11419 rd = (insn >> 8) & 7;
b0109805 11420 addr = load_reg(s, 13);
99c475ab 11421 val = (insn & 0xff) * 4;
b0109805 11422 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11423
11424 if (insn & (1 << 11)) {
11425 /* load */
c40c8556 11426 tmp = tcg_temp_new_i32();
9bb6558a 11427 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11428 store_reg(s, rd, tmp);
99c475ab
FB
11429 } else {
11430 /* store */
b0109805 11431 tmp = load_reg(s, rd);
9bb6558a 11432 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11433 tcg_temp_free_i32(tmp);
99c475ab 11434 }
7d1b0095 11435 tcg_temp_free_i32(addr);
99c475ab
FB
11436 break;
11437
11438 case 10:
11439 /* add to high reg */
11440 rd = (insn >> 8) & 7;
5899f386
FB
11441 if (insn & (1 << 11)) {
11442 /* SP */
5e3f878a 11443 tmp = load_reg(s, 13);
5899f386
FB
11444 } else {
11445 /* PC. bit 1 is ignored. */
7d1b0095 11446 tmp = tcg_temp_new_i32();
5e3f878a 11447 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11448 }
99c475ab 11449 val = (insn & 0xff) * 4;
5e3f878a
PB
11450 tcg_gen_addi_i32(tmp, tmp, val);
11451 store_reg(s, rd, tmp);
99c475ab
FB
11452 break;
11453
11454 case 11:
11455 /* misc */
11456 op = (insn >> 8) & 0xf;
11457 switch (op) {
11458 case 0:
11459 /* adjust stack pointer */
b26eefb6 11460 tmp = load_reg(s, 13);
99c475ab
FB
11461 val = (insn & 0x7f) * 4;
11462 if (insn & (1 << 7))
6a0d8a1d 11463 val = -(int32_t)val;
b26eefb6
PB
11464 tcg_gen_addi_i32(tmp, tmp, val);
11465 store_reg(s, 13, tmp);
99c475ab
FB
11466 break;
11467
9ee6e8bb
PB
11468 case 2: /* sign/zero extend. */
11469 ARCH(6);
11470 rd = insn & 7;
11471 rm = (insn >> 3) & 7;
b0109805 11472 tmp = load_reg(s, rm);
9ee6e8bb 11473 switch ((insn >> 6) & 3) {
b0109805
PB
11474 case 0: gen_sxth(tmp); break;
11475 case 1: gen_sxtb(tmp); break;
11476 case 2: gen_uxth(tmp); break;
11477 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11478 }
b0109805 11479 store_reg(s, rd, tmp);
9ee6e8bb 11480 break;
99c475ab
FB
11481 case 4: case 5: case 0xc: case 0xd:
11482 /* push/pop */
b0109805 11483 addr = load_reg(s, 13);
5899f386
FB
11484 if (insn & (1 << 8))
11485 offset = 4;
99c475ab 11486 else
5899f386
FB
11487 offset = 0;
11488 for (i = 0; i < 8; i++) {
11489 if (insn & (1 << i))
11490 offset += 4;
11491 }
11492 if ((insn & (1 << 11)) == 0) {
b0109805 11493 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11494 }
99c475ab
FB
11495 for (i = 0; i < 8; i++) {
11496 if (insn & (1 << i)) {
11497 if (insn & (1 << 11)) {
11498 /* pop */
c40c8556 11499 tmp = tcg_temp_new_i32();
12dcc321 11500 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11501 store_reg(s, i, tmp);
99c475ab
FB
11502 } else {
11503 /* push */
b0109805 11504 tmp = load_reg(s, i);
12dcc321 11505 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11506 tcg_temp_free_i32(tmp);
99c475ab 11507 }
5899f386 11508 /* advance to the next address. */
b0109805 11509 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11510 }
11511 }
39d5492a 11512 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11513 if (insn & (1 << 8)) {
11514 if (insn & (1 << 11)) {
11515 /* pop pc */
c40c8556 11516 tmp = tcg_temp_new_i32();
12dcc321 11517 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11518 /* don't set the pc until the rest of the instruction
11519 has completed */
11520 } else {
11521 /* push lr */
b0109805 11522 tmp = load_reg(s, 14);
12dcc321 11523 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11524 tcg_temp_free_i32(tmp);
99c475ab 11525 }
b0109805 11526 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11527 }
5899f386 11528 if ((insn & (1 << 11)) == 0) {
b0109805 11529 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11530 }
99c475ab 11531 /* write back the new stack pointer */
b0109805 11532 store_reg(s, 13, addr);
99c475ab 11533 /* set the new PC value */
be5e7a76 11534 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11535 store_reg_from_load(s, 15, tmp);
be5e7a76 11536 }
99c475ab
FB
11537 break;
11538
9ee6e8bb
PB
11539 case 1: case 3: case 9: case 11: /* czb */
11540 rm = insn & 7;
d9ba4830 11541 tmp = load_reg(s, rm);
9ee6e8bb
PB
11542 s->condlabel = gen_new_label();
11543 s->condjmp = 1;
11544 if (insn & (1 << 11))
cb63669a 11545 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11546 else
cb63669a 11547 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11548 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11549 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11550 val = (uint32_t)s->pc + 2;
11551 val += offset;
11552 gen_jmp(s, val);
11553 break;
11554
11555 case 15: /* IT, nop-hint. */
11556 if ((insn & 0xf) == 0) {
11557 gen_nop_hint(s, (insn >> 4) & 0xf);
11558 break;
11559 }
11560 /* If Then. */
11561 s->condexec_cond = (insn >> 4) & 0xe;
11562 s->condexec_mask = insn & 0x1f;
11563 /* No actual code generated for this insn, just setup state. */
11564 break;
11565
06c949e6 11566 case 0xe: /* bkpt */
d4a2dc67
PM
11567 {
11568 int imm8 = extract32(insn, 0, 8);
be5e7a76 11569 ARCH(5);
73710361
GB
11570 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11571 default_exception_el(s));
06c949e6 11572 break;
d4a2dc67 11573 }
06c949e6 11574
19a6e31c
PM
11575 case 0xa: /* rev, and hlt */
11576 {
11577 int op1 = extract32(insn, 6, 2);
11578
11579 if (op1 == 2) {
11580 /* HLT */
11581 int imm6 = extract32(insn, 0, 6);
11582
11583 gen_hlt(s, imm6);
11584 break;
11585 }
11586
11587 /* Otherwise this is rev */
9ee6e8bb
PB
11588 ARCH(6);
11589 rn = (insn >> 3) & 0x7;
11590 rd = insn & 0x7;
b0109805 11591 tmp = load_reg(s, rn);
19a6e31c 11592 switch (op1) {
66896cb8 11593 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11594 case 1: gen_rev16(tmp); break;
11595 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11596 default:
11597 g_assert_not_reached();
9ee6e8bb 11598 }
b0109805 11599 store_reg(s, rd, tmp);
9ee6e8bb 11600 break;
19a6e31c 11601 }
9ee6e8bb 11602
d9e028c1
PM
11603 case 6:
11604 switch ((insn >> 5) & 7) {
11605 case 2:
11606 /* setend */
11607 ARCH(6);
9886ecdf
PB
11608 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11609 gen_helper_setend(cpu_env);
11610 s->is_jmp = DISAS_UPDATE;
d9e028c1 11611 }
9ee6e8bb 11612 break;
d9e028c1
PM
11613 case 3:
11614 /* cps */
11615 ARCH(6);
11616 if (IS_USER(s)) {
11617 break;
8984bd2e 11618 }
b53d8923 11619 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11620 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11621 /* FAULTMASK */
11622 if (insn & 1) {
11623 addr = tcg_const_i32(19);
11624 gen_helper_v7m_msr(cpu_env, addr, tmp);
11625 tcg_temp_free_i32(addr);
11626 }
11627 /* PRIMASK */
11628 if (insn & 2) {
11629 addr = tcg_const_i32(16);
11630 gen_helper_v7m_msr(cpu_env, addr, tmp);
11631 tcg_temp_free_i32(addr);
11632 }
11633 tcg_temp_free_i32(tmp);
11634 gen_lookup_tb(s);
11635 } else {
11636 if (insn & (1 << 4)) {
11637 shift = CPSR_A | CPSR_I | CPSR_F;
11638 } else {
11639 shift = 0;
11640 }
11641 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11642 }
d9e028c1
PM
11643 break;
11644 default:
11645 goto undef;
9ee6e8bb
PB
11646 }
11647 break;
11648
99c475ab
FB
11649 default:
11650 goto undef;
11651 }
11652 break;
11653
11654 case 12:
a7d3970d 11655 {
99c475ab 11656 /* load/store multiple */
39d5492a
PM
11657 TCGv_i32 loaded_var;
11658 TCGV_UNUSED_I32(loaded_var);
99c475ab 11659 rn = (insn >> 8) & 0x7;
b0109805 11660 addr = load_reg(s, rn);
99c475ab
FB
11661 for (i = 0; i < 8; i++) {
11662 if (insn & (1 << i)) {
99c475ab
FB
11663 if (insn & (1 << 11)) {
11664 /* load */
c40c8556 11665 tmp = tcg_temp_new_i32();
12dcc321 11666 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11667 if (i == rn) {
11668 loaded_var = tmp;
11669 } else {
11670 store_reg(s, i, tmp);
11671 }
99c475ab
FB
11672 } else {
11673 /* store */
b0109805 11674 tmp = load_reg(s, i);
12dcc321 11675 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11676 tcg_temp_free_i32(tmp);
99c475ab 11677 }
5899f386 11678 /* advance to the next address */
b0109805 11679 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11680 }
11681 }
b0109805 11682 if ((insn & (1 << rn)) == 0) {
a7d3970d 11683 /* base reg not in list: base register writeback */
b0109805
PB
11684 store_reg(s, rn, addr);
11685 } else {
a7d3970d
PM
11686 /* base reg in list: if load, complete it now */
11687 if (insn & (1 << 11)) {
11688 store_reg(s, rn, loaded_var);
11689 }
7d1b0095 11690 tcg_temp_free_i32(addr);
b0109805 11691 }
99c475ab 11692 break;
a7d3970d 11693 }
99c475ab
FB
11694 case 13:
11695 /* conditional branch or swi */
11696 cond = (insn >> 8) & 0xf;
11697 if (cond == 0xe)
11698 goto undef;
11699
11700 if (cond == 0xf) {
11701 /* swi */
eaed129d 11702 gen_set_pc_im(s, s->pc);
d4a2dc67 11703 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11704 s->is_jmp = DISAS_SWI;
99c475ab
FB
11705 break;
11706 }
11707 /* generate a conditional jump to next instruction */
e50e6a20 11708 s->condlabel = gen_new_label();
39fb730a 11709 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11710 s->condjmp = 1;
99c475ab
FB
11711
11712 /* jump to the offset */
5899f386 11713 val = (uint32_t)s->pc + 2;
99c475ab 11714 offset = ((int32_t)insn << 24) >> 24;
5899f386 11715 val += offset << 1;
8aaca4c0 11716 gen_jmp(s, val);
99c475ab
FB
11717 break;
11718
11719 case 14:
358bf29e 11720 if (insn & (1 << 11)) {
9ee6e8bb
PB
11721 if (disas_thumb2_insn(env, s, insn))
11722 goto undef32;
358bf29e
PB
11723 break;
11724 }
9ee6e8bb 11725 /* unconditional branch */
99c475ab
FB
11726 val = (uint32_t)s->pc;
11727 offset = ((int32_t)insn << 21) >> 21;
11728 val += (offset << 1) + 2;
8aaca4c0 11729 gen_jmp(s, val);
99c475ab
FB
11730 break;
11731
11732 case 15:
9ee6e8bb 11733 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11734 goto undef32;
9ee6e8bb 11735 break;
99c475ab
FB
11736 }
11737 return;
9ee6e8bb 11738undef32:
73710361
GB
11739 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11740 default_exception_el(s));
9ee6e8bb
PB
11741 return;
11742illegal_op:
99c475ab 11743undef:
73710361
GB
11744 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11745 default_exception_el(s));
99c475ab
FB
11746}
11747
541ebcd4
PM
11748static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11749{
11750 /* Return true if the insn at dc->pc might cross a page boundary.
11751 * (False positives are OK, false negatives are not.)
11752 */
11753 uint16_t insn;
11754
11755 if ((s->pc & 3) == 0) {
11756 /* At a 4-aligned address we can't be crossing a page */
11757 return false;
11758 }
11759
11760 /* This must be a Thumb insn */
f9fd40eb 11761 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11762
11763 if ((insn >> 11) >= 0x1d) {
11764 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11765 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11766 * end up actually treating this as two 16-bit insns (see the
11767 * code at the start of disas_thumb2_insn()) but we don't bother
11768 * to check for that as it is unlikely, and false positives here
11769 * are harmless.
11770 */
11771 return true;
11772 }
11773 /* Definitely a 16-bit insn, can't be crossing a page. */
11774 return false;
11775}
11776
20157705 11777/* generate intermediate code for basic block 'tb'. */
4e5e1215 11778void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11779{
4e5e1215 11780 ARMCPU *cpu = arm_env_get_cpu(env);
ed2803da 11781 CPUState *cs = CPU(cpu);
2c0262af 11782 DisasContext dc1, *dc = &dc1;
0fa85d43 11783 target_ulong pc_start;
0a2461fa 11784 target_ulong next_page_start;
2e70f6ef
PB
11785 int num_insns;
11786 int max_insns;
541ebcd4 11787 bool end_of_page;
3b46e624 11788
2c0262af 11789 /* generate intermediate code */
40f860cd
PM
11790
11791 /* The A64 decoder has its own top level loop, because it doesn't need
11792 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11793 */
11794 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
4e5e1215 11795 gen_intermediate_code_a64(cpu, tb);
40f860cd
PM
11796 return;
11797 }
11798
0fa85d43 11799 pc_start = tb->pc;
3b46e624 11800
2c0262af
FB
11801 dc->tb = tb;
11802
2c0262af
FB
11803 dc->is_jmp = DISAS_NEXT;
11804 dc->pc = pc_start;
ed2803da 11805 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 11806 dc->condjmp = 0;
3926cc84 11807
40f860cd 11808 dc->aarch64 = 0;
cef9ee70
SS
11809 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11810 * there is no secure EL1, so we route exceptions to EL3.
11811 */
11812 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11813 !arm_el_is_aa64(env, 3);
40f860cd 11814 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
f9fd40eb 11815 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
91cca2cd 11816 dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
40f860cd
PM
11817 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11818 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
8bd5c820 11819 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
c1e37810 11820 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11821#if !defined(CONFIG_USER_ONLY)
c1e37810 11822 dc->user = (dc->current_el == 0);
3926cc84 11823#endif
3f342b9e 11824 dc->ns = ARM_TBFLAG_NS(tb->flags);
9dbbc748 11825 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
40f860cd
PM
11826 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11827 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11828 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
c0f4af17 11829 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
064c379c 11830 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(tb->flags);
60322b39 11831 dc->cp_regs = cpu->cp_regs;
a984e42c 11832 dc->features = env->features;
40f860cd 11833
50225ad0
PM
11834 /* Single step state. The code-generation logic here is:
11835 * SS_ACTIVE == 0:
11836 * generate code with no special handling for single-stepping (except
11837 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11838 * this happens anyway because those changes are all system register or
11839 * PSTATE writes).
11840 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11841 * emit code for one insn
11842 * emit code to clear PSTATE.SS
11843 * emit code to generate software step exception for completed step
11844 * end TB (as usual for having generated an exception)
11845 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11846 * emit code to generate a software step exception
11847 * end the TB
11848 */
11849 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11850 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11851 dc->is_ldex = false;
11852 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11853
a7812ae4
PB
11854 cpu_F0s = tcg_temp_new_i32();
11855 cpu_F1s = tcg_temp_new_i32();
11856 cpu_F0d = tcg_temp_new_i64();
11857 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
11858 cpu_V0 = cpu_F0d;
11859 cpu_V1 = cpu_F1d;
e677137d 11860 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11861 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 11862 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2e70f6ef
PB
11863 num_insns = 0;
11864 max_insns = tb->cflags & CF_COUNT_MASK;
190ce7fb 11865 if (max_insns == 0) {
2e70f6ef 11866 max_insns = CF_COUNT_MASK;
190ce7fb
RH
11867 }
11868 if (max_insns > TCG_MAX_INSNS) {
11869 max_insns = TCG_MAX_INSNS;
11870 }
2e70f6ef 11871
cd42d5b2 11872 gen_tb_start(tb);
e12ce78d 11873
3849902c
PM
11874 tcg_clear_temp_count();
11875
e12ce78d
PM
11876 /* A note on handling of the condexec (IT) bits:
11877 *
11878 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 11879 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 11880 * (1) if the condexec bits are not already zero then we write
0ecb72a5 11881 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
11882 * to do it at the end of the block. (For example if we don't do this
11883 * it's hard to identify whether we can safely skip writing condexec
11884 * at the end of the TB, which we definitely want to do for the case
11885 * where a TB doesn't do anything with the IT state at all.)
11886 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 11887 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
11888 * This is done both for leaving the TB at the end, and for leaving
11889 * it because of an exception we know will happen, which is done in
11890 * gen_exception_insn(). The latter is necessary because we need to
11891 * leave the TB with the PC/IT state just prior to execution of the
11892 * instruction which caused the exception.
11893 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 11894 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d 11895 * This is handled in the same way as restoration of the
4e5e1215
RH
11896 * PC in these situations; we save the value of the condexec bits
11897 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11898 * then uses this to restore them after an exception.
e12ce78d
PM
11899 *
11900 * Note that there are no instructions which can read the condexec
11901 * bits, and none which can write non-static values to them, so
0ecb72a5 11902 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
11903 * middle of a TB.
11904 */
11905
9ee6e8bb
PB
11906 /* Reset the conditional execution bits immediately. This avoids
11907 complications trying to do it at the end of the block. */
98eac7ca 11908 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 11909 {
39d5492a 11910 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 11911 tcg_gen_movi_i32(tmp, 0);
d9ba4830 11912 store_cpu_field(tmp, condexec_bits);
8f01245e 11913 }
2c0262af 11914 do {
9bb6558a 11915 dc->insn_start_idx = tcg_op_buf_count();
52e971d9 11916 tcg_gen_insn_start(dc->pc,
aaa1f954
EI
11917 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11918 0);
b933066a
RH
11919 num_insns++;
11920
fbb4a2e3
PB
11921#ifdef CONFIG_USER_ONLY
11922 /* Intercept jump to the magic kernel page. */
40f860cd 11923 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
11924 /* We always get here via a jump, so know we are not in a
11925 conditional execution block. */
d4a2dc67 11926 gen_exception_internal(EXCP_KERNEL_TRAP);
577bf808 11927 dc->is_jmp = DISAS_EXC;
fbb4a2e3
PB
11928 break;
11929 }
9ee6e8bb
PB
11930#endif
11931
f0c3c505 11932 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
b933066a 11933 CPUBreakpoint *bp;
f0c3c505 11934 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 11935 if (bp->pc == dc->pc) {
5d98bf8f 11936 if (bp->flags & BP_CPU) {
ce8a1b54 11937 gen_set_condexec(dc);
ed6c6448 11938 gen_set_pc_im(dc, dc->pc);
5d98bf8f
SF
11939 gen_helper_check_breakpoints(cpu_env);
11940 /* End the TB early; it's likely not going to be executed */
11941 dc->is_jmp = DISAS_UPDATE;
11942 } else {
11943 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
522a0d4e
RH
11944 /* The address covered by the breakpoint must be
11945 included in [tb->pc, tb->pc + tb->size) in order
11946 to for it to be properly cleared -- thus we
11947 increment the PC here so that the logic setting
11948 tb->size below does the right thing. */
5d98bf8f
SF
11949 /* TODO: Advance PC by correct instruction length to
11950 * avoid disassembler error messages */
11951 dc->pc += 2;
11952 goto done_generating;
11953 }
11954 break;
1fddef4b
FB
11955 }
11956 }
11957 }
e50e6a20 11958
959082fc 11959 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 11960 gen_io_start();
959082fc 11961 }
2e70f6ef 11962
50225ad0
PM
11963 if (dc->ss_active && !dc->pstate_ss) {
11964 /* Singlestep state is Active-pending.
11965 * If we're in this state at the start of a TB then either
11966 * a) we just took an exception to an EL which is being debugged
11967 * and this is the first insn in the exception handler
11968 * b) debug exceptions were masked and we just unmasked them
11969 * without changing EL (eg by clearing PSTATE.D)
11970 * In either case we're going to take a swstep exception in the
11971 * "did not step an insn" case, and so the syndrome ISV and EX
11972 * bits should be zero.
11973 */
959082fc 11974 assert(num_insns == 1);
73710361
GB
11975 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11976 default_exception_el(dc));
50225ad0
PM
11977 goto done_generating;
11978 }
11979
40f860cd 11980 if (dc->thumb) {
9ee6e8bb
PB
11981 disas_thumb_insn(env, dc);
11982 if (dc->condexec_mask) {
11983 dc->condexec_cond = (dc->condexec_cond & 0xe)
11984 | ((dc->condexec_mask >> 4) & 1);
11985 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11986 if (dc->condexec_mask == 0) {
11987 dc->condexec_cond = 0;
11988 }
11989 }
11990 } else {
f9fd40eb 11991 unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
f4df2210
PM
11992 dc->pc += 4;
11993 disas_arm_insn(dc, insn);
9ee6e8bb 11994 }
e50e6a20
FB
11995
11996 if (dc->condjmp && !dc->is_jmp) {
11997 gen_set_label(dc->condlabel);
11998 dc->condjmp = 0;
11999 }
3849902c
PM
12000
12001 if (tcg_check_temp_count()) {
0a2461fa
AG
12002 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
12003 dc->pc);
3849902c
PM
12004 }
12005
aaf2d97d 12006 /* Translation stops when a conditional branch is encountered.
e50e6a20 12007 * Otherwise the subsequent code could get translated several times.
b5ff1b31 12008 * Also stop translation when a page boundary is reached. This
bf20dc07 12009 * ensures prefetch aborts occur at the right place. */
541ebcd4
PM
12010
12011 /* We want to stop the TB if the next insn starts in a new page,
12012 * or if it spans between this page and the next. This means that
12013 * if we're looking at the last halfword in the page we need to
12014 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12015 * or a 32-bit Thumb insn (which won't).
12016 * This is to avoid generating a silly TB with a single 16-bit insn
12017 * in it at the end of this page (which would execute correctly
12018 * but isn't very efficient).
12019 */
12020 end_of_page = (dc->pc >= next_page_start) ||
12021 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
12022
fe700adb 12023 } while (!dc->is_jmp && !tcg_op_buf_full() &&
b636649f 12024 !is_singlestepping(dc) &&
1b530a6d 12025 !singlestep &&
541ebcd4 12026 !end_of_page &&
2e70f6ef
PB
12027 num_insns < max_insns);
12028
12029 if (tb->cflags & CF_LAST_IO) {
12030 if (dc->condjmp) {
12031 /* FIXME: This can theoretically happen with self-modifying
12032 code. */
a47dddd7 12033 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
12034 }
12035 gen_io_end();
12036 }
9ee6e8bb 12037
b5ff1b31 12038 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
12039 instruction was a conditional branch or trap, and the PC has
12040 already been written. */
f021b2c4 12041 gen_set_condexec(dc);
3bb8a96f
PM
12042 if (dc->is_jmp == DISAS_BX_EXCRET) {
12043 /* Exception return branches need some special case code at the
12044 * end of the TB, which is complex enough that it has to
12045 * handle the single-step vs not and the condition-failed
12046 * insn codepath itself.
12047 */
12048 gen_bx_excret_final_code(dc);
12049 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 12050 /* Unconditional and "condition passed" instruction codepath. */
7999a5c8
SF
12051 switch (dc->is_jmp) {
12052 case DISAS_SWI:
50225ad0 12053 gen_ss_advance(dc);
73710361
GB
12054 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12055 default_exception_el(dc));
7999a5c8
SF
12056 break;
12057 case DISAS_HVC:
37e6456e 12058 gen_ss_advance(dc);
73710361 12059 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
12060 break;
12061 case DISAS_SMC:
37e6456e 12062 gen_ss_advance(dc);
73710361 12063 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
12064 break;
12065 case DISAS_NEXT:
12066 case DISAS_UPDATE:
12067 gen_set_pc_im(dc, dc->pc);
12068 /* fall through */
12069 default:
5425415e
PM
12070 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12071 gen_singlestep_exception(dc);
7999a5c8 12072 }
8aaca4c0 12073 } else {
9ee6e8bb
PB
12074 /* While branches must always occur at the end of an IT block,
12075 there are a few other things that can cause us to terminate
65626741 12076 the TB in the middle of an IT block:
9ee6e8bb
PB
12077 - Exception generating instructions (bkpt, swi, undefined).
12078 - Page boundaries.
12079 - Hardware watchpoints.
12080 Hardware breakpoints have already been handled and skip this code.
12081 */
8aaca4c0 12082 switch(dc->is_jmp) {
8aaca4c0 12083 case DISAS_NEXT:
6e256c93 12084 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 12085 break;
8aaca4c0 12086 case DISAS_UPDATE:
577bf808
SF
12087 gen_set_pc_im(dc, dc->pc);
12088 /* fall through */
12089 case DISAS_JUMP:
12090 default:
8aaca4c0 12091 /* indicate that the hash table must be used to find the next TB */
57fec1fe 12092 tcg_gen_exit_tb(0);
8aaca4c0
FB
12093 break;
12094 case DISAS_TB_JUMP:
12095 /* nothing more to generate */
12096 break;
9ee6e8bb 12097 case DISAS_WFI:
1ce94f81 12098 gen_helper_wfi(cpu_env);
84549b6d
PM
12099 /* The helper doesn't necessarily throw an exception, but we
12100 * must go back to the main loop to check for interrupts anyway.
12101 */
12102 tcg_gen_exit_tb(0);
9ee6e8bb 12103 break;
72c1d3af
PM
12104 case DISAS_WFE:
12105 gen_helper_wfe(cpu_env);
12106 break;
c87e5a61
PM
12107 case DISAS_YIELD:
12108 gen_helper_yield(cpu_env);
12109 break;
9ee6e8bb 12110 case DISAS_SWI:
73710361
GB
12111 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12112 default_exception_el(dc));
9ee6e8bb 12113 break;
37e6456e 12114 case DISAS_HVC:
73710361 12115 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
12116 break;
12117 case DISAS_SMC:
73710361 12118 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 12119 break;
8aaca4c0 12120 }
f021b2c4
PM
12121 }
12122
12123 if (dc->condjmp) {
12124 /* "Condition failed" instruction codepath for the branch/trap insn */
12125 gen_set_label(dc->condlabel);
12126 gen_set_condexec(dc);
b636649f 12127 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
12128 gen_set_pc_im(dc, dc->pc);
12129 gen_singlestep_exception(dc);
12130 } else {
6e256c93 12131 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 12132 }
2c0262af 12133 }
2e70f6ef 12134
9ee6e8bb 12135done_generating:
806f352d 12136 gen_tb_end(tb, num_insns);
2c0262af
FB
12137
12138#ifdef DEBUG_DISAS
06486077
AB
12139 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
12140 qemu_log_in_addr_range(pc_start)) {
1ee73216 12141 qemu_log_lock();
93fcfe39
AL
12142 qemu_log("----------------\n");
12143 qemu_log("IN: %s\n", lookup_symbol(pc_start));
d49190c4 12144 log_target_disas(cs, pc_start, dc->pc - pc_start,
f9fd40eb 12145 dc->thumb | (dc->sctlr_b << 1));
93fcfe39 12146 qemu_log("\n");
1ee73216 12147 qemu_log_unlock();
2c0262af
FB
12148 }
12149#endif
4e5e1215
RH
12150 tb->size = dc->pc - pc_start;
12151 tb->icount = num_insns;
2c0262af
FB
12152}
12153
b5ff1b31 12154static const char *cpu_mode_names[16] = {
28c9457d
EI
12155 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12156 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 12157};
9ee6e8bb 12158
878096ee
AF
12159void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12160 int flags)
2c0262af 12161{
878096ee
AF
12162 ARMCPU *cpu = ARM_CPU(cs);
12163 CPUARMState *env = &cpu->env;
2c0262af 12164 int i;
b5ff1b31 12165 uint32_t psr;
06e5cf7a 12166 const char *ns_status;
2c0262af 12167
17731115
PM
12168 if (is_a64(env)) {
12169 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12170 return;
12171 }
12172
2c0262af 12173 for(i=0;i<16;i++) {
7fe48483 12174 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12175 if ((i % 4) == 3)
7fe48483 12176 cpu_fprintf(f, "\n");
2c0262af 12177 else
7fe48483 12178 cpu_fprintf(f, " ");
2c0262af 12179 }
b5ff1b31 12180 psr = cpsr_read(env);
06e5cf7a
PM
12181
12182 if (arm_feature(env, ARM_FEATURE_EL3) &&
12183 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12184 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12185 } else {
12186 ns_status = "";
12187 }
12188
12189 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 12190 psr,
b5ff1b31
FB
12191 psr & (1 << 31) ? 'N' : '-',
12192 psr & (1 << 30) ? 'Z' : '-',
12193 psr & (1 << 29) ? 'C' : '-',
12194 psr & (1 << 28) ? 'V' : '-',
5fafdf24 12195 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 12196 ns_status,
b5ff1b31 12197 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 12198
f2617cfc
PM
12199 if (flags & CPU_DUMP_FPU) {
12200 int numvfpregs = 0;
12201 if (arm_feature(env, ARM_FEATURE_VFP)) {
12202 numvfpregs += 16;
12203 }
12204 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12205 numvfpregs += 16;
12206 }
12207 for (i = 0; i < numvfpregs; i++) {
12208 uint64_t v = float64_val(env->vfp.regs[i]);
12209 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12210 i * 2, (uint32_t)v,
12211 i * 2 + 1, (uint32_t)(v >> 32),
12212 i, v);
12213 }
12214 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12215 }
2c0262af 12216}
a6b025d3 12217
bad729e2
RH
12218void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12219 target_ulong *data)
d2856f1a 12220{
3926cc84 12221 if (is_a64(env)) {
bad729e2 12222 env->pc = data[0];
40f860cd 12223 env->condexec_bits = 0;
aaa1f954 12224 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12225 } else {
bad729e2
RH
12226 env->regs[15] = data[0];
12227 env->condexec_bits = data[1];
aaa1f954 12228 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12229 }
d2856f1a 12230}