]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
target-s390x: Use mulu2 for mlgr insn
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
b90372ad 56 /* Thumb-2 conditional execution bits. */
9ee6e8bb
PB
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
d8fd2954 62 int bswap_code;
b5ff1b31
FB
63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22
PM
67 int vec_len;
68 int vec_stride;
2c0262af
FB
69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb 79/* These instructions trap after executing, so defer them until after the
b90372ad 80 conditional execution state has been updated. */
9ee6e8bb
PB
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
66c374de 88static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
89static TCGv_i32 cpu_exclusive_addr;
90static TCGv_i32 cpu_exclusive_val;
91static TCGv_i32 cpu_exclusive_high;
92#ifdef CONFIG_USER_ONLY
93static TCGv_i32 cpu_exclusive_test;
94static TCGv_i32 cpu_exclusive_info;
95#endif
ad69471c 96
b26eefb6 97/* FIXME: These should be removed. */
a7812ae4
PB
98static TCGv cpu_F0s, cpu_F1s;
99static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 100
022c62cb 101#include "exec/gen-icount.h"
2e70f6ef 102
155c3eac
FN
103static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106
b26eefb6
PB
107/* initialize TCG globals. */
108void arm_translate_init(void)
109{
155c3eac
FN
110 int i;
111
a7812ae4
PB
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113
155c3eac
FN
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 116 offsetof(CPUARMState, regs[i]),
155c3eac
FN
117 regnames[i]);
118 }
66c374de
AJ
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
123
426f5abc 124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
130#ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 135#endif
155c3eac 136
a7812ae4 137#define GEN_HELPER 2
7b59220e 138#include "helper.h"
b26eefb6
PB
139}
140
d9ba4830
PB
141static inline TCGv load_cpu_offset(int offset)
142{
7d1b0095 143 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
146}
147
0ecb72a5 148#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830
PB
149
150static inline void store_cpu_offset(TCGv var, int offset)
151{
152 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 153 tcg_temp_free_i32(var);
d9ba4830
PB
154}
155
156#define store_cpu_field(var, name) \
0ecb72a5 157 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 158
b26eefb6
PB
159/* Set a variable to the value of a CPU register. */
160static void load_reg_var(DisasContext *s, TCGv var, int reg)
161{
162 if (reg == 15) {
163 uint32_t addr;
b90372ad 164 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
155c3eac 171 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
172 }
173}
174
175/* Create a new temporary and set it to the value of a CPU register. */
176static inline TCGv load_reg(DisasContext *s, int reg)
177{
7d1b0095 178 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
179 load_reg_var(s, tmp, reg);
180 return tmp;
181}
182
183/* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
185static void store_reg(DisasContext *s, int reg, TCGv var)
186{
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
190 }
155c3eac 191 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 192 tcg_temp_free_i32(var);
b26eefb6
PB
193}
194
b26eefb6 195/* Value extensions. */
86831435
PB
196#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
198#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
200
1497c961
PB
201#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 203
b26eefb6 204
b75263d6
JR
205static inline void gen_set_cpsr(TCGv var, uint32_t mask)
206{
207 TCGv tmp_mask = tcg_const_i32(mask);
1ce94f81 208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
209 tcg_temp_free_i32(tmp_mask);
210}
d9ba4830
PB
211/* Set NZCV flags from the high 4 bits of var. */
212#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
213
214static void gen_exception(int excp)
215{
7d1b0095 216 TCGv tmp = tcg_temp_new_i32();
d9ba4830 217 tcg_gen_movi_i32(tmp, excp);
1ce94f81 218 gen_helper_exception(cpu_env, tmp);
7d1b0095 219 tcg_temp_free_i32(tmp);
d9ba4830
PB
220}
221
3670669c
PB
222static void gen_smul_dual(TCGv a, TCGv b)
223{
7d1b0095
PM
224 TCGv tmp1 = tcg_temp_new_i32();
225 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
3670669c 228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 229 tcg_temp_free_i32(tmp2);
3670669c
PB
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
7d1b0095 234 tcg_temp_free_i32(tmp1);
3670669c
PB
235}
236
237/* Byteswap each halfword. */
238static void gen_rev16(TCGv var)
239{
7d1b0095 240 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
7d1b0095 246 tcg_temp_free_i32(tmp);
3670669c
PB
247}
248
249/* Byteswap low halfword and sign extend. */
250static void gen_revsh(TCGv var)
251{
1a855029
AJ
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
3670669c
PB
255}
256
257/* Unsigned bitfield extract. */
258static void gen_ubfx(TCGv var, int shift, uint32_t mask)
259{
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
263}
264
265/* Signed bitfield extract. */
266static void gen_sbfx(TCGv var, int shift, int width)
267{
268 uint32_t signbit;
269
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
277 }
278}
279
838fa72d
AJ
280/* Return (b << 32) + a. Mark inputs as dead */
281static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 282{
838fa72d
AJ
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
284
285 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 286 tcg_temp_free_i32(b);
838fa72d
AJ
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_add_i64(a, tmp64, a);
289
290 tcg_temp_free_i64(tmp64);
291 return a;
292}
293
294/* Return (b << 32) - a. Mark inputs as dead. */
295static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
296{
297 TCGv_i64 tmp64 = tcg_temp_new_i64();
298
299 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 300 tcg_temp_free_i32(b);
838fa72d
AJ
301 tcg_gen_shli_i64(tmp64, tmp64, 32);
302 tcg_gen_sub_i64(a, tmp64, a);
303
304 tcg_temp_free_i64(tmp64);
305 return a;
3670669c
PB
306}
307
8f01245e
PB
308/* FIXME: Most targets have native widening multiplication.
309 It would be good to use that instead of a full wide multiply. */
5e3f878a 310/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 311static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 312{
a7812ae4
PB
313 TCGv_i64 tmp1 = tcg_temp_new_i64();
314 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
315
316 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 317 tcg_temp_free_i32(a);
5e3f878a 318 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 319 tcg_temp_free_i32(b);
5e3f878a 320 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 321 tcg_temp_free_i64(tmp2);
5e3f878a
PB
322 return tmp1;
323}
324
a7812ae4 325static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 326{
a7812ae4
PB
327 TCGv_i64 tmp1 = tcg_temp_new_i64();
328 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
329
330 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 331 tcg_temp_free_i32(a);
5e3f878a 332 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 333 tcg_temp_free_i32(b);
5e3f878a 334 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 335 tcg_temp_free_i64(tmp2);
5e3f878a
PB
336 return tmp1;
337}
338
8f01245e
PB
339/* Swap low and high halfwords. */
340static void gen_swap_half(TCGv var)
341{
7d1b0095 342 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
343 tcg_gen_shri_i32(tmp, var, 16);
344 tcg_gen_shli_i32(var, var, 16);
345 tcg_gen_or_i32(var, var, tmp);
7d1b0095 346 tcg_temp_free_i32(tmp);
8f01245e
PB
347}
348
b26eefb6
PB
349/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
350 tmp = (t0 ^ t1) & 0x8000;
351 t0 &= ~0x8000;
352 t1 &= ~0x8000;
353 t0 = (t0 + t1) ^ tmp;
354 */
355
356static void gen_add16(TCGv t0, TCGv t1)
357{
7d1b0095 358 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
359 tcg_gen_xor_i32(tmp, t0, t1);
360 tcg_gen_andi_i32(tmp, tmp, 0x8000);
361 tcg_gen_andi_i32(t0, t0, ~0x8000);
362 tcg_gen_andi_i32(t1, t1, ~0x8000);
363 tcg_gen_add_i32(t0, t0, t1);
364 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
365 tcg_temp_free_i32(tmp);
366 tcg_temp_free_i32(t1);
b26eefb6
PB
367}
368
369/* Set CF to the top bit of var. */
370static void gen_set_CF_bit31(TCGv var)
371{
66c374de 372 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
373}
374
375/* Set N and Z flags from var. */
376static inline void gen_logic_CC(TCGv var)
377{
66c374de
AJ
378 tcg_gen_mov_i32(cpu_NF, var);
379 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
380}
381
382/* T0 += T1 + CF. */
396e467c 383static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 384{
396e467c 385 tcg_gen_add_i32(t0, t0, t1);
66c374de 386 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
387}
388
e9bb4aa9
JR
389/* dest = T0 + T1 + CF. */
390static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
391{
e9bb4aa9 392 tcg_gen_add_i32(dest, t0, t1);
66c374de 393 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
394}
395
3670669c
PB
396/* dest = T0 - T1 + CF - 1. */
397static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
398{
3670669c 399 tcg_gen_sub_i32(dest, t0, t1);
66c374de 400 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 401 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
402}
403
72485ec4
AJ
404/* dest = T0 + T1. Compute C, N, V and Z flags */
405static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
406{
407 TCGv tmp;
408 tcg_gen_add_i32(cpu_NF, t0, t1);
409 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
410 tcg_gen_setcond_i32(TCG_COND_LTU, cpu_CF, cpu_NF, t0);
411 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
412 tmp = tcg_temp_new_i32();
413 tcg_gen_xor_i32(tmp, t0, t1);
414 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
415 tcg_temp_free_i32(tmp);
416 tcg_gen_mov_i32(dest, cpu_NF);
417}
418
419/* dest = T0 - T1. Compute C, N, V and Z flags */
420static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
421{
422 TCGv tmp;
423 tcg_gen_sub_i32(cpu_NF, t0, t1);
424 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
425 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
426 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
427 tmp = tcg_temp_new_i32();
428 tcg_gen_xor_i32(tmp, t0, t1);
429 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
430 tcg_temp_free_i32(tmp);
431 tcg_gen_mov_i32(dest, cpu_NF);
432}
433
365af80e
AJ
434#define GEN_SHIFT(name) \
435static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
436{ \
437 TCGv tmp1, tmp2, tmp3; \
438 tmp1 = tcg_temp_new_i32(); \
439 tcg_gen_andi_i32(tmp1, t1, 0xff); \
440 tmp2 = tcg_const_i32(0); \
441 tmp3 = tcg_const_i32(0x1f); \
442 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
443 tcg_temp_free_i32(tmp3); \
444 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
445 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
446 tcg_temp_free_i32(tmp2); \
447 tcg_temp_free_i32(tmp1); \
448}
449GEN_SHIFT(shl)
450GEN_SHIFT(shr)
451#undef GEN_SHIFT
452
453static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
454{
455 TCGv tmp1, tmp2;
456 tmp1 = tcg_temp_new_i32();
457 tcg_gen_andi_i32(tmp1, t1, 0xff);
458 tmp2 = tcg_const_i32(0x1f);
459 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
460 tcg_temp_free_i32(tmp2);
461 tcg_gen_sar_i32(dest, t0, tmp1);
462 tcg_temp_free_i32(tmp1);
463}
464
36c91fd1
PM
465static void tcg_gen_abs_i32(TCGv dest, TCGv src)
466{
467 TCGv c0 = tcg_const_i32(0);
468 TCGv tmp = tcg_temp_new_i32();
469 tcg_gen_neg_i32(tmp, src);
470 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
471 tcg_temp_free_i32(c0);
472 tcg_temp_free_i32(tmp);
473}
ad69471c 474
9a119ff6 475static void shifter_out_im(TCGv var, int shift)
b26eefb6 476{
9a119ff6 477 if (shift == 0) {
66c374de 478 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 479 } else {
66c374de
AJ
480 tcg_gen_shri_i32(cpu_CF, var, shift);
481 if (shift != 31) {
482 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
483 }
9a119ff6 484 }
9a119ff6 485}
b26eefb6 486
9a119ff6
PB
487/* Shift by immediate. Includes special handling for shift == 0. */
488static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
489{
490 switch (shiftop) {
491 case 0: /* LSL */
492 if (shift != 0) {
493 if (flags)
494 shifter_out_im(var, 32 - shift);
495 tcg_gen_shli_i32(var, var, shift);
496 }
497 break;
498 case 1: /* LSR */
499 if (shift == 0) {
500 if (flags) {
66c374de 501 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
502 }
503 tcg_gen_movi_i32(var, 0);
504 } else {
505 if (flags)
506 shifter_out_im(var, shift - 1);
507 tcg_gen_shri_i32(var, var, shift);
508 }
509 break;
510 case 2: /* ASR */
511 if (shift == 0)
512 shift = 32;
513 if (flags)
514 shifter_out_im(var, shift - 1);
515 if (shift == 32)
516 shift = 31;
517 tcg_gen_sari_i32(var, var, shift);
518 break;
519 case 3: /* ROR/RRX */
520 if (shift != 0) {
521 if (flags)
522 shifter_out_im(var, shift - 1);
f669df27 523 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 524 } else {
66c374de 525 TCGv tmp = tcg_temp_new_i32();
b6348f29 526 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
527 if (flags)
528 shifter_out_im(var, 0);
529 tcg_gen_shri_i32(var, var, 1);
b26eefb6 530 tcg_gen_or_i32(var, var, tmp);
7d1b0095 531 tcg_temp_free_i32(tmp);
b26eefb6
PB
532 }
533 }
534};
535
8984bd2e
PB
536static inline void gen_arm_shift_reg(TCGv var, int shiftop,
537 TCGv shift, int flags)
538{
539 if (flags) {
540 switch (shiftop) {
9ef39277
BS
541 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
542 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
543 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
544 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
545 }
546 } else {
547 switch (shiftop) {
365af80e
AJ
548 case 0:
549 gen_shl(var, var, shift);
550 break;
551 case 1:
552 gen_shr(var, var, shift);
553 break;
554 case 2:
555 gen_sar(var, var, shift);
556 break;
f669df27
AJ
557 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
558 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
559 }
560 }
7d1b0095 561 tcg_temp_free_i32(shift);
8984bd2e
PB
562}
563
6ddbc6e4
PB
564#define PAS_OP(pfx) \
565 switch (op2) { \
566 case 0: gen_pas_helper(glue(pfx,add16)); break; \
567 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
568 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
569 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
570 case 4: gen_pas_helper(glue(pfx,add8)); break; \
571 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
572 }
d9ba4830 573static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 574{
a7812ae4 575 TCGv_ptr tmp;
6ddbc6e4
PB
576
577 switch (op1) {
578#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
579 case 1:
a7812ae4 580 tmp = tcg_temp_new_ptr();
0ecb72a5 581 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 582 PAS_OP(s)
b75263d6 583 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
584 break;
585 case 5:
a7812ae4 586 tmp = tcg_temp_new_ptr();
0ecb72a5 587 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 588 PAS_OP(u)
b75263d6 589 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
590 break;
591#undef gen_pas_helper
592#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
593 case 2:
594 PAS_OP(q);
595 break;
596 case 3:
597 PAS_OP(sh);
598 break;
599 case 6:
600 PAS_OP(uq);
601 break;
602 case 7:
603 PAS_OP(uh);
604 break;
605#undef gen_pas_helper
606 }
607}
9ee6e8bb
PB
608#undef PAS_OP
609
6ddbc6e4
PB
610/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
611#define PAS_OP(pfx) \
ed89a2f1 612 switch (op1) { \
6ddbc6e4
PB
613 case 0: gen_pas_helper(glue(pfx,add8)); break; \
614 case 1: gen_pas_helper(glue(pfx,add16)); break; \
615 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
616 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
617 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
618 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
619 }
d9ba4830 620static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 621{
a7812ae4 622 TCGv_ptr tmp;
6ddbc6e4 623
ed89a2f1 624 switch (op2) {
6ddbc6e4
PB
625#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
626 case 0:
a7812ae4 627 tmp = tcg_temp_new_ptr();
0ecb72a5 628 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 629 PAS_OP(s)
b75263d6 630 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
631 break;
632 case 4:
a7812ae4 633 tmp = tcg_temp_new_ptr();
0ecb72a5 634 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 635 PAS_OP(u)
b75263d6 636 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
637 break;
638#undef gen_pas_helper
639#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
640 case 1:
641 PAS_OP(q);
642 break;
643 case 2:
644 PAS_OP(sh);
645 break;
646 case 5:
647 PAS_OP(uq);
648 break;
649 case 6:
650 PAS_OP(uh);
651 break;
652#undef gen_pas_helper
653 }
654}
9ee6e8bb
PB
655#undef PAS_OP
656
d9ba4830
PB
657static void gen_test_cc(int cc, int label)
658{
659 TCGv tmp;
d9ba4830
PB
660 int inv;
661
d9ba4830
PB
662 switch (cc) {
663 case 0: /* eq: Z */
66c374de 664 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
665 break;
666 case 1: /* ne: !Z */
66c374de 667 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
668 break;
669 case 2: /* cs: C */
66c374de 670 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
671 break;
672 case 3: /* cc: !C */
66c374de 673 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
674 break;
675 case 4: /* mi: N */
66c374de 676 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
677 break;
678 case 5: /* pl: !N */
66c374de 679 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
680 break;
681 case 6: /* vs: V */
66c374de 682 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
683 break;
684 case 7: /* vc: !V */
66c374de 685 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
686 break;
687 case 8: /* hi: C && !Z */
688 inv = gen_new_label();
66c374de
AJ
689 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
690 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
691 gen_set_label(inv);
692 break;
693 case 9: /* ls: !C || Z */
66c374de
AJ
694 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
695 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
696 break;
697 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
698 tmp = tcg_temp_new_i32();
699 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 700 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 701 tcg_temp_free_i32(tmp);
d9ba4830
PB
702 break;
703 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
704 tmp = tcg_temp_new_i32();
705 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 706 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 707 tcg_temp_free_i32(tmp);
d9ba4830
PB
708 break;
709 case 12: /* gt: !Z && N == V */
710 inv = gen_new_label();
66c374de
AJ
711 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
712 tmp = tcg_temp_new_i32();
713 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 714 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 715 tcg_temp_free_i32(tmp);
d9ba4830
PB
716 gen_set_label(inv);
717 break;
718 case 13: /* le: Z || N != V */
66c374de
AJ
719 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
720 tmp = tcg_temp_new_i32();
721 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 722 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 723 tcg_temp_free_i32(tmp);
d9ba4830
PB
724 break;
725 default:
726 fprintf(stderr, "Bad condition code 0x%x\n", cc);
727 abort();
728 }
d9ba4830 729}
2c0262af 730
b1d8e52e 731static const uint8_t table_logic_cc[16] = {
2c0262af
FB
732 1, /* and */
733 1, /* xor */
734 0, /* sub */
735 0, /* rsb */
736 0, /* add */
737 0, /* adc */
738 0, /* sbc */
739 0, /* rsc */
740 1, /* andl */
741 1, /* xorl */
742 0, /* cmp */
743 0, /* cmn */
744 1, /* orr */
745 1, /* mov */
746 1, /* bic */
747 1, /* mvn */
748};
3b46e624 749
d9ba4830
PB
750/* Set PC and Thumb state from an immediate address. */
751static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 752{
b26eefb6 753 TCGv tmp;
99c475ab 754
b26eefb6 755 s->is_jmp = DISAS_UPDATE;
d9ba4830 756 if (s->thumb != (addr & 1)) {
7d1b0095 757 tmp = tcg_temp_new_i32();
d9ba4830 758 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 759 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 760 tcg_temp_free_i32(tmp);
d9ba4830 761 }
155c3eac 762 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
763}
764
765/* Set PC and Thumb state from var. var is marked as dead. */
766static inline void gen_bx(DisasContext *s, TCGv var)
767{
d9ba4830 768 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
769 tcg_gen_andi_i32(cpu_R[15], var, ~1);
770 tcg_gen_andi_i32(var, var, 1);
771 store_cpu_field(var, thumb);
d9ba4830
PB
772}
773
21aeb343
JR
774/* Variant of store_reg which uses branch&exchange logic when storing
775 to r15 in ARM architecture v7 and above. The source must be a temporary
776 and will be marked as dead. */
0ecb72a5 777static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
21aeb343
JR
778 int reg, TCGv var)
779{
780 if (reg == 15 && ENABLE_ARCH_7) {
781 gen_bx(s, var);
782 } else {
783 store_reg(s, reg, var);
784 }
785}
786
be5e7a76
DES
787/* Variant of store_reg which uses branch&exchange logic when storing
788 * to r15 in ARM architecture v5T and above. This is used for storing
789 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
790 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 791static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
be5e7a76
DES
792 int reg, TCGv var)
793{
794 if (reg == 15 && ENABLE_ARCH_5) {
795 gen_bx(s, var);
796 } else {
797 store_reg(s, reg, var);
798 }
799}
800
b0109805
PB
801static inline TCGv gen_ld8s(TCGv addr, int index)
802{
7d1b0095 803 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
804 tcg_gen_qemu_ld8s(tmp, addr, index);
805 return tmp;
806}
807static inline TCGv gen_ld8u(TCGv addr, int index)
808{
7d1b0095 809 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
810 tcg_gen_qemu_ld8u(tmp, addr, index);
811 return tmp;
812}
813static inline TCGv gen_ld16s(TCGv addr, int index)
814{
7d1b0095 815 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
816 tcg_gen_qemu_ld16s(tmp, addr, index);
817 return tmp;
818}
819static inline TCGv gen_ld16u(TCGv addr, int index)
820{
7d1b0095 821 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
822 tcg_gen_qemu_ld16u(tmp, addr, index);
823 return tmp;
824}
825static inline TCGv gen_ld32(TCGv addr, int index)
826{
7d1b0095 827 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
828 tcg_gen_qemu_ld32u(tmp, addr, index);
829 return tmp;
830}
84496233
JR
831static inline TCGv_i64 gen_ld64(TCGv addr, int index)
832{
833 TCGv_i64 tmp = tcg_temp_new_i64();
834 tcg_gen_qemu_ld64(tmp, addr, index);
835 return tmp;
836}
b0109805
PB
837static inline void gen_st8(TCGv val, TCGv addr, int index)
838{
839 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 840 tcg_temp_free_i32(val);
b0109805
PB
841}
842static inline void gen_st16(TCGv val, TCGv addr, int index)
843{
844 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 845 tcg_temp_free_i32(val);
b0109805
PB
846}
847static inline void gen_st32(TCGv val, TCGv addr, int index)
848{
849 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 850 tcg_temp_free_i32(val);
b0109805 851}
84496233
JR
852static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
853{
854 tcg_gen_qemu_st64(val, addr, index);
855 tcg_temp_free_i64(val);
856}
b5ff1b31 857
5e3f878a
PB
858static inline void gen_set_pc_im(uint32_t val)
859{
155c3eac 860 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
861}
862
b5ff1b31
FB
863/* Force a TB lookup after an instruction that changes the CPU state. */
864static inline void gen_lookup_tb(DisasContext *s)
865{
a6445c52 866 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
867 s->is_jmp = DISAS_UPDATE;
868}
869
b0109805
PB
870static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
871 TCGv var)
2c0262af 872{
1e8d4eec 873 int val, rm, shift, shiftop;
b26eefb6 874 TCGv offset;
2c0262af
FB
875
876 if (!(insn & (1 << 25))) {
877 /* immediate */
878 val = insn & 0xfff;
879 if (!(insn & (1 << 23)))
880 val = -val;
537730b9 881 if (val != 0)
b0109805 882 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
883 } else {
884 /* shift/register */
885 rm = (insn) & 0xf;
886 shift = (insn >> 7) & 0x1f;
1e8d4eec 887 shiftop = (insn >> 5) & 3;
b26eefb6 888 offset = load_reg(s, rm);
9a119ff6 889 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 890 if (!(insn & (1 << 23)))
b0109805 891 tcg_gen_sub_i32(var, var, offset);
2c0262af 892 else
b0109805 893 tcg_gen_add_i32(var, var, offset);
7d1b0095 894 tcg_temp_free_i32(offset);
2c0262af
FB
895 }
896}
897
191f9a93 898static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 899 int extra, TCGv var)
2c0262af
FB
900{
901 int val, rm;
b26eefb6 902 TCGv offset;
3b46e624 903
2c0262af
FB
904 if (insn & (1 << 22)) {
905 /* immediate */
906 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
907 if (!(insn & (1 << 23)))
908 val = -val;
18acad92 909 val += extra;
537730b9 910 if (val != 0)
b0109805 911 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
912 } else {
913 /* register */
191f9a93 914 if (extra)
b0109805 915 tcg_gen_addi_i32(var, var, extra);
2c0262af 916 rm = (insn) & 0xf;
b26eefb6 917 offset = load_reg(s, rm);
2c0262af 918 if (!(insn & (1 << 23)))
b0109805 919 tcg_gen_sub_i32(var, var, offset);
2c0262af 920 else
b0109805 921 tcg_gen_add_i32(var, var, offset);
7d1b0095 922 tcg_temp_free_i32(offset);
2c0262af
FB
923 }
924}
925
5aaebd13
PM
926static TCGv_ptr get_fpstatus_ptr(int neon)
927{
928 TCGv_ptr statusptr = tcg_temp_new_ptr();
929 int offset;
930 if (neon) {
0ecb72a5 931 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 932 } else {
0ecb72a5 933 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
934 }
935 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
936 return statusptr;
937}
938
4373f3ce
PB
939#define VFP_OP2(name) \
940static inline void gen_vfp_##name(int dp) \
941{ \
ae1857ec
PM
942 TCGv_ptr fpst = get_fpstatus_ptr(0); \
943 if (dp) { \
944 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
945 } else { \
946 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
947 } \
948 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
949}
950
4373f3ce
PB
951VFP_OP2(add)
952VFP_OP2(sub)
953VFP_OP2(mul)
954VFP_OP2(div)
955
956#undef VFP_OP2
957
605a6aed
PM
958static inline void gen_vfp_F1_mul(int dp)
959{
960 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 961 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 962 if (dp) {
ae1857ec 963 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 964 } else {
ae1857ec 965 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 966 }
ae1857ec 967 tcg_temp_free_ptr(fpst);
605a6aed
PM
968}
969
970static inline void gen_vfp_F1_neg(int dp)
971{
972 /* Like gen_vfp_neg() but put result in F1 */
973 if (dp) {
974 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
975 } else {
976 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
977 }
978}
979
4373f3ce
PB
980static inline void gen_vfp_abs(int dp)
981{
982 if (dp)
983 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
984 else
985 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
986}
987
988static inline void gen_vfp_neg(int dp)
989{
990 if (dp)
991 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
992 else
993 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
994}
995
996static inline void gen_vfp_sqrt(int dp)
997{
998 if (dp)
999 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1000 else
1001 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1002}
1003
1004static inline void gen_vfp_cmp(int dp)
1005{
1006 if (dp)
1007 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1008 else
1009 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1010}
1011
1012static inline void gen_vfp_cmpe(int dp)
1013{
1014 if (dp)
1015 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1016 else
1017 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1018}
1019
1020static inline void gen_vfp_F1_ld0(int dp)
1021{
1022 if (dp)
5b340b51 1023 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1024 else
5b340b51 1025 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1026}
1027
5500b06c
PM
/* Integer-to-float conversions (uito = unsigned, sito = signed).
 * The 32-bit source is in cpu_F0s; the result goes to cpu_F0d (dp) or
 * cpu_F0s.  neon selects the Neon standard-FP status. */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1043
5500b06c
PM
/* Float-to-integer conversions (toui/tosi and the "z" round-to-zero
 * variants).  Source is cpu_F0d (dp) or cpu_F0s; the 32-bit integer
 * result always lands in cpu_F0s.  neon selects the Neon standard-FP
 * status. */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce
PB
1061
/* Fixed-point <-> float conversions, in place on F0.  "to*" converts
 * float to fixed point, "*to" converts fixed point to float; h/l pick
 * 16/32-bit fixed-point forms and s/u signedness.  shift is the number
 * of fraction bits, passed to the helper as an i32 constant.  neon
 * selects the Neon standard-FP status. */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1084
312eea9f 1085static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1086{
1087 if (dp)
312eea9f 1088 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1089 else
312eea9f 1090 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1091}
1092
312eea9f 1093static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1094{
1095 if (dp)
312eea9f 1096 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1097 else
312eea9f 1098 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1099}
1100
8e96005d
FB
1101static inline long
1102vfp_reg_offset (int dp, int reg)
1103{
1104 if (dp)
1105 return offsetof(CPUARMState, vfp.regs[reg]);
1106 else if (reg & 1) {
1107 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1108 + offsetof(CPU_DoubleU, l.upper);
1109 } else {
1110 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1111 + offsetof(CPU_DoubleU, l.lower);
1112 }
1113}
9ee6e8bb
PB
1114
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1124
8f8e3aa4
PB
1125static TCGv neon_load_reg(int reg, int pass)
1126{
7d1b0095 1127 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1128 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1129 return tmp;
1130}
1131
/* Store var into 32-bit piece `pass' of NEON register `reg'.
 * Consumes (frees) var. */
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1137
/* Load 64-bit NEON register `reg' into var. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1142
/* Store var into 64-bit NEON register `reg' (var is not freed). */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1147
4373f3ce
PB
/* FP values are held in integer TCG temps, so the f32/f64 load and
 * store ops are plain aliases of the integer ones. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1152
b7bcbe95
FB
1153static inline void gen_mov_F0_vreg(int dp, int reg)
1154{
1155 if (dp)
4373f3ce 1156 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1157 else
4373f3ce 1158 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1159}
1160
1161static inline void gen_mov_F1_vreg(int dp, int reg)
1162{
1163 if (dp)
4373f3ce 1164 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1165 else
4373f3ce 1166 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1167}
1168
1169static inline void gen_mov_vreg_F0(int dp, int reg)
1170{
1171 if (dp)
4373f3ce 1172 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1173 else
4373f3ce 1174 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1175}
1176
18c9b560
AZ
/* Bit 20 of a coprocessor insn: set for the register-read (to-core)
 * forms, clear for the write forms — see the TMRRC/TMCRR split below. */
#define ARM_CP_RW_BIT (1 << 20)
1178
/* var = iwMMXt data register wRn. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1183
/* iwMMXt data register wRn = var (var is not freed). */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1188
da6b5335 1189static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1190{
7d1b0095 1191 TCGv var = tcg_temp_new_i32();
0ecb72a5 1192 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1193 return var;
e677137d
PB
1194}
1195
/* iwMMXt control register wCn = var.  Consumes (frees) var. */
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1201
/* wRn = M0 */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1206
/* M0 = wRn */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1211
/* M0 |= wRn (clobbers cpu_V1) */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1217
/* M0 &= wRn (clobbers cpu_V1) */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1223
/* M0 ^= wRn (clobbers cpu_V1) */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1229
/* Binary iwMMXt op: M0 = helper(M0, wRn).  Clobbers cpu_V1. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also receives cpu_env. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long (b/w/l) variants of an env op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary iwMMXt op with env: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1311
e677137d
PB
1312static void gen_op_iwmmxt_set_mup(void)
1313{
1314 TCGv tmp;
1315 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1316 tcg_gen_ori_i32(tmp, tmp, 2);
1317 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1318}
1319
1320static void gen_op_iwmmxt_set_cup(void)
1321{
1322 TCGv tmp;
1323 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1324 tcg_gen_ori_i32(tmp, tmp, 1);
1325 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1326}
1327
/* Compute the N/Z condition bits for cpu_M0 (via helper) and store
 * them into wCASF. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1334
/* M0 += zero-extended low 32 bits of wRn (clobbers cpu_V1). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1341
da6b5335 1342static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1343{
1344 int rd;
1345 uint32_t offset;
da6b5335 1346 TCGv tmp;
18c9b560
AZ
1347
1348 rd = (insn >> 16) & 0xf;
da6b5335 1349 tmp = load_reg(s, rd);
18c9b560
AZ
1350
1351 offset = (insn & 0xff) << ((insn >> 7) & 2);
1352 if (insn & (1 << 24)) {
1353 /* Pre indexed */
1354 if (insn & (1 << 23))
da6b5335 1355 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1356 else
da6b5335
FN
1357 tcg_gen_addi_i32(tmp, tmp, -offset);
1358 tcg_gen_mov_i32(dest, tmp);
18c9b560 1359 if (insn & (1 << 21))
da6b5335
FN
1360 store_reg(s, rd, tmp);
1361 else
7d1b0095 1362 tcg_temp_free_i32(tmp);
18c9b560
AZ
1363 } else if (insn & (1 << 21)) {
1364 /* Post indexed */
da6b5335 1365 tcg_gen_mov_i32(dest, tmp);
18c9b560 1366 if (insn & (1 << 23))
da6b5335 1367 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1368 else
da6b5335
FN
1369 tcg_gen_addi_i32(tmp, tmp, -offset);
1370 store_reg(s, rd, tmp);
18c9b560
AZ
1371 } else if (!(insn & (1 << 23)))
1372 return 1;
1373 return 0;
1374}
1375
/* Fetch the shift amount for an iwMMXt shift insn, AND it with mask,
 * and place it in dest.  If bit 8 of the insn is set the amount comes
 * from a wCGR control register (only wCGR0..wCGR3 are legal encodings);
 * otherwise it is the low 32 bits of data register wRn.  Returns
 * nonzero if the encoding is invalid (insn should UNDEF). */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1397
a1c7273b 1398/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1399 (ie. an undefined instruction). */
0ecb72a5 1400static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1401{
1402 int rd, wrd;
1403 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1404 TCGv addr;
1405 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1406
1407 if ((insn & 0x0e000e00) == 0x0c000000) {
1408 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1409 wrd = insn & 0xf;
1410 rdlo = (insn >> 12) & 0xf;
1411 rdhi = (insn >> 16) & 0xf;
1412 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1413 iwmmxt_load_reg(cpu_V0, wrd);
1414 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1415 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1416 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1417 } else { /* TMCRR */
da6b5335
FN
1418 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1419 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1420 gen_op_iwmmxt_set_mup();
1421 }
1422 return 0;
1423 }
1424
1425 wrd = (insn >> 12) & 0xf;
7d1b0095 1426 addr = tcg_temp_new_i32();
da6b5335 1427 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1428 tcg_temp_free_i32(addr);
18c9b560 1429 return 1;
da6b5335 1430 }
18c9b560
AZ
1431 if (insn & ARM_CP_RW_BIT) {
1432 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1433 tmp = tcg_temp_new_i32();
da6b5335
FN
1434 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1435 iwmmxt_store_creg(wrd, tmp);
18c9b560 1436 } else {
e677137d
PB
1437 i = 1;
1438 if (insn & (1 << 8)) {
1439 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1440 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1441 i = 0;
1442 } else { /* WLDRW wRd */
da6b5335 1443 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1444 }
1445 } else {
1446 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1447 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1448 } else { /* WLDRB */
da6b5335 1449 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1450 }
1451 }
1452 if (i) {
1453 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1454 tcg_temp_free_i32(tmp);
e677137d 1455 }
18c9b560
AZ
1456 gen_op_iwmmxt_movq_wRn_M0(wrd);
1457 }
1458 } else {
1459 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1460 tmp = iwmmxt_load_creg(wrd);
1461 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1462 } else {
1463 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1464 tmp = tcg_temp_new_i32();
e677137d
PB
1465 if (insn & (1 << 8)) {
1466 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1467 tcg_temp_free_i32(tmp);
da6b5335 1468 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1469 } else { /* WSTRW wRd */
1470 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1471 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1472 }
1473 } else {
1474 if (insn & (1 << 22)) { /* WSTRH */
1475 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1476 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1477 } else { /* WSTRB */
1478 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1479 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1480 }
1481 }
18c9b560
AZ
1482 }
1483 }
7d1b0095 1484 tcg_temp_free_i32(addr);
18c9b560
AZ
1485 return 0;
1486 }
1487
1488 if ((insn & 0x0f000000) != 0x0e000000)
1489 return 1;
1490
1491 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1492 case 0x000: /* WOR */
1493 wrd = (insn >> 12) & 0xf;
1494 rd0 = (insn >> 0) & 0xf;
1495 rd1 = (insn >> 16) & 0xf;
1496 gen_op_iwmmxt_movq_M0_wRn(rd0);
1497 gen_op_iwmmxt_orq_M0_wRn(rd1);
1498 gen_op_iwmmxt_setpsr_nz();
1499 gen_op_iwmmxt_movq_wRn_M0(wrd);
1500 gen_op_iwmmxt_set_mup();
1501 gen_op_iwmmxt_set_cup();
1502 break;
1503 case 0x011: /* TMCR */
1504 if (insn & 0xf)
1505 return 1;
1506 rd = (insn >> 12) & 0xf;
1507 wrd = (insn >> 16) & 0xf;
1508 switch (wrd) {
1509 case ARM_IWMMXT_wCID:
1510 case ARM_IWMMXT_wCASF:
1511 break;
1512 case ARM_IWMMXT_wCon:
1513 gen_op_iwmmxt_set_cup();
1514 /* Fall through. */
1515 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1516 tmp = iwmmxt_load_creg(wrd);
1517 tmp2 = load_reg(s, rd);
f669df27 1518 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1519 tcg_temp_free_i32(tmp2);
da6b5335 1520 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1521 break;
1522 case ARM_IWMMXT_wCGR0:
1523 case ARM_IWMMXT_wCGR1:
1524 case ARM_IWMMXT_wCGR2:
1525 case ARM_IWMMXT_wCGR3:
1526 gen_op_iwmmxt_set_cup();
da6b5335
FN
1527 tmp = load_reg(s, rd);
1528 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1529 break;
1530 default:
1531 return 1;
1532 }
1533 break;
1534 case 0x100: /* WXOR */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x111: /* TMRC */
1546 if (insn & 0xf)
1547 return 1;
1548 rd = (insn >> 12) & 0xf;
1549 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1550 tmp = iwmmxt_load_creg(wrd);
1551 store_reg(s, rd, tmp);
18c9b560
AZ
1552 break;
1553 case 0x300: /* WANDN */
1554 wrd = (insn >> 12) & 0xf;
1555 rd0 = (insn >> 0) & 0xf;
1556 rd1 = (insn >> 16) & 0xf;
1557 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1558 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1559 gen_op_iwmmxt_andq_M0_wRn(rd1);
1560 gen_op_iwmmxt_setpsr_nz();
1561 gen_op_iwmmxt_movq_wRn_M0(wrd);
1562 gen_op_iwmmxt_set_mup();
1563 gen_op_iwmmxt_set_cup();
1564 break;
1565 case 0x200: /* WAND */
1566 wrd = (insn >> 12) & 0xf;
1567 rd0 = (insn >> 0) & 0xf;
1568 rd1 = (insn >> 16) & 0xf;
1569 gen_op_iwmmxt_movq_M0_wRn(rd0);
1570 gen_op_iwmmxt_andq_M0_wRn(rd1);
1571 gen_op_iwmmxt_setpsr_nz();
1572 gen_op_iwmmxt_movq_wRn_M0(wrd);
1573 gen_op_iwmmxt_set_mup();
1574 gen_op_iwmmxt_set_cup();
1575 break;
1576 case 0x810: case 0xa10: /* WMADD */
1577 wrd = (insn >> 12) & 0xf;
1578 rd0 = (insn >> 0) & 0xf;
1579 rd1 = (insn >> 16) & 0xf;
1580 gen_op_iwmmxt_movq_M0_wRn(rd0);
1581 if (insn & (1 << 21))
1582 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1583 else
1584 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1585 gen_op_iwmmxt_movq_wRn_M0(wrd);
1586 gen_op_iwmmxt_set_mup();
1587 break;
1588 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1589 wrd = (insn >> 12) & 0xf;
1590 rd0 = (insn >> 16) & 0xf;
1591 rd1 = (insn >> 0) & 0xf;
1592 gen_op_iwmmxt_movq_M0_wRn(rd0);
1593 switch ((insn >> 22) & 3) {
1594 case 0:
1595 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1596 break;
1597 case 1:
1598 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1599 break;
1600 case 2:
1601 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1602 break;
1603 case 3:
1604 return 1;
1605 }
1606 gen_op_iwmmxt_movq_wRn_M0(wrd);
1607 gen_op_iwmmxt_set_mup();
1608 gen_op_iwmmxt_set_cup();
1609 break;
1610 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1611 wrd = (insn >> 12) & 0xf;
1612 rd0 = (insn >> 16) & 0xf;
1613 rd1 = (insn >> 0) & 0xf;
1614 gen_op_iwmmxt_movq_M0_wRn(rd0);
1615 switch ((insn >> 22) & 3) {
1616 case 0:
1617 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1618 break;
1619 case 1:
1620 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1621 break;
1622 case 2:
1623 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1624 break;
1625 case 3:
1626 return 1;
1627 }
1628 gen_op_iwmmxt_movq_wRn_M0(wrd);
1629 gen_op_iwmmxt_set_mup();
1630 gen_op_iwmmxt_set_cup();
1631 break;
1632 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1633 wrd = (insn >> 12) & 0xf;
1634 rd0 = (insn >> 16) & 0xf;
1635 rd1 = (insn >> 0) & 0xf;
1636 gen_op_iwmmxt_movq_M0_wRn(rd0);
1637 if (insn & (1 << 22))
1638 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1639 else
1640 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1641 if (!(insn & (1 << 20)))
1642 gen_op_iwmmxt_addl_M0_wRn(wrd);
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 break;
1646 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 16) & 0xf;
1649 rd1 = (insn >> 0) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1651 if (insn & (1 << 21)) {
1652 if (insn & (1 << 20))
1653 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1654 else
1655 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1656 } else {
1657 if (insn & (1 << 20))
1658 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1659 else
1660 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1661 }
18c9b560
AZ
1662 gen_op_iwmmxt_movq_wRn_M0(wrd);
1663 gen_op_iwmmxt_set_mup();
1664 break;
1665 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1666 wrd = (insn >> 12) & 0xf;
1667 rd0 = (insn >> 16) & 0xf;
1668 rd1 = (insn >> 0) & 0xf;
1669 gen_op_iwmmxt_movq_M0_wRn(rd0);
1670 if (insn & (1 << 21))
1671 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1672 else
1673 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1674 if (!(insn & (1 << 20))) {
e677137d
PB
1675 iwmmxt_load_reg(cpu_V1, wrd);
1676 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1677 }
1678 gen_op_iwmmxt_movq_wRn_M0(wrd);
1679 gen_op_iwmmxt_set_mup();
1680 break;
1681 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1682 wrd = (insn >> 12) & 0xf;
1683 rd0 = (insn >> 16) & 0xf;
1684 rd1 = (insn >> 0) & 0xf;
1685 gen_op_iwmmxt_movq_M0_wRn(rd0);
1686 switch ((insn >> 22) & 3) {
1687 case 0:
1688 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1689 break;
1690 case 1:
1691 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1692 break;
1693 case 2:
1694 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1695 break;
1696 case 3:
1697 return 1;
1698 }
1699 gen_op_iwmmxt_movq_wRn_M0(wrd);
1700 gen_op_iwmmxt_set_mup();
1701 gen_op_iwmmxt_set_cup();
1702 break;
1703 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1704 wrd = (insn >> 12) & 0xf;
1705 rd0 = (insn >> 16) & 0xf;
1706 rd1 = (insn >> 0) & 0xf;
1707 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1708 if (insn & (1 << 22)) {
1709 if (insn & (1 << 20))
1710 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1711 else
1712 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1713 } else {
1714 if (insn & (1 << 20))
1715 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1716 else
1717 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1718 }
18c9b560
AZ
1719 gen_op_iwmmxt_movq_wRn_M0(wrd);
1720 gen_op_iwmmxt_set_mup();
1721 gen_op_iwmmxt_set_cup();
1722 break;
1723 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1724 wrd = (insn >> 12) & 0xf;
1725 rd0 = (insn >> 16) & 0xf;
1726 rd1 = (insn >> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1728 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1729 tcg_gen_andi_i32(tmp, tmp, 7);
1730 iwmmxt_load_reg(cpu_V1, rd1);
1731 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1732 tcg_temp_free_i32(tmp);
18c9b560
AZ
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1737 if (((insn >> 6) & 3) == 3)
1738 return 1;
18c9b560
AZ
1739 rd = (insn >> 12) & 0xf;
1740 wrd = (insn >> 16) & 0xf;
da6b5335 1741 tmp = load_reg(s, rd);
18c9b560
AZ
1742 gen_op_iwmmxt_movq_M0_wRn(wrd);
1743 switch ((insn >> 6) & 3) {
1744 case 0:
da6b5335
FN
1745 tmp2 = tcg_const_i32(0xff);
1746 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1747 break;
1748 case 1:
da6b5335
FN
1749 tmp2 = tcg_const_i32(0xffff);
1750 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1751 break;
1752 case 2:
da6b5335
FN
1753 tmp2 = tcg_const_i32(0xffffffff);
1754 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1755 break;
da6b5335
FN
1756 default:
1757 TCGV_UNUSED(tmp2);
1758 TCGV_UNUSED(tmp3);
18c9b560 1759 }
da6b5335
FN
1760 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1761 tcg_temp_free(tmp3);
1762 tcg_temp_free(tmp2);
7d1b0095 1763 tcg_temp_free_i32(tmp);
18c9b560
AZ
1764 gen_op_iwmmxt_movq_wRn_M0(wrd);
1765 gen_op_iwmmxt_set_mup();
1766 break;
1767 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1768 rd = (insn >> 12) & 0xf;
1769 wrd = (insn >> 16) & 0xf;
da6b5335 1770 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1771 return 1;
1772 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1773 tmp = tcg_temp_new_i32();
18c9b560
AZ
1774 switch ((insn >> 22) & 3) {
1775 case 0:
da6b5335
FN
1776 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1777 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1778 if (insn & 8) {
1779 tcg_gen_ext8s_i32(tmp, tmp);
1780 } else {
1781 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1782 }
1783 break;
1784 case 1:
da6b5335
FN
1785 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1786 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1787 if (insn & 8) {
1788 tcg_gen_ext16s_i32(tmp, tmp);
1789 } else {
1790 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1791 }
1792 break;
1793 case 2:
da6b5335
FN
1794 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1795 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1796 break;
18c9b560 1797 }
da6b5335 1798 store_reg(s, rd, tmp);
18c9b560
AZ
1799 break;
1800 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1801 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1802 return 1;
da6b5335 1803 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1804 switch ((insn >> 22) & 3) {
1805 case 0:
da6b5335 1806 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1807 break;
1808 case 1:
da6b5335 1809 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1810 break;
1811 case 2:
da6b5335 1812 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1813 break;
18c9b560 1814 }
da6b5335
FN
1815 tcg_gen_shli_i32(tmp, tmp, 28);
1816 gen_set_nzcv(tmp);
7d1b0095 1817 tcg_temp_free_i32(tmp);
18c9b560
AZ
1818 break;
1819 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1820 if (((insn >> 6) & 3) == 3)
1821 return 1;
18c9b560
AZ
1822 rd = (insn >> 12) & 0xf;
1823 wrd = (insn >> 16) & 0xf;
da6b5335 1824 tmp = load_reg(s, rd);
18c9b560
AZ
1825 switch ((insn >> 6) & 3) {
1826 case 0:
da6b5335 1827 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1828 break;
1829 case 1:
da6b5335 1830 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1831 break;
1832 case 2:
da6b5335 1833 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1834 break;
18c9b560 1835 }
7d1b0095 1836 tcg_temp_free_i32(tmp);
18c9b560
AZ
1837 gen_op_iwmmxt_movq_wRn_M0(wrd);
1838 gen_op_iwmmxt_set_mup();
1839 break;
1840 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1841 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1842 return 1;
da6b5335 1843 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1844 tmp2 = tcg_temp_new_i32();
da6b5335 1845 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1846 switch ((insn >> 22) & 3) {
1847 case 0:
1848 for (i = 0; i < 7; i ++) {
da6b5335
FN
1849 tcg_gen_shli_i32(tmp2, tmp2, 4);
1850 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1851 }
1852 break;
1853 case 1:
1854 for (i = 0; i < 3; i ++) {
da6b5335
FN
1855 tcg_gen_shli_i32(tmp2, tmp2, 8);
1856 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1857 }
1858 break;
1859 case 2:
da6b5335
FN
1860 tcg_gen_shli_i32(tmp2, tmp2, 16);
1861 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1862 break;
18c9b560 1863 }
da6b5335 1864 gen_set_nzcv(tmp);
7d1b0095
PM
1865 tcg_temp_free_i32(tmp2);
1866 tcg_temp_free_i32(tmp);
18c9b560
AZ
1867 break;
1868 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1869 wrd = (insn >> 12) & 0xf;
1870 rd0 = (insn >> 16) & 0xf;
1871 gen_op_iwmmxt_movq_M0_wRn(rd0);
1872 switch ((insn >> 22) & 3) {
1873 case 0:
e677137d 1874 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1875 break;
1876 case 1:
e677137d 1877 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1878 break;
1879 case 2:
e677137d 1880 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1881 break;
1882 case 3:
1883 return 1;
1884 }
1885 gen_op_iwmmxt_movq_wRn_M0(wrd);
1886 gen_op_iwmmxt_set_mup();
1887 break;
1888 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1889 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1890 return 1;
da6b5335 1891 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1892 tmp2 = tcg_temp_new_i32();
da6b5335 1893 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1894 switch ((insn >> 22) & 3) {
1895 case 0:
1896 for (i = 0; i < 7; i ++) {
da6b5335
FN
1897 tcg_gen_shli_i32(tmp2, tmp2, 4);
1898 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1899 }
1900 break;
1901 case 1:
1902 for (i = 0; i < 3; i ++) {
da6b5335
FN
1903 tcg_gen_shli_i32(tmp2, tmp2, 8);
1904 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1905 }
1906 break;
1907 case 2:
da6b5335
FN
1908 tcg_gen_shli_i32(tmp2, tmp2, 16);
1909 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1910 break;
18c9b560 1911 }
da6b5335 1912 gen_set_nzcv(tmp);
7d1b0095
PM
1913 tcg_temp_free_i32(tmp2);
1914 tcg_temp_free_i32(tmp);
18c9b560
AZ
1915 break;
1916 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1917 rd = (insn >> 12) & 0xf;
1918 rd0 = (insn >> 16) & 0xf;
da6b5335 1919 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1920 return 1;
1921 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1922 tmp = tcg_temp_new_i32();
18c9b560
AZ
1923 switch ((insn >> 22) & 3) {
1924 case 0:
da6b5335 1925 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1926 break;
1927 case 1:
da6b5335 1928 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1929 break;
1930 case 2:
da6b5335 1931 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1932 break;
18c9b560 1933 }
da6b5335 1934 store_reg(s, rd, tmp);
18c9b560
AZ
1935 break;
1936 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1937 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1938 wrd = (insn >> 12) & 0xf;
1939 rd0 = (insn >> 16) & 0xf;
1940 rd1 = (insn >> 0) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1946 else
1947 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1952 else
1953 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1958 else
1959 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1960 break;
1961 case 3:
1962 return 1;
1963 }
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1969 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpacklsb_M0();
1977 else
1978 gen_op_iwmmxt_unpacklub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpacklsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackluw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpacklsl_M0();
1989 else
1990 gen_op_iwmmxt_unpacklul_M0();
1991 break;
1992 case 3:
1993 return 1;
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2000 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2001 wrd = (insn >> 12) & 0xf;
2002 rd0 = (insn >> 16) & 0xf;
2003 gen_op_iwmmxt_movq_M0_wRn(rd0);
2004 switch ((insn >> 22) & 3) {
2005 case 0:
2006 if (insn & (1 << 21))
2007 gen_op_iwmmxt_unpackhsb_M0();
2008 else
2009 gen_op_iwmmxt_unpackhub_M0();
2010 break;
2011 case 1:
2012 if (insn & (1 << 21))
2013 gen_op_iwmmxt_unpackhsw_M0();
2014 else
2015 gen_op_iwmmxt_unpackhuw_M0();
2016 break;
2017 case 2:
2018 if (insn & (1 << 21))
2019 gen_op_iwmmxt_unpackhsl_M0();
2020 else
2021 gen_op_iwmmxt_unpackhul_M0();
2022 break;
2023 case 3:
2024 return 1;
2025 }
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 gen_op_iwmmxt_set_cup();
2029 break;
2030 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2031 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2032 if (((insn >> 22) & 3) == 0)
2033 return 1;
18c9b560
AZ
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2037 tmp = tcg_temp_new_i32();
da6b5335 2038 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2039 tcg_temp_free_i32(tmp);
18c9b560 2040 return 1;
da6b5335 2041 }
18c9b560 2042 switch ((insn >> 22) & 3) {
18c9b560 2043 case 1:
477955bd 2044 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2045 break;
2046 case 2:
477955bd 2047 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2048 break;
2049 case 3:
477955bd 2050 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2051 break;
2052 }
7d1b0095 2053 tcg_temp_free_i32(tmp);
18c9b560
AZ
2054 gen_op_iwmmxt_movq_wRn_M0(wrd);
2055 gen_op_iwmmxt_set_mup();
2056 gen_op_iwmmxt_set_cup();
2057 break;
2058 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2059 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2060 if (((insn >> 22) & 3) == 0)
2061 return 1;
18c9b560
AZ
2062 wrd = (insn >> 12) & 0xf;
2063 rd0 = (insn >> 16) & 0xf;
2064 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2065 tmp = tcg_temp_new_i32();
da6b5335 2066 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2067 tcg_temp_free_i32(tmp);
18c9b560 2068 return 1;
da6b5335 2069 }
18c9b560 2070 switch ((insn >> 22) & 3) {
18c9b560 2071 case 1:
477955bd 2072 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2073 break;
2074 case 2:
477955bd 2075 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 case 3:
477955bd 2078 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2079 break;
2080 }
7d1b0095 2081 tcg_temp_free_i32(tmp);
18c9b560
AZ
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2087 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2088 if (((insn >> 22) & 3) == 0)
2089 return 1;
18c9b560
AZ
2090 wrd = (insn >> 12) & 0xf;
2091 rd0 = (insn >> 16) & 0xf;
2092 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2093 tmp = tcg_temp_new_i32();
da6b5335 2094 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2095 tcg_temp_free_i32(tmp);
18c9b560 2096 return 1;
da6b5335 2097 }
18c9b560 2098 switch ((insn >> 22) & 3) {
18c9b560 2099 case 1:
477955bd 2100 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2101 break;
2102 case 2:
477955bd 2103 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2104 break;
2105 case 3:
477955bd 2106 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2107 break;
2108 }
7d1b0095 2109 tcg_temp_free_i32(tmp);
18c9b560
AZ
2110 gen_op_iwmmxt_movq_wRn_M0(wrd);
2111 gen_op_iwmmxt_set_mup();
2112 gen_op_iwmmxt_set_cup();
2113 break;
2114 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2115 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2116 if (((insn >> 22) & 3) == 0)
2117 return 1;
18c9b560
AZ
2118 wrd = (insn >> 12) & 0xf;
2119 rd0 = (insn >> 16) & 0xf;
2120 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2121 tmp = tcg_temp_new_i32();
18c9b560 2122 switch ((insn >> 22) & 3) {
18c9b560 2123 case 1:
da6b5335 2124 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2125 tcg_temp_free_i32(tmp);
18c9b560 2126 return 1;
da6b5335 2127 }
477955bd 2128 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2129 break;
2130 case 2:
da6b5335 2131 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2132 tcg_temp_free_i32(tmp);
18c9b560 2133 return 1;
da6b5335 2134 }
477955bd 2135 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2136 break;
2137 case 3:
da6b5335 2138 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2139 tcg_temp_free_i32(tmp);
18c9b560 2140 return 1;
da6b5335 2141 }
477955bd 2142 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2143 break;
2144 }
7d1b0095 2145 tcg_temp_free_i32(tmp);
18c9b560
AZ
2146 gen_op_iwmmxt_movq_wRn_M0(wrd);
2147 gen_op_iwmmxt_set_mup();
2148 gen_op_iwmmxt_set_cup();
2149 break;
2150 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2151 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_minub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_minul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2182 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
2187 switch ((insn >> 22) & 3) {
2188 case 0:
2189 if (insn & (1 << 21))
2190 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2191 else
2192 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2193 break;
2194 case 1:
2195 if (insn & (1 << 21))
2196 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2197 else
2198 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2199 break;
2200 case 2:
2201 if (insn & (1 << 21))
2202 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2203 else
2204 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2205 break;
2206 case 3:
2207 return 1;
2208 }
2209 gen_op_iwmmxt_movq_wRn_M0(wrd);
2210 gen_op_iwmmxt_set_mup();
2211 break;
2212 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2213 case 0x402: case 0x502: case 0x602: case 0x702:
2214 wrd = (insn >> 12) & 0xf;
2215 rd0 = (insn >> 16) & 0xf;
2216 rd1 = (insn >> 0) & 0xf;
2217 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2218 tmp = tcg_const_i32((insn >> 20) & 3);
2219 iwmmxt_load_reg(cpu_V1, rd1);
2220 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2221 tcg_temp_free(tmp);
18c9b560
AZ
2222 gen_op_iwmmxt_movq_wRn_M0(wrd);
2223 gen_op_iwmmxt_set_mup();
2224 break;
2225 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2226 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2227 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2228 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2229 wrd = (insn >> 12) & 0xf;
2230 rd0 = (insn >> 16) & 0xf;
2231 rd1 = (insn >> 0) & 0xf;
2232 gen_op_iwmmxt_movq_M0_wRn(rd0);
2233 switch ((insn >> 20) & 0xf) {
2234 case 0x0:
2235 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2236 break;
2237 case 0x1:
2238 gen_op_iwmmxt_subub_M0_wRn(rd1);
2239 break;
2240 case 0x3:
2241 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2242 break;
2243 case 0x4:
2244 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2245 break;
2246 case 0x5:
2247 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2248 break;
2249 case 0x7:
2250 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2251 break;
2252 case 0x8:
2253 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2254 break;
2255 case 0x9:
2256 gen_op_iwmmxt_subul_M0_wRn(rd1);
2257 break;
2258 case 0xb:
2259 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2260 break;
2261 default:
2262 return 1;
2263 }
2264 gen_op_iwmmxt_movq_wRn_M0(wrd);
2265 gen_op_iwmmxt_set_mup();
2266 gen_op_iwmmxt_set_cup();
2267 break;
2268 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2269 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2270 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2271 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2275 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2276 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2277 tcg_temp_free(tmp);
18c9b560
AZ
2278 gen_op_iwmmxt_movq_wRn_M0(wrd);
2279 gen_op_iwmmxt_set_mup();
2280 gen_op_iwmmxt_set_cup();
2281 break;
2282 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2283 case 0x418: case 0x518: case 0x618: case 0x718:
2284 case 0x818: case 0x918: case 0xa18: case 0xb18:
2285 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2286 wrd = (insn >> 12) & 0xf;
2287 rd0 = (insn >> 16) & 0xf;
2288 rd1 = (insn >> 0) & 0xf;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
2290 switch ((insn >> 20) & 0xf) {
2291 case 0x0:
2292 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2293 break;
2294 case 0x1:
2295 gen_op_iwmmxt_addub_M0_wRn(rd1);
2296 break;
2297 case 0x3:
2298 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2299 break;
2300 case 0x4:
2301 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2302 break;
2303 case 0x5:
2304 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2305 break;
2306 case 0x7:
2307 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2308 break;
2309 case 0x8:
2310 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2311 break;
2312 case 0x9:
2313 gen_op_iwmmxt_addul_M0_wRn(rd1);
2314 break;
2315 case 0xb:
2316 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2317 break;
2318 default:
2319 return 1;
2320 }
2321 gen_op_iwmmxt_movq_wRn_M0(wrd);
2322 gen_op_iwmmxt_set_mup();
2323 gen_op_iwmmxt_set_cup();
2324 break;
2325 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2326 case 0x408: case 0x508: case 0x608: case 0x708:
2327 case 0x808: case 0x908: case 0xa08: case 0xb08:
2328 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2329 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2330 return 1;
18c9b560
AZ
2331 wrd = (insn >> 12) & 0xf;
2332 rd0 = (insn >> 16) & 0xf;
2333 rd1 = (insn >> 0) & 0xf;
2334 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2335 switch ((insn >> 22) & 3) {
18c9b560
AZ
2336 case 1:
2337 if (insn & (1 << 21))
2338 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2339 else
2340 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2341 break;
2342 case 2:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2345 else
2346 gen_op_iwmmxt_packul_M0_wRn(rd1);
2347 break;
2348 case 3:
2349 if (insn & (1 << 21))
2350 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2351 else
2352 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2353 break;
2354 }
2355 gen_op_iwmmxt_movq_wRn_M0(wrd);
2356 gen_op_iwmmxt_set_mup();
2357 gen_op_iwmmxt_set_cup();
2358 break;
2359 case 0x201: case 0x203: case 0x205: case 0x207:
2360 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2361 case 0x211: case 0x213: case 0x215: case 0x217:
2362 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2363 wrd = (insn >> 5) & 0xf;
2364 rd0 = (insn >> 12) & 0xf;
2365 rd1 = (insn >> 0) & 0xf;
2366 if (rd0 == 0xf || rd1 == 0xf)
2367 return 1;
2368 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2369 tmp = load_reg(s, rd0);
2370 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2371 switch ((insn >> 16) & 0xf) {
2372 case 0x0: /* TMIA */
da6b5335 2373 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2374 break;
2375 case 0x8: /* TMIAPH */
da6b5335 2376 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2377 break;
2378 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2379 if (insn & (1 << 16))
da6b5335 2380 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2381 if (insn & (1 << 17))
da6b5335
FN
2382 tcg_gen_shri_i32(tmp2, tmp2, 16);
2383 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2384 break;
2385 default:
7d1b0095
PM
2386 tcg_temp_free_i32(tmp2);
2387 tcg_temp_free_i32(tmp);
18c9b560
AZ
2388 return 1;
2389 }
7d1b0095
PM
2390 tcg_temp_free_i32(tmp2);
2391 tcg_temp_free_i32(tmp);
18c9b560
AZ
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 break;
2395 default:
2396 return 1;
2397 }
2398
2399 return 0;
2400}
2401
a1c7273b 2402/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2403 (ie. an undefined instruction). */
0ecb72a5 2404static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2405{
2406 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2407 TCGv tmp, tmp2;
18c9b560
AZ
2408
2409 if ((insn & 0x0ff00f10) == 0x0e200010) {
2410 /* Multiply with Internal Accumulate Format */
2411 rd0 = (insn >> 12) & 0xf;
2412 rd1 = insn & 0xf;
2413 acc = (insn >> 5) & 7;
2414
2415 if (acc != 0)
2416 return 1;
2417
3a554c0f
FN
2418 tmp = load_reg(s, rd0);
2419 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2420 switch ((insn >> 16) & 0xf) {
2421 case 0x0: /* MIA */
3a554c0f 2422 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2423 break;
2424 case 0x8: /* MIAPH */
3a554c0f 2425 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2426 break;
2427 case 0xc: /* MIABB */
2428 case 0xd: /* MIABT */
2429 case 0xe: /* MIATB */
2430 case 0xf: /* MIATT */
18c9b560 2431 if (insn & (1 << 16))
3a554c0f 2432 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2433 if (insn & (1 << 17))
3a554c0f
FN
2434 tcg_gen_shri_i32(tmp2, tmp2, 16);
2435 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2436 break;
2437 default:
2438 return 1;
2439 }
7d1b0095
PM
2440 tcg_temp_free_i32(tmp2);
2441 tcg_temp_free_i32(tmp);
18c9b560
AZ
2442
2443 gen_op_iwmmxt_movq_wRn_M0(acc);
2444 return 0;
2445 }
2446
2447 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2448 /* Internal Accumulator Access Format */
2449 rdhi = (insn >> 16) & 0xf;
2450 rdlo = (insn >> 12) & 0xf;
2451 acc = insn & 7;
2452
2453 if (acc != 0)
2454 return 1;
2455
2456 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2457 iwmmxt_load_reg(cpu_V0, acc);
2458 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2459 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2460 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2461 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2462 } else { /* MAR */
3a554c0f
FN
2463 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2464 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2465 }
2466 return 0;
2467 }
2468
2469 return 1;
2470}
2471
9ee6e8bb
PB
/* Shift x right by n bits; a negative n shifts left instead.  This lets
   VFP_SREG below use (bigbit - 1) even when bigbit is 0.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number from an instruction word:
   a 4-bit field at "bigbit" supplies the upper bits and the single bit
   at "smallbit" the lowest bit.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number.  With VFP3 there are 32
   D registers and "smallbit" is the top bit of the number; on pre-VFP3
   cores only 16 exist, so a set smallbit makes the enclosing function
   return 1 (treat the insn as UNDEF).  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions of the D (destination), N and M register operands in
   the VFP instruction encoding.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2491
4373f3ce
PB
2492/* Move between integer and VFP cores. */
2493static TCGv gen_vfp_mrs(void)
2494{
7d1b0095 2495 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2496 tcg_gen_mov_i32(tmp, cpu_F0s);
2497 return tmp;
2498}
2499
2500static void gen_vfp_msr(TCGv tmp)
2501{
2502 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2503 tcg_temp_free_i32(tmp);
4373f3ce
PB
2504}
2505
ad69471c
PB
2506static void gen_neon_dup_u8(TCGv var, int shift)
2507{
7d1b0095 2508 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2509 if (shift)
2510 tcg_gen_shri_i32(var, var, shift);
86831435 2511 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2512 tcg_gen_shli_i32(tmp, var, 8);
2513 tcg_gen_or_i32(var, var, tmp);
2514 tcg_gen_shli_i32(tmp, var, 16);
2515 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2516 tcg_temp_free_i32(tmp);
ad69471c
PB
2517}
2518
2519static void gen_neon_dup_low16(TCGv var)
2520{
7d1b0095 2521 TCGv tmp = tcg_temp_new_i32();
86831435 2522 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2523 tcg_gen_shli_i32(tmp, var, 16);
2524 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2525 tcg_temp_free_i32(tmp);
ad69471c
PB
2526}
2527
2528static void gen_neon_dup_high16(TCGv var)
2529{
7d1b0095 2530 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2531 tcg_gen_andi_i32(var, var, 0xffff0000);
2532 tcg_gen_shri_i32(tmp, var, 16);
2533 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2534 tcg_temp_free_i32(tmp);
ad69471c
PB
2535}
2536
8e18cde3
PM
2537static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2538{
2539 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2540 TCGv tmp;
2541 switch (size) {
2542 case 0:
2543 tmp = gen_ld8u(addr, IS_USER(s));
2544 gen_neon_dup_u8(tmp, 0);
2545 break;
2546 case 1:
2547 tmp = gen_ld16u(addr, IS_USER(s));
2548 gen_neon_dup_low16(tmp);
2549 break;
2550 case 2:
2551 tmp = gen_ld32(addr, IS_USER(s));
2552 break;
2553 default: /* Avoid compiler warnings. */
2554 abort();
2555 }
2556 return tmp;
2557}
2558
a1c7273b 2559/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2560 (ie. an undefined instruction). */
0ecb72a5 2561static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2562{
2563 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2564 int dp, veclen;
312eea9f 2565 TCGv addr;
4373f3ce 2566 TCGv tmp;
ad69471c 2567 TCGv tmp2;
b7bcbe95 2568
40f137e1
PB
2569 if (!arm_feature(env, ARM_FEATURE_VFP))
2570 return 1;
2571
5df8bac1 2572 if (!s->vfp_enabled) {
9ee6e8bb 2573 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2574 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2575 return 1;
2576 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2577 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2578 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2579 return 1;
2580 }
b7bcbe95
FB
2581 dp = ((insn & 0xf00) == 0xb00);
2582 switch ((insn >> 24) & 0xf) {
2583 case 0xe:
2584 if (insn & (1 << 4)) {
2585 /* single register transfer */
b7bcbe95
FB
2586 rd = (insn >> 12) & 0xf;
2587 if (dp) {
9ee6e8bb
PB
2588 int size;
2589 int pass;
2590
2591 VFP_DREG_N(rn, insn);
2592 if (insn & 0xf)
b7bcbe95 2593 return 1;
9ee6e8bb
PB
2594 if (insn & 0x00c00060
2595 && !arm_feature(env, ARM_FEATURE_NEON))
2596 return 1;
2597
2598 pass = (insn >> 21) & 1;
2599 if (insn & (1 << 22)) {
2600 size = 0;
2601 offset = ((insn >> 5) & 3) * 8;
2602 } else if (insn & (1 << 5)) {
2603 size = 1;
2604 offset = (insn & (1 << 6)) ? 16 : 0;
2605 } else {
2606 size = 2;
2607 offset = 0;
2608 }
18c9b560 2609 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2610 /* vfp->arm */
ad69471c 2611 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2612 switch (size) {
2613 case 0:
9ee6e8bb 2614 if (offset)
ad69471c 2615 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2616 if (insn & (1 << 23))
ad69471c 2617 gen_uxtb(tmp);
9ee6e8bb 2618 else
ad69471c 2619 gen_sxtb(tmp);
9ee6e8bb
PB
2620 break;
2621 case 1:
9ee6e8bb
PB
2622 if (insn & (1 << 23)) {
2623 if (offset) {
ad69471c 2624 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2625 } else {
ad69471c 2626 gen_uxth(tmp);
9ee6e8bb
PB
2627 }
2628 } else {
2629 if (offset) {
ad69471c 2630 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2631 } else {
ad69471c 2632 gen_sxth(tmp);
9ee6e8bb
PB
2633 }
2634 }
2635 break;
2636 case 2:
9ee6e8bb
PB
2637 break;
2638 }
ad69471c 2639 store_reg(s, rd, tmp);
b7bcbe95
FB
2640 } else {
2641 /* arm->vfp */
ad69471c 2642 tmp = load_reg(s, rd);
9ee6e8bb
PB
2643 if (insn & (1 << 23)) {
2644 /* VDUP */
2645 if (size == 0) {
ad69471c 2646 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2647 } else if (size == 1) {
ad69471c 2648 gen_neon_dup_low16(tmp);
9ee6e8bb 2649 }
cbbccffc 2650 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2651 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2652 tcg_gen_mov_i32(tmp2, tmp);
2653 neon_store_reg(rn, n, tmp2);
2654 }
2655 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2656 } else {
2657 /* VMOV */
2658 switch (size) {
2659 case 0:
ad69471c 2660 tmp2 = neon_load_reg(rn, pass);
d593c48e 2661 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2662 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2663 break;
2664 case 1:
ad69471c 2665 tmp2 = neon_load_reg(rn, pass);
d593c48e 2666 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2667 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2668 break;
2669 case 2:
9ee6e8bb
PB
2670 break;
2671 }
ad69471c 2672 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2673 }
b7bcbe95 2674 }
9ee6e8bb
PB
2675 } else { /* !dp */
2676 if ((insn & 0x6f) != 0x00)
2677 return 1;
2678 rn = VFP_SREG_N(insn);
18c9b560 2679 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2680 /* vfp->arm */
2681 if (insn & (1 << 21)) {
2682 /* system register */
40f137e1 2683 rn >>= 1;
9ee6e8bb 2684
b7bcbe95 2685 switch (rn) {
40f137e1 2686 case ARM_VFP_FPSID:
4373f3ce 2687 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2688 VFP3 restricts all id registers to privileged
2689 accesses. */
2690 if (IS_USER(s)
2691 && arm_feature(env, ARM_FEATURE_VFP3))
2692 return 1;
4373f3ce 2693 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2694 break;
40f137e1 2695 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2696 if (IS_USER(s))
2697 return 1;
4373f3ce 2698 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2699 break;
40f137e1
PB
2700 case ARM_VFP_FPINST:
2701 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2702 /* Not present in VFP3. */
2703 if (IS_USER(s)
2704 || arm_feature(env, ARM_FEATURE_VFP3))
2705 return 1;
4373f3ce 2706 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2707 break;
40f137e1 2708 case ARM_VFP_FPSCR:
601d70b9 2709 if (rd == 15) {
4373f3ce
PB
2710 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2711 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2712 } else {
7d1b0095 2713 tmp = tcg_temp_new_i32();
4373f3ce
PB
2714 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2715 }
b7bcbe95 2716 break;
9ee6e8bb
PB
2717 case ARM_VFP_MVFR0:
2718 case ARM_VFP_MVFR1:
2719 if (IS_USER(s)
06ed5d66 2720 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2721 return 1;
4373f3ce 2722 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2723 break;
b7bcbe95
FB
2724 default:
2725 return 1;
2726 }
2727 } else {
2728 gen_mov_F0_vreg(0, rn);
4373f3ce 2729 tmp = gen_vfp_mrs();
b7bcbe95
FB
2730 }
2731 if (rd == 15) {
b5ff1b31 2732 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2733 gen_set_nzcv(tmp);
7d1b0095 2734 tcg_temp_free_i32(tmp);
4373f3ce
PB
2735 } else {
2736 store_reg(s, rd, tmp);
2737 }
b7bcbe95
FB
2738 } else {
2739 /* arm->vfp */
b7bcbe95 2740 if (insn & (1 << 21)) {
40f137e1 2741 rn >>= 1;
b7bcbe95
FB
2742 /* system register */
2743 switch (rn) {
40f137e1 2744 case ARM_VFP_FPSID:
9ee6e8bb
PB
2745 case ARM_VFP_MVFR0:
2746 case ARM_VFP_MVFR1:
b7bcbe95
FB
2747 /* Writes are ignored. */
2748 break;
40f137e1 2749 case ARM_VFP_FPSCR:
e4c1cfa5 2750 tmp = load_reg(s, rd);
4373f3ce 2751 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2752 tcg_temp_free_i32(tmp);
b5ff1b31 2753 gen_lookup_tb(s);
b7bcbe95 2754 break;
40f137e1 2755 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2756 if (IS_USER(s))
2757 return 1;
71b3c3de
JR
2758 /* TODO: VFP subarchitecture support.
2759 * For now, keep the EN bit only */
e4c1cfa5 2760 tmp = load_reg(s, rd);
71b3c3de 2761 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2762 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2763 gen_lookup_tb(s);
2764 break;
2765 case ARM_VFP_FPINST:
2766 case ARM_VFP_FPINST2:
e4c1cfa5 2767 tmp = load_reg(s, rd);
4373f3ce 2768 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2769 break;
b7bcbe95
FB
2770 default:
2771 return 1;
2772 }
2773 } else {
e4c1cfa5 2774 tmp = load_reg(s, rd);
4373f3ce 2775 gen_vfp_msr(tmp);
b7bcbe95
FB
2776 gen_mov_vreg_F0(0, rn);
2777 }
2778 }
2779 }
2780 } else {
2781 /* data processing */
2782 /* The opcode is in bits 23, 21, 20 and 6. */
2783 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2784 if (dp) {
2785 if (op == 15) {
2786 /* rn is opcode */
2787 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2788 } else {
2789 /* rn is register number */
9ee6e8bb 2790 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2791 }
2792
04595bf6 2793 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2794 /* Integer or single precision destination. */
9ee6e8bb 2795 rd = VFP_SREG_D(insn);
b7bcbe95 2796 } else {
9ee6e8bb 2797 VFP_DREG_D(rd, insn);
b7bcbe95 2798 }
04595bf6
PM
2799 if (op == 15 &&
2800 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2801 /* VCVT from int is always from S reg regardless of dp bit.
2802 * VCVT with immediate frac_bits has same format as SREG_M
2803 */
2804 rm = VFP_SREG_M(insn);
b7bcbe95 2805 } else {
9ee6e8bb 2806 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2807 }
2808 } else {
9ee6e8bb 2809 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2810 if (op == 15 && rn == 15) {
2811 /* Double precision destination. */
9ee6e8bb
PB
2812 VFP_DREG_D(rd, insn);
2813 } else {
2814 rd = VFP_SREG_D(insn);
2815 }
04595bf6
PM
2816 /* NB that we implicitly rely on the encoding for the frac_bits
2817 * in VCVT of fixed to float being the same as that of an SREG_M
2818 */
9ee6e8bb 2819 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2820 }
2821
69d1fc22 2822 veclen = s->vec_len;
b7bcbe95
FB
2823 if (op == 15 && rn > 3)
2824 veclen = 0;
2825
2826 /* Shut up compiler warnings. */
2827 delta_m = 0;
2828 delta_d = 0;
2829 bank_mask = 0;
3b46e624 2830
b7bcbe95
FB
2831 if (veclen > 0) {
2832 if (dp)
2833 bank_mask = 0xc;
2834 else
2835 bank_mask = 0x18;
2836
2837 /* Figure out what type of vector operation this is. */
2838 if ((rd & bank_mask) == 0) {
2839 /* scalar */
2840 veclen = 0;
2841 } else {
2842 if (dp)
69d1fc22 2843 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2844 else
69d1fc22 2845 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2846
2847 if ((rm & bank_mask) == 0) {
2848 /* mixed scalar/vector */
2849 delta_m = 0;
2850 } else {
2851 /* vector */
2852 delta_m = delta_d;
2853 }
2854 }
2855 }
2856
2857 /* Load the initial operands. */
2858 if (op == 15) {
2859 switch (rn) {
2860 case 16:
2861 case 17:
2862 /* Integer source */
2863 gen_mov_F0_vreg(0, rm);
2864 break;
2865 case 8:
2866 case 9:
2867 /* Compare */
2868 gen_mov_F0_vreg(dp, rd);
2869 gen_mov_F1_vreg(dp, rm);
2870 break;
2871 case 10:
2872 case 11:
2873 /* Compare with zero */
2874 gen_mov_F0_vreg(dp, rd);
2875 gen_vfp_F1_ld0(dp);
2876 break;
9ee6e8bb
PB
2877 case 20:
2878 case 21:
2879 case 22:
2880 case 23:
644ad806
PB
2881 case 28:
2882 case 29:
2883 case 30:
2884 case 31:
9ee6e8bb
PB
2885 /* Source and destination the same. */
2886 gen_mov_F0_vreg(dp, rd);
2887 break;
6e0c0ed1
PM
2888 case 4:
2889 case 5:
2890 case 6:
2891 case 7:
2892 /* VCVTB, VCVTT: only present with the halfprec extension,
2893 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2894 */
2895 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2896 return 1;
2897 }
2898 /* Otherwise fall through */
b7bcbe95
FB
2899 default:
2900 /* One source operand. */
2901 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2902 break;
b7bcbe95
FB
2903 }
2904 } else {
2905 /* Two source operands. */
2906 gen_mov_F0_vreg(dp, rn);
2907 gen_mov_F1_vreg(dp, rm);
2908 }
2909
2910 for (;;) {
2911 /* Perform the calculation. */
2912 switch (op) {
605a6aed
PM
2913 case 0: /* VMLA: fd + (fn * fm) */
2914 /* Note that order of inputs to the add matters for NaNs */
2915 gen_vfp_F1_mul(dp);
2916 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2917 gen_vfp_add(dp);
2918 break;
605a6aed 2919 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2920 gen_vfp_mul(dp);
605a6aed
PM
2921 gen_vfp_F1_neg(dp);
2922 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2923 gen_vfp_add(dp);
2924 break;
605a6aed
PM
2925 case 2: /* VNMLS: -fd + (fn * fm) */
2926 /* Note that it isn't valid to replace (-A + B) with (B - A)
2927 * or similar plausible looking simplifications
2928 * because this will give wrong results for NaNs.
2929 */
2930 gen_vfp_F1_mul(dp);
2931 gen_mov_F0_vreg(dp, rd);
2932 gen_vfp_neg(dp);
2933 gen_vfp_add(dp);
b7bcbe95 2934 break;
605a6aed 2935 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2936 gen_vfp_mul(dp);
605a6aed
PM
2937 gen_vfp_F1_neg(dp);
2938 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2939 gen_vfp_neg(dp);
605a6aed 2940 gen_vfp_add(dp);
b7bcbe95
FB
2941 break;
2942 case 4: /* mul: fn * fm */
2943 gen_vfp_mul(dp);
2944 break;
2945 case 5: /* nmul: -(fn * fm) */
2946 gen_vfp_mul(dp);
2947 gen_vfp_neg(dp);
2948 break;
2949 case 6: /* add: fn + fm */
2950 gen_vfp_add(dp);
2951 break;
2952 case 7: /* sub: fn - fm */
2953 gen_vfp_sub(dp);
2954 break;
2955 case 8: /* div: fn / fm */
2956 gen_vfp_div(dp);
2957 break;
da97f52c
PM
2958 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2959 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2960 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2961 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2962 /* These are fused multiply-add, and must be done as one
2963 * floating point operation with no rounding between the
2964 * multiplication and addition steps.
2965 * NB that doing the negations here as separate steps is
2966 * correct : an input NaN should come out with its sign bit
2967 * flipped if it is a negated-input.
2968 */
2969 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2970 return 1;
2971 }
2972 if (dp) {
2973 TCGv_ptr fpst;
2974 TCGv_i64 frd;
2975 if (op & 1) {
2976 /* VFNMS, VFMS */
2977 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2978 }
2979 frd = tcg_temp_new_i64();
2980 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2981 if (op & 2) {
2982 /* VFNMA, VFNMS */
2983 gen_helper_vfp_negd(frd, frd);
2984 }
2985 fpst = get_fpstatus_ptr(0);
2986 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2987 cpu_F1d, frd, fpst);
2988 tcg_temp_free_ptr(fpst);
2989 tcg_temp_free_i64(frd);
2990 } else {
2991 TCGv_ptr fpst;
2992 TCGv_i32 frd;
2993 if (op & 1) {
2994 /* VFNMS, VFMS */
2995 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
2996 }
2997 frd = tcg_temp_new_i32();
2998 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
2999 if (op & 2) {
3000 gen_helper_vfp_negs(frd, frd);
3001 }
3002 fpst = get_fpstatus_ptr(0);
3003 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3004 cpu_F1s, frd, fpst);
3005 tcg_temp_free_ptr(fpst);
3006 tcg_temp_free_i32(frd);
3007 }
3008 break;
9ee6e8bb
PB
3009 case 14: /* fconst */
3010 if (!arm_feature(env, ARM_FEATURE_VFP3))
3011 return 1;
3012
3013 n = (insn << 12) & 0x80000000;
3014 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3015 if (dp) {
3016 if (i & 0x40)
3017 i |= 0x3f80;
3018 else
3019 i |= 0x4000;
3020 n |= i << 16;
4373f3ce 3021 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3022 } else {
3023 if (i & 0x40)
3024 i |= 0x780;
3025 else
3026 i |= 0x800;
3027 n |= i << 19;
5b340b51 3028 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3029 }
9ee6e8bb 3030 break;
b7bcbe95
FB
3031 case 15: /* extension space */
3032 switch (rn) {
3033 case 0: /* cpy */
3034 /* no-op */
3035 break;
3036 case 1: /* abs */
3037 gen_vfp_abs(dp);
3038 break;
3039 case 2: /* neg */
3040 gen_vfp_neg(dp);
3041 break;
3042 case 3: /* sqrt */
3043 gen_vfp_sqrt(dp);
3044 break;
60011498 3045 case 4: /* vcvtb.f32.f16 */
60011498
PB
3046 tmp = gen_vfp_mrs();
3047 tcg_gen_ext16u_i32(tmp, tmp);
3048 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3049 tcg_temp_free_i32(tmp);
60011498
PB
3050 break;
3051 case 5: /* vcvtt.f32.f16 */
60011498
PB
3052 tmp = gen_vfp_mrs();
3053 tcg_gen_shri_i32(tmp, tmp, 16);
3054 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3055 tcg_temp_free_i32(tmp);
60011498
PB
3056 break;
3057 case 6: /* vcvtb.f16.f32 */
7d1b0095 3058 tmp = tcg_temp_new_i32();
60011498
PB
3059 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3060 gen_mov_F0_vreg(0, rd);
3061 tmp2 = gen_vfp_mrs();
3062 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3063 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3064 tcg_temp_free_i32(tmp2);
60011498
PB
3065 gen_vfp_msr(tmp);
3066 break;
3067 case 7: /* vcvtt.f16.f32 */
7d1b0095 3068 tmp = tcg_temp_new_i32();
60011498
PB
3069 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3070 tcg_gen_shli_i32(tmp, tmp, 16);
3071 gen_mov_F0_vreg(0, rd);
3072 tmp2 = gen_vfp_mrs();
3073 tcg_gen_ext16u_i32(tmp2, tmp2);
3074 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3075 tcg_temp_free_i32(tmp2);
60011498
PB
3076 gen_vfp_msr(tmp);
3077 break;
b7bcbe95
FB
3078 case 8: /* cmp */
3079 gen_vfp_cmp(dp);
3080 break;
3081 case 9: /* cmpe */
3082 gen_vfp_cmpe(dp);
3083 break;
3084 case 10: /* cmpz */
3085 gen_vfp_cmp(dp);
3086 break;
3087 case 11: /* cmpez */
3088 gen_vfp_F1_ld0(dp);
3089 gen_vfp_cmpe(dp);
3090 break;
3091 case 15: /* single<->double conversion */
3092 if (dp)
4373f3ce 3093 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3094 else
4373f3ce 3095 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3096 break;
3097 case 16: /* fuito */
5500b06c 3098 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3099 break;
3100 case 17: /* fsito */
5500b06c 3101 gen_vfp_sito(dp, 0);
b7bcbe95 3102 break;
9ee6e8bb
PB
3103 case 20: /* fshto */
3104 if (!arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
5500b06c 3106 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3107 break;
3108 case 21: /* fslto */
3109 if (!arm_feature(env, ARM_FEATURE_VFP3))
3110 return 1;
5500b06c 3111 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3112 break;
3113 case 22: /* fuhto */
3114 if (!arm_feature(env, ARM_FEATURE_VFP3))
3115 return 1;
5500b06c 3116 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3117 break;
3118 case 23: /* fulto */
3119 if (!arm_feature(env, ARM_FEATURE_VFP3))
3120 return 1;
5500b06c 3121 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3122 break;
b7bcbe95 3123 case 24: /* ftoui */
5500b06c 3124 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3125 break;
3126 case 25: /* ftouiz */
5500b06c 3127 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3128 break;
3129 case 26: /* ftosi */
5500b06c 3130 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3131 break;
3132 case 27: /* ftosiz */
5500b06c 3133 gen_vfp_tosiz(dp, 0);
b7bcbe95 3134 break;
9ee6e8bb
PB
3135 case 28: /* ftosh */
3136 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 return 1;
5500b06c 3138 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3139 break;
3140 case 29: /* ftosl */
3141 if (!arm_feature(env, ARM_FEATURE_VFP3))
3142 return 1;
5500b06c 3143 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3144 break;
3145 case 30: /* ftouh */
3146 if (!arm_feature(env, ARM_FEATURE_VFP3))
3147 return 1;
5500b06c 3148 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3149 break;
3150 case 31: /* ftoul */
3151 if (!arm_feature(env, ARM_FEATURE_VFP3))
3152 return 1;
5500b06c 3153 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3154 break;
b7bcbe95 3155 default: /* undefined */
b7bcbe95
FB
3156 return 1;
3157 }
3158 break;
3159 default: /* undefined */
b7bcbe95
FB
3160 return 1;
3161 }
3162
3163 /* Write back the result. */
3164 if (op == 15 && (rn >= 8 && rn <= 11))
3165 ; /* Comparison, do nothing. */
04595bf6
PM
3166 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3167 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3168 gen_mov_vreg_F0(0, rd);
3169 else if (op == 15 && rn == 15)
3170 /* conversion */
3171 gen_mov_vreg_F0(!dp, rd);
3172 else
3173 gen_mov_vreg_F0(dp, rd);
3174
3175 /* break out of the loop if we have finished */
3176 if (veclen == 0)
3177 break;
3178
3179 if (op == 15 && delta_m == 0) {
3180 /* single source one-many */
3181 while (veclen--) {
3182 rd = ((rd + delta_d) & (bank_mask - 1))
3183 | (rd & bank_mask);
3184 gen_mov_vreg_F0(dp, rd);
3185 }
3186 break;
3187 }
3188 /* Setup the next operands. */
3189 veclen--;
3190 rd = ((rd + delta_d) & (bank_mask - 1))
3191 | (rd & bank_mask);
3192
3193 if (op == 15) {
3194 /* One source operand. */
3195 rm = ((rm + delta_m) & (bank_mask - 1))
3196 | (rm & bank_mask);
3197 gen_mov_F0_vreg(dp, rm);
3198 } else {
3199 /* Two source operands. */
3200 rn = ((rn + delta_d) & (bank_mask - 1))
3201 | (rn & bank_mask);
3202 gen_mov_F0_vreg(dp, rn);
3203 if (delta_m) {
3204 rm = ((rm + delta_m) & (bank_mask - 1))
3205 | (rm & bank_mask);
3206 gen_mov_F1_vreg(dp, rm);
3207 }
3208 }
3209 }
3210 }
3211 break;
3212 case 0xc:
3213 case 0xd:
8387da81 3214 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3215 /* two-register transfer */
3216 rn = (insn >> 16) & 0xf;
3217 rd = (insn >> 12) & 0xf;
3218 if (dp) {
9ee6e8bb
PB
3219 VFP_DREG_M(rm, insn);
3220 } else {
3221 rm = VFP_SREG_M(insn);
3222 }
b7bcbe95 3223
18c9b560 3224 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3225 /* vfp->arm */
3226 if (dp) {
4373f3ce
PB
3227 gen_mov_F0_vreg(0, rm * 2);
3228 tmp = gen_vfp_mrs();
3229 store_reg(s, rd, tmp);
3230 gen_mov_F0_vreg(0, rm * 2 + 1);
3231 tmp = gen_vfp_mrs();
3232 store_reg(s, rn, tmp);
b7bcbe95
FB
3233 } else {
3234 gen_mov_F0_vreg(0, rm);
4373f3ce 3235 tmp = gen_vfp_mrs();
8387da81 3236 store_reg(s, rd, tmp);
b7bcbe95 3237 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3238 tmp = gen_vfp_mrs();
8387da81 3239 store_reg(s, rn, tmp);
b7bcbe95
FB
3240 }
3241 } else {
3242 /* arm->vfp */
3243 if (dp) {
4373f3ce
PB
3244 tmp = load_reg(s, rd);
3245 gen_vfp_msr(tmp);
3246 gen_mov_vreg_F0(0, rm * 2);
3247 tmp = load_reg(s, rn);
3248 gen_vfp_msr(tmp);
3249 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3250 } else {
8387da81 3251 tmp = load_reg(s, rd);
4373f3ce 3252 gen_vfp_msr(tmp);
b7bcbe95 3253 gen_mov_vreg_F0(0, rm);
8387da81 3254 tmp = load_reg(s, rn);
4373f3ce 3255 gen_vfp_msr(tmp);
b7bcbe95
FB
3256 gen_mov_vreg_F0(0, rm + 1);
3257 }
3258 }
3259 } else {
3260 /* Load/store */
3261 rn = (insn >> 16) & 0xf;
3262 if (dp)
9ee6e8bb 3263 VFP_DREG_D(rd, insn);
b7bcbe95 3264 else
9ee6e8bb 3265 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3266 if ((insn & 0x01200000) == 0x01000000) {
3267 /* Single load/store */
3268 offset = (insn & 0xff) << 2;
3269 if ((insn & (1 << 23)) == 0)
3270 offset = -offset;
934814f1
PM
3271 if (s->thumb && rn == 15) {
3272 /* This is actually UNPREDICTABLE */
3273 addr = tcg_temp_new_i32();
3274 tcg_gen_movi_i32(addr, s->pc & ~2);
3275 } else {
3276 addr = load_reg(s, rn);
3277 }
312eea9f 3278 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3279 if (insn & (1 << 20)) {
312eea9f 3280 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3281 gen_mov_vreg_F0(dp, rd);
3282 } else {
3283 gen_mov_F0_vreg(dp, rd);
312eea9f 3284 gen_vfp_st(s, dp, addr);
b7bcbe95 3285 }
7d1b0095 3286 tcg_temp_free_i32(addr);
b7bcbe95
FB
3287 } else {
3288 /* load/store multiple */
934814f1 3289 int w = insn & (1 << 21);
b7bcbe95
FB
3290 if (dp)
3291 n = (insn >> 1) & 0x7f;
3292 else
3293 n = insn & 0xff;
3294
934814f1
PM
3295 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3296 /* P == U , W == 1 => UNDEF */
3297 return 1;
3298 }
3299 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3300 /* UNPREDICTABLE cases for bad immediates: we choose to
3301 * UNDEF to avoid generating huge numbers of TCG ops
3302 */
3303 return 1;
3304 }
3305 if (rn == 15 && w) {
3306 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3307 return 1;
3308 }
3309
3310 if (s->thumb && rn == 15) {
3311 /* This is actually UNPREDICTABLE */
3312 addr = tcg_temp_new_i32();
3313 tcg_gen_movi_i32(addr, s->pc & ~2);
3314 } else {
3315 addr = load_reg(s, rn);
3316 }
b7bcbe95 3317 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3318 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3319
3320 if (dp)
3321 offset = 8;
3322 else
3323 offset = 4;
3324 for (i = 0; i < n; i++) {
18c9b560 3325 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3326 /* load */
312eea9f 3327 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3328 gen_mov_vreg_F0(dp, rd + i);
3329 } else {
3330 /* store */
3331 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3332 gen_vfp_st(s, dp, addr);
b7bcbe95 3333 }
312eea9f 3334 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3335 }
934814f1 3336 if (w) {
b7bcbe95
FB
3337 /* writeback */
3338 if (insn & (1 << 24))
3339 offset = -offset * n;
3340 else if (dp && (insn & 1))
3341 offset = 4;
3342 else
3343 offset = 0;
3344
3345 if (offset != 0)
312eea9f
FN
3346 tcg_gen_addi_i32(addr, addr, offset);
3347 store_reg(s, rn, addr);
3348 } else {
7d1b0095 3349 tcg_temp_free_i32(addr);
b7bcbe95
FB
3350 }
3351 }
3352 }
3353 break;
3354 default:
3355 /* Should never happen. */
3356 return 1;
3357 }
3358 return 0;
3359}
3360
6e256c93 3361static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3362{
6e256c93
FB
3363 TranslationBlock *tb;
3364
3365 tb = s->tb;
3366 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3367 tcg_gen_goto_tb(n);
8984bd2e 3368 gen_set_pc_im(dest);
4b4a72e5 3369 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3370 } else {
8984bd2e 3371 gen_set_pc_im(dest);
57fec1fe 3372 tcg_gen_exit_tb(0);
6e256c93 3373 }
c53be334
FB
3374}
3375
8aaca4c0
FB
3376static inline void gen_jmp (DisasContext *s, uint32_t dest)
3377{
551bd27f 3378 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3379 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3380 if (s->thumb)
d9ba4830
PB
3381 dest |= 1;
3382 gen_bx_im(s, dest);
8aaca4c0 3383 } else {
6e256c93 3384 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3385 s->is_jmp = DISAS_TB_JUMP;
3386 }
3387}
3388
d9ba4830 3389static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3390{
ee097184 3391 if (x)
d9ba4830 3392 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3393 else
d9ba4830 3394 gen_sxth(t0);
ee097184 3395 if (y)
d9ba4830 3396 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3397 else
d9ba4830
PB
3398 gen_sxth(t1);
3399 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3400}
3401
3402/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3403static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3404 uint32_t mask;
3405
3406 mask = 0;
3407 if (flags & (1 << 0))
3408 mask |= 0xff;
3409 if (flags & (1 << 1))
3410 mask |= 0xff00;
3411 if (flags & (1 << 2))
3412 mask |= 0xff0000;
3413 if (flags & (1 << 3))
3414 mask |= 0xff000000;
9ee6e8bb 3415
2ae23e75 3416 /* Mask out undefined bits. */
9ee6e8bb 3417 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3418 if (!arm_feature(env, ARM_FEATURE_V4T))
3419 mask &= ~CPSR_T;
3420 if (!arm_feature(env, ARM_FEATURE_V5))
3421 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3422 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3423 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3424 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3425 mask &= ~CPSR_IT;
9ee6e8bb 3426 /* Mask out execution state bits. */
2ae23e75 3427 if (!spsr)
e160c51c 3428 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3429 /* Mask out privileged bits. */
3430 if (IS_USER(s))
9ee6e8bb 3431 mask &= CPSR_USER;
b5ff1b31
FB
3432 return mask;
3433}
3434
2fbac54b
FN
3435/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3436static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3437{
d9ba4830 3438 TCGv tmp;
b5ff1b31
FB
3439 if (spsr) {
3440 /* ??? This is also undefined in system mode. */
3441 if (IS_USER(s))
3442 return 1;
d9ba4830
PB
3443
3444 tmp = load_cpu_field(spsr);
3445 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3446 tcg_gen_andi_i32(t0, t0, mask);
3447 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3448 store_cpu_field(tmp, spsr);
b5ff1b31 3449 } else {
2fbac54b 3450 gen_set_cpsr(t0, mask);
b5ff1b31 3451 }
7d1b0095 3452 tcg_temp_free_i32(t0);
b5ff1b31
FB
3453 gen_lookup_tb(s);
3454 return 0;
3455}
3456
/* Returns nonzero if access to the PSR is not permitted. */
/* Immediate-operand variant of gen_set_psr(): materialises 'val' in a
 * temp and delegates; the temp is freed by gen_set_psr().
 */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
e9bb4aa9
JR
3466/* Generate an old-style exception return. Marks pc as dead. */
3467static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3468{
d9ba4830 3469 TCGv tmp;
e9bb4aa9 3470 store_reg(s, 15, pc);
d9ba4830
PB
3471 tmp = load_cpu_field(spsr);
3472 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3473 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3474 s->is_jmp = DISAS_UPDATE;
3475}
3476
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    /* CPSR is restored before the jump so the new PC is interpreted in
     * the restored execution state; both temps are consumed here.
     */
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
9ee6e8bb
PB
3486static inline void
3487gen_set_condexec (DisasContext *s)
3488{
3489 if (s->condexec_mask) {
8f01245e 3490 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3491 TCGv tmp = tcg_temp_new_i32();
8f01245e 3492 tcg_gen_movi_i32(tmp, val);
d9ba4830 3493 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3494 }
3495}
3b46e624 3496
/* Generate code to raise exception 'excp' for the instruction that
 * starts 'offset' bytes before the current decode position s->pc,
 * after syncing the IT-block state.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
9ee6e8bb
PB
3505static void gen_nop_hint(DisasContext *s, int val)
3506{
3507 switch (val) {
3508 case 3: /* wfi */
8984bd2e 3509 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3510 s->is_jmp = DISAS_WFI;
3511 break;
3512 case 2: /* wfe */
3513 case 4: /* sev */
3514 /* TODO: Implement SEV and WFE. May help SMP performance. */
3515 default: /* nop */
3516 break;
3517 }
3518}
99c475ab 3519
ad69471c 3520#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3521
62698be3 3522static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3523{
3524 switch (size) {
dd8fbd78
FN
3525 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3526 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3527 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3528 default: abort();
9ee6e8bb 3529 }
9ee6e8bb
PB
3530}
3531
dd8fbd78 3532static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3533{
3534 switch (size) {
dd8fbd78
FN
3535 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3536 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3537 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3538 default: return;
3539 }
3540}
3541
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Invoke the env-taking NEON integer helper gen_helper_neon_<name>_<type>
 * selected by the enclosing function's 'size' (0=8-bit, 1=16-bit,
 * 2=32-bit) and 'u' (unsigned) variables, operating on tmp/tmp2 in place.
 * Falls out with 'return 1' (UNDEF in the callers) for invalid sizes.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* Same dispatch as GEN_NEON_INTEGER_OP_ENV but for helpers that do not
 * take cpu_env.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
/* Load 32-bit NEON scratch slot 'scratch' (env->vfp.scratch[]) into a
 * freshly allocated temp; caller owns the returned temp.
 */
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
/* Store 'var' into 32-bit NEON scratch slot 'scratch'; consumes var.  */
static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
dd8fbd78 3607static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3608{
dd8fbd78 3609 TCGv tmp;
9ee6e8bb 3610 if (size == 1) {
0fad6efc
PM
3611 tmp = neon_load_reg(reg & 7, reg >> 4);
3612 if (reg & 8) {
dd8fbd78 3613 gen_neon_dup_high16(tmp);
0fad6efc
PM
3614 } else {
3615 gen_neon_dup_low16(tmp);
dd8fbd78 3616 }
0fad6efc
PM
3617 } else {
3618 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3619 }
dd8fbd78 3620 return tmp;
9ee6e8bb
PB
3621}
3622
02acedf9 3623static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3624{
02acedf9 3625 TCGv tmp, tmp2;
600b828c 3626 if (!q && size == 2) {
02acedf9
PM
3627 return 1;
3628 }
3629 tmp = tcg_const_i32(rd);
3630 tmp2 = tcg_const_i32(rm);
3631 if (q) {
3632 switch (size) {
3633 case 0:
02da0b2d 3634 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3635 break;
3636 case 1:
02da0b2d 3637 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3638 break;
3639 case 2:
02da0b2d 3640 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3641 break;
3642 default:
3643 abort();
3644 }
3645 } else {
3646 switch (size) {
3647 case 0:
02da0b2d 3648 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3649 break;
3650 case 1:
02da0b2d 3651 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3652 break;
3653 default:
3654 abort();
3655 }
3656 }
3657 tcg_temp_free_i32(tmp);
3658 tcg_temp_free_i32(tmp2);
3659 return 0;
19457615
FN
3660}
3661
d68a6f3a 3662static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3663{
3664 TCGv tmp, tmp2;
600b828c 3665 if (!q && size == 2) {
d68a6f3a
PM
3666 return 1;
3667 }
3668 tmp = tcg_const_i32(rd);
3669 tmp2 = tcg_const_i32(rm);
3670 if (q) {
3671 switch (size) {
3672 case 0:
02da0b2d 3673 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3674 break;
3675 case 1:
02da0b2d 3676 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3677 break;
3678 case 2:
02da0b2d 3679 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3680 break;
3681 default:
3682 abort();
3683 }
3684 } else {
3685 switch (size) {
3686 case 0:
02da0b2d 3687 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3688 break;
3689 case 1:
02da0b2d 3690 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3691 break;
3692 default:
3693 abort();
3694 }
3695 }
3696 tcg_temp_free_i32(tmp);
3697 tcg_temp_free_i32(tmp2);
3698 return 0;
19457615
FN
3699}
3700
19457615
FN
3701static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3702{
3703 TCGv rd, tmp;
3704
7d1b0095
PM
3705 rd = tcg_temp_new_i32();
3706 tmp = tcg_temp_new_i32();
19457615
FN
3707
3708 tcg_gen_shli_i32(rd, t0, 8);
3709 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3710 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3711 tcg_gen_or_i32(rd, rd, tmp);
3712
3713 tcg_gen_shri_i32(t1, t1, 8);
3714 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3715 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3716 tcg_gen_or_i32(t1, t1, tmp);
3717 tcg_gen_mov_i32(t0, rd);
3718
7d1b0095
PM
3719 tcg_temp_free_i32(tmp);
3720 tcg_temp_free_i32(rd);
19457615
FN
3721}
3722
3723static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3724{
3725 TCGv rd, tmp;
3726
7d1b0095
PM
3727 rd = tcg_temp_new_i32();
3728 tmp = tcg_temp_new_i32();
19457615
FN
3729
3730 tcg_gen_shli_i32(rd, t0, 16);
3731 tcg_gen_andi_i32(tmp, t1, 0xffff);
3732 tcg_gen_or_i32(rd, rd, tmp);
3733 tcg_gen_shri_i32(t1, t1, 16);
3734 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3735 tcg_gen_or_i32(t1, t1, tmp);
3736 tcg_gen_mov_i32(t0, rd);
3737
7d1b0095
PM
3738 tcg_temp_free_i32(tmp);
3739 tcg_temp_free_i32(rd);
19457615
FN
3740}
3741
3742
/* Layout table for the NEON "load/store all elements" forms, indexed by
 * bits [11:8] of the instruction: number of registers transferred, the
 * element interleave factor, and the register spacing (1 or 2 D-regs).
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements (VLDn/VSTn, multiple structures).  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved transfers restart from the base address with
             * a per-register offset rather than running sequentially.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one ld64/st64 per D register.  */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: two 32-bit passes per D register,
                 * assembling/splitting sub-word elements as needed.
                 */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one word.  */
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit accesses packed into one word.  */
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded sub-word element into the
                         * existing register contents at 'shift'.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Post-indexed writeback: rm == 15 means none, rm == 13 means
     * immediate (transfer size), otherwise add register rm.
     */
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    /* dest = (t & c) | (f & ~c), computed in place in t and f.  */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
a7812ae4 4078static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4079{
4080 switch (size) {
4081 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4082 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4083 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4084 default: abort();
4085 }
4086}
4087
a7812ae4 4088static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4089{
4090 switch (size) {
02da0b2d
PM
4091 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4092 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4093 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4094 default: abort();
4095 }
4096}
4097
a7812ae4 4098static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4099{
4100 switch (size) {
02da0b2d
PM
4101 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4102 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4103 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4104 default: abort();
4105 }
4106}
4107
af1bbf30
JR
4108static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4109{
4110 switch (size) {
02da0b2d
PM
4111 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4112 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4113 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4114 default: abort();
4115 }
4116}
4117
ad69471c
PB
4118static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4119 int q, int u)
4120{
4121 if (q) {
4122 if (u) {
4123 switch (size) {
4124 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4125 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4126 default: abort();
4127 }
4128 } else {
4129 switch (size) {
4130 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4131 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4132 default: abort();
4133 }
4134 }
4135 } else {
4136 if (u) {
4137 switch (size) {
b408a9b0
CL
4138 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4139 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4140 default: abort();
4141 }
4142 } else {
4143 switch (size) {
4144 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4145 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4146 default: abort();
4147 }
4148 }
4149 }
4150}
4151
a7812ae4 4152static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4153{
4154 if (u) {
4155 switch (size) {
4156 case 0: gen_helper_neon_widen_u8(dest, src); break;
4157 case 1: gen_helper_neon_widen_u16(dest, src); break;
4158 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4159 default: abort();
4160 }
4161 } else {
4162 switch (size) {
4163 case 0: gen_helper_neon_widen_s8(dest, src); break;
4164 case 1: gen_helper_neon_widen_s16(dest, src); break;
4165 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4166 default: abort();
4167 }
4168 }
7d1b0095 4169 tcg_temp_free_i32(src);
ad69471c
PB
4170}
4171
4172static inline void gen_neon_addl(int size)
4173{
4174 switch (size) {
4175 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4176 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4177 case 2: tcg_gen_add_i64(CPU_V001); break;
4178 default: abort();
4179 }
4180}
4181
4182static inline void gen_neon_subl(int size)
4183{
4184 switch (size) {
4185 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4186 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4187 case 2: tcg_gen_sub_i64(CPU_V001); break;
4188 default: abort();
4189 }
4190}
4191
a7812ae4 4192static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4193{
4194 switch (size) {
4195 case 0: gen_helper_neon_negl_u16(var, var); break;
4196 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4197 case 2:
4198 tcg_gen_neg_i64(var, var);
4199 break;
ad69471c
PB
4200 default: abort();
4201 }
4202}
4203
a7812ae4 4204static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4205{
4206 switch (size) {
02da0b2d
PM
4207 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4208 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4209 default: abort();
4210 }
4211}
4212
a7812ae4 4213static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4214{
a7812ae4 4215 TCGv_i64 tmp;
ad69471c
PB
4216
4217 switch ((size << 1) | u) {
4218 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4219 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4220 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4221 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4222 case 4:
4223 tmp = gen_muls_i64_i32(a, b);
4224 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4225 tcg_temp_free_i64(tmp);
ad69471c
PB
4226 break;
4227 case 5:
4228 tmp = gen_mulu_i64_i32(a, b);
4229 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4230 tcg_temp_free_i64(tmp);
ad69471c
PB
4231 break;
4232 default: abort();
4233 }
c6067f04
CL
4234
4235 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4236 Don't forget to clean them now. */
4237 if (size < 2) {
7d1b0095
PM
4238 tcg_temp_free_i32(a);
4239 tcg_temp_free_i32(b);
c6067f04 4240 }
ad69471c
PB
4241}
4242
c33171c7
PM
4243static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4244{
4245 if (op) {
4246 if (u) {
4247 gen_neon_unarrow_sats(size, dest, src);
4248 } else {
4249 gen_neon_narrow(size, dest, src);
4250 }
4251 } else {
4252 if (u) {
4253 gen_neon_narrow_satu(size, dest, src);
4254 } else {
4255 gen_neon_narrow_sats(size, dest, src);
4256 }
4257 }
4258}
4259
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Bit n of each entry is set if element size n is valid for that op;
 * unallocated op values (e.g. 24) are zero and so always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4329
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

/* Return true if this neon 2reg-misc op is float-to-float. */
static int neon_2rm_is_float_op(int op)
{
    /* Everything from VRECPE_F (58) upward operates on floats ... */
    if (op >= NEON_2RM_VRECPE_F) {
        return 1;
    }
    /* ... as do the two float abs/neg ops below that range. */
    return op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F;
}
4385
4386/* Each entry in this array has bit n set if the insn allows
4387 * size value n (otherwise it will UNDEF). Since unallocated
4388 * op values will have no bits set they always UNDEF.
4389 */
4390static const uint8_t neon_2rm_sizes[] = {
4391 [NEON_2RM_VREV64] = 0x7,
4392 [NEON_2RM_VREV32] = 0x3,
4393 [NEON_2RM_VREV16] = 0x1,
4394 [NEON_2RM_VPADDL] = 0x7,
4395 [NEON_2RM_VPADDL_U] = 0x7,
4396 [NEON_2RM_VCLS] = 0x7,
4397 [NEON_2RM_VCLZ] = 0x7,
4398 [NEON_2RM_VCNT] = 0x1,
4399 [NEON_2RM_VMVN] = 0x1,
4400 [NEON_2RM_VPADAL] = 0x7,
4401 [NEON_2RM_VPADAL_U] = 0x7,
4402 [NEON_2RM_VQABS] = 0x7,
4403 [NEON_2RM_VQNEG] = 0x7,
4404 [NEON_2RM_VCGT0] = 0x7,
4405 [NEON_2RM_VCGE0] = 0x7,
4406 [NEON_2RM_VCEQ0] = 0x7,
4407 [NEON_2RM_VCLE0] = 0x7,
4408 [NEON_2RM_VCLT0] = 0x7,
4409 [NEON_2RM_VABS] = 0x7,
4410 [NEON_2RM_VNEG] = 0x7,
4411 [NEON_2RM_VCGT0_F] = 0x4,
4412 [NEON_2RM_VCGE0_F] = 0x4,
4413 [NEON_2RM_VCEQ0_F] = 0x4,
4414 [NEON_2RM_VCLE0_F] = 0x4,
4415 [NEON_2RM_VCLT0_F] = 0x4,
4416 [NEON_2RM_VABS_F] = 0x4,
4417 [NEON_2RM_VNEG_F] = 0x4,
4418 [NEON_2RM_VSWP] = 0x1,
4419 [NEON_2RM_VTRN] = 0x7,
4420 [NEON_2RM_VUZP] = 0x7,
4421 [NEON_2RM_VZIP] = 0x7,
4422 [NEON_2RM_VMOVN] = 0x7,
4423 [NEON_2RM_VQMOVN] = 0x7,
4424 [NEON_2RM_VSHLL] = 0x7,
4425 [NEON_2RM_VCVT_F16_F32] = 0x2,
4426 [NEON_2RM_VCVT_F32_F16] = 0x2,
4427 [NEON_2RM_VRECPE] = 0x4,
4428 [NEON_2RM_VRSQRTE] = 0x4,
4429 [NEON_2RM_VRECPE_F] = 0x4,
4430 [NEON_2RM_VRSQRTE_F] = 0x4,
4431 [NEON_2RM_VCVT_FS] = 0x4,
4432 [NEON_2RM_VCVT_FU] = 0x4,
4433 [NEON_2RM_VCVT_SF] = 0x4,
4434 [NEON_2RM_VCVT_UF] = 0x4,
4435};
4436
9ee6e8bb
PB
4437/* Translate a NEON data processing instruction. Return nonzero if the
4438 instruction is invalid.
ad69471c
PB
4439 We process data in a mixture of 32-bit and 64-bit chunks.
4440 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4441
0ecb72a5 4442static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4443{
4444 int op;
4445 int q;
4446 int rd, rn, rm;
4447 int size;
4448 int shift;
4449 int pass;
4450 int count;
4451 int pairwise;
4452 int u;
ca9a32e4 4453 uint32_t imm, mask;
b75263d6 4454 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4455 TCGv_i64 tmp64;
9ee6e8bb 4456
5df8bac1 4457 if (!s->vfp_enabled)
9ee6e8bb
PB
4458 return 1;
4459 q = (insn & (1 << 6)) != 0;
4460 u = (insn >> 24) & 1;
4461 VFP_DREG_D(rd, insn);
4462 VFP_DREG_N(rn, insn);
4463 VFP_DREG_M(rm, insn);
4464 size = (insn >> 20) & 3;
4465 if ((insn & (1 << 23)) == 0) {
4466 /* Three register same length. */
4467 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4468 /* Catch invalid op and bad size combinations: UNDEF */
4469 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4470 return 1;
4471 }
25f84f79
PM
4472 /* All insns of this form UNDEF for either this condition or the
4473 * superset of cases "Q==1"; we catch the latter later.
4474 */
4475 if (q && ((rd | rn | rm) & 1)) {
4476 return 1;
4477 }
62698be3
PM
4478 if (size == 3 && op != NEON_3R_LOGIC) {
4479 /* 64-bit element instructions. */
9ee6e8bb 4480 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4481 neon_load_reg64(cpu_V0, rn + pass);
4482 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4483 switch (op) {
62698be3 4484 case NEON_3R_VQADD:
9ee6e8bb 4485 if (u) {
02da0b2d
PM
4486 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4487 cpu_V0, cpu_V1);
2c0262af 4488 } else {
02da0b2d
PM
4489 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4490 cpu_V0, cpu_V1);
2c0262af 4491 }
9ee6e8bb 4492 break;
62698be3 4493 case NEON_3R_VQSUB:
9ee6e8bb 4494 if (u) {
02da0b2d
PM
4495 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4496 cpu_V0, cpu_V1);
ad69471c 4497 } else {
02da0b2d
PM
4498 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4499 cpu_V0, cpu_V1);
ad69471c
PB
4500 }
4501 break;
62698be3 4502 case NEON_3R_VSHL:
ad69471c
PB
4503 if (u) {
4504 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4505 } else {
4506 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4507 }
4508 break;
62698be3 4509 case NEON_3R_VQSHL:
ad69471c 4510 if (u) {
02da0b2d
PM
4511 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4512 cpu_V1, cpu_V0);
ad69471c 4513 } else {
02da0b2d
PM
4514 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4515 cpu_V1, cpu_V0);
ad69471c
PB
4516 }
4517 break;
62698be3 4518 case NEON_3R_VRSHL:
ad69471c
PB
4519 if (u) {
4520 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4521 } else {
ad69471c
PB
4522 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4523 }
4524 break;
62698be3 4525 case NEON_3R_VQRSHL:
ad69471c 4526 if (u) {
02da0b2d
PM
4527 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4528 cpu_V1, cpu_V0);
ad69471c 4529 } else {
02da0b2d
PM
4530 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4531 cpu_V1, cpu_V0);
1e8d4eec 4532 }
9ee6e8bb 4533 break;
62698be3 4534 case NEON_3R_VADD_VSUB:
9ee6e8bb 4535 if (u) {
ad69471c 4536 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4537 } else {
ad69471c 4538 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4539 }
4540 break;
4541 default:
4542 abort();
2c0262af 4543 }
ad69471c 4544 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4545 }
9ee6e8bb 4546 return 0;
2c0262af 4547 }
25f84f79 4548 pairwise = 0;
9ee6e8bb 4549 switch (op) {
62698be3
PM
4550 case NEON_3R_VSHL:
4551 case NEON_3R_VQSHL:
4552 case NEON_3R_VRSHL:
4553 case NEON_3R_VQRSHL:
9ee6e8bb 4554 {
ad69471c
PB
4555 int rtmp;
4556 /* Shift instruction operands are reversed. */
4557 rtmp = rn;
9ee6e8bb 4558 rn = rm;
ad69471c 4559 rm = rtmp;
9ee6e8bb 4560 }
2c0262af 4561 break;
25f84f79
PM
4562 case NEON_3R_VPADD:
4563 if (u) {
4564 return 1;
4565 }
4566 /* Fall through */
62698be3
PM
4567 case NEON_3R_VPMAX:
4568 case NEON_3R_VPMIN:
9ee6e8bb 4569 pairwise = 1;
2c0262af 4570 break;
25f84f79
PM
4571 case NEON_3R_FLOAT_ARITH:
4572 pairwise = (u && size < 2); /* if VPADD (float) */
4573 break;
4574 case NEON_3R_FLOAT_MINMAX:
4575 pairwise = u; /* if VPMIN/VPMAX (float) */
4576 break;
4577 case NEON_3R_FLOAT_CMP:
4578 if (!u && size) {
4579 /* no encoding for U=0 C=1x */
4580 return 1;
4581 }
4582 break;
4583 case NEON_3R_FLOAT_ACMP:
4584 if (!u) {
4585 return 1;
4586 }
4587 break;
4588 case NEON_3R_VRECPS_VRSQRTS:
4589 if (u) {
4590 return 1;
4591 }
2c0262af 4592 break;
25f84f79
PM
4593 case NEON_3R_VMUL:
4594 if (u && (size != 0)) {
4595 /* UNDEF on invalid size for polynomial subcase */
4596 return 1;
4597 }
2c0262af 4598 break;
da97f52c
PM
4599 case NEON_3R_VFM:
4600 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4601 return 1;
4602 }
4603 break;
9ee6e8bb 4604 default:
2c0262af 4605 break;
9ee6e8bb 4606 }
dd8fbd78 4607
25f84f79
PM
4608 if (pairwise && q) {
4609 /* All the pairwise insns UNDEF if Q is set */
4610 return 1;
4611 }
4612
9ee6e8bb
PB
4613 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4614
4615 if (pairwise) {
4616 /* Pairwise. */
a5a14945
JR
4617 if (pass < 1) {
4618 tmp = neon_load_reg(rn, 0);
4619 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4620 } else {
a5a14945
JR
4621 tmp = neon_load_reg(rm, 0);
4622 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4623 }
4624 } else {
4625 /* Elementwise. */
dd8fbd78
FN
4626 tmp = neon_load_reg(rn, pass);
4627 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4628 }
4629 switch (op) {
62698be3 4630 case NEON_3R_VHADD:
9ee6e8bb
PB
4631 GEN_NEON_INTEGER_OP(hadd);
4632 break;
62698be3 4633 case NEON_3R_VQADD:
02da0b2d 4634 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4635 break;
62698be3 4636 case NEON_3R_VRHADD:
9ee6e8bb 4637 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4638 break;
62698be3 4639 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4640 switch ((u << 2) | size) {
4641 case 0: /* VAND */
dd8fbd78 4642 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4643 break;
4644 case 1: /* BIC */
f669df27 4645 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4646 break;
4647 case 2: /* VORR */
dd8fbd78 4648 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4649 break;
4650 case 3: /* VORN */
f669df27 4651 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4652 break;
4653 case 4: /* VEOR */
dd8fbd78 4654 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4655 break;
4656 case 5: /* VBSL */
dd8fbd78
FN
4657 tmp3 = neon_load_reg(rd, pass);
4658 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4659 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4660 break;
4661 case 6: /* VBIT */
dd8fbd78
FN
4662 tmp3 = neon_load_reg(rd, pass);
4663 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4664 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4665 break;
4666 case 7: /* VBIF */
dd8fbd78
FN
4667 tmp3 = neon_load_reg(rd, pass);
4668 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4669 tcg_temp_free_i32(tmp3);
9ee6e8bb 4670 break;
2c0262af
FB
4671 }
4672 break;
62698be3 4673 case NEON_3R_VHSUB:
9ee6e8bb
PB
4674 GEN_NEON_INTEGER_OP(hsub);
4675 break;
62698be3 4676 case NEON_3R_VQSUB:
02da0b2d 4677 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4678 break;
62698be3 4679 case NEON_3R_VCGT:
9ee6e8bb
PB
4680 GEN_NEON_INTEGER_OP(cgt);
4681 break;
62698be3 4682 case NEON_3R_VCGE:
9ee6e8bb
PB
4683 GEN_NEON_INTEGER_OP(cge);
4684 break;
62698be3 4685 case NEON_3R_VSHL:
ad69471c 4686 GEN_NEON_INTEGER_OP(shl);
2c0262af 4687 break;
62698be3 4688 case NEON_3R_VQSHL:
02da0b2d 4689 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4690 break;
62698be3 4691 case NEON_3R_VRSHL:
ad69471c 4692 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4693 break;
62698be3 4694 case NEON_3R_VQRSHL:
02da0b2d 4695 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4696 break;
62698be3 4697 case NEON_3R_VMAX:
9ee6e8bb
PB
4698 GEN_NEON_INTEGER_OP(max);
4699 break;
62698be3 4700 case NEON_3R_VMIN:
9ee6e8bb
PB
4701 GEN_NEON_INTEGER_OP(min);
4702 break;
62698be3 4703 case NEON_3R_VABD:
9ee6e8bb
PB
4704 GEN_NEON_INTEGER_OP(abd);
4705 break;
62698be3 4706 case NEON_3R_VABA:
9ee6e8bb 4707 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4708 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4709 tmp2 = neon_load_reg(rd, pass);
4710 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4711 break;
62698be3 4712 case NEON_3R_VADD_VSUB:
9ee6e8bb 4713 if (!u) { /* VADD */
62698be3 4714 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4715 } else { /* VSUB */
4716 switch (size) {
dd8fbd78
FN
4717 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4718 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4719 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4720 default: abort();
9ee6e8bb
PB
4721 }
4722 }
4723 break;
62698be3 4724 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4725 if (!u) { /* VTST */
4726 switch (size) {
dd8fbd78
FN
4727 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4728 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4729 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4730 default: abort();
9ee6e8bb
PB
4731 }
4732 } else { /* VCEQ */
4733 switch (size) {
dd8fbd78
FN
4734 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4735 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4736 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4737 default: abort();
9ee6e8bb
PB
4738 }
4739 }
4740 break;
62698be3 4741 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4742 switch (size) {
dd8fbd78
FN
4743 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4744 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4745 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4746 default: abort();
9ee6e8bb 4747 }
7d1b0095 4748 tcg_temp_free_i32(tmp2);
dd8fbd78 4749 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4750 if (u) { /* VMLS */
dd8fbd78 4751 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4752 } else { /* VMLA */
dd8fbd78 4753 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4754 }
4755 break;
62698be3 4756 case NEON_3R_VMUL:
9ee6e8bb 4757 if (u) { /* polynomial */
dd8fbd78 4758 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4759 } else { /* Integer */
4760 switch (size) {
dd8fbd78
FN
4761 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4762 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4763 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4764 default: abort();
9ee6e8bb
PB
4765 }
4766 }
4767 break;
62698be3 4768 case NEON_3R_VPMAX:
9ee6e8bb
PB
4769 GEN_NEON_INTEGER_OP(pmax);
4770 break;
62698be3 4771 case NEON_3R_VPMIN:
9ee6e8bb
PB
4772 GEN_NEON_INTEGER_OP(pmin);
4773 break;
62698be3 4774 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4775 if (!u) { /* VQDMULH */
4776 switch (size) {
02da0b2d
PM
4777 case 1:
4778 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4779 break;
4780 case 2:
4781 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4782 break;
62698be3 4783 default: abort();
9ee6e8bb 4784 }
62698be3 4785 } else { /* VQRDMULH */
9ee6e8bb 4786 switch (size) {
02da0b2d
PM
4787 case 1:
4788 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4789 break;
4790 case 2:
4791 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4792 break;
62698be3 4793 default: abort();
9ee6e8bb
PB
4794 }
4795 }
4796 break;
62698be3 4797 case NEON_3R_VPADD:
9ee6e8bb 4798 switch (size) {
dd8fbd78
FN
4799 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4800 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4801 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4802 default: abort();
9ee6e8bb
PB
4803 }
4804 break;
62698be3 4805 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4806 {
4807 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4808 switch ((u << 2) | size) {
4809 case 0: /* VADD */
aa47cfdd
PM
4810 case 4: /* VPADD */
4811 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4812 break;
4813 case 2: /* VSUB */
aa47cfdd 4814 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4815 break;
4816 case 6: /* VABD */
aa47cfdd 4817 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4818 break;
4819 default:
62698be3 4820 abort();
9ee6e8bb 4821 }
aa47cfdd 4822 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4823 break;
aa47cfdd 4824 }
62698be3 4825 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4826 {
4827 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4828 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4829 if (!u) {
7d1b0095 4830 tcg_temp_free_i32(tmp2);
dd8fbd78 4831 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4832 if (size == 0) {
aa47cfdd 4833 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4834 } else {
aa47cfdd 4835 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4836 }
4837 }
aa47cfdd 4838 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4839 break;
aa47cfdd 4840 }
62698be3 4841 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4842 {
4843 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4844 if (!u) {
aa47cfdd 4845 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4846 } else {
aa47cfdd
PM
4847 if (size == 0) {
4848 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4849 } else {
4850 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4851 }
b5ff1b31 4852 }
aa47cfdd 4853 tcg_temp_free_ptr(fpstatus);
2c0262af 4854 break;
aa47cfdd 4855 }
62698be3 4856 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4857 {
4858 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4859 if (size == 0) {
4860 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4861 } else {
4862 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4863 }
4864 tcg_temp_free_ptr(fpstatus);
2c0262af 4865 break;
aa47cfdd 4866 }
62698be3 4867 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4868 {
4869 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4870 if (size == 0) {
4871 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4872 } else {
4873 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4874 }
4875 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4876 break;
aa47cfdd 4877 }
62698be3 4878 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4879 if (size == 0)
dd8fbd78 4880 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4881 else
dd8fbd78 4882 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4883 break;
da97f52c
PM
4884 case NEON_3R_VFM:
4885 {
4886 /* VFMA, VFMS: fused multiply-add */
4887 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4888 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4889 if (size) {
4890 /* VFMS */
4891 gen_helper_vfp_negs(tmp, tmp);
4892 }
4893 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4894 tcg_temp_free_i32(tmp3);
4895 tcg_temp_free_ptr(fpstatus);
4896 break;
4897 }
9ee6e8bb
PB
4898 default:
4899 abort();
2c0262af 4900 }
7d1b0095 4901 tcg_temp_free_i32(tmp2);
dd8fbd78 4902
9ee6e8bb
PB
4903 /* Save the result. For elementwise operations we can put it
4904 straight into the destination register. For pairwise operations
4905 we have to be careful to avoid clobbering the source operands. */
4906 if (pairwise && rd == rm) {
dd8fbd78 4907 neon_store_scratch(pass, tmp);
9ee6e8bb 4908 } else {
dd8fbd78 4909 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4910 }
4911
4912 } /* for pass */
4913 if (pairwise && rd == rm) {
4914 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4915 tmp = neon_load_scratch(pass);
4916 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4917 }
4918 }
ad69471c 4919 /* End of 3 register same size operations. */
9ee6e8bb
PB
4920 } else if (insn & (1 << 4)) {
4921 if ((insn & 0x00380080) != 0) {
4922 /* Two registers and shift. */
4923 op = (insn >> 8) & 0xf;
4924 if (insn & (1 << 7)) {
cc13115b
PM
4925 /* 64-bit shift. */
4926 if (op > 7) {
4927 return 1;
4928 }
9ee6e8bb
PB
4929 size = 3;
4930 } else {
4931 size = 2;
4932 while ((insn & (1 << (size + 19))) == 0)
4933 size--;
4934 }
4935 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4936 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4937 by immediate using the variable shift operations. */
4938 if (op < 8) {
4939 /* Shift by immediate:
4940 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4941 if (q && ((rd | rm) & 1)) {
4942 return 1;
4943 }
4944 if (!u && (op == 4 || op == 6)) {
4945 return 1;
4946 }
9ee6e8bb
PB
4947 /* Right shifts are encoded as N - shift, where N is the
4948 element size in bits. */
4949 if (op <= 4)
4950 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4951 if (size == 3) {
4952 count = q + 1;
4953 } else {
4954 count = q ? 4: 2;
4955 }
4956 switch (size) {
4957 case 0:
4958 imm = (uint8_t) shift;
4959 imm |= imm << 8;
4960 imm |= imm << 16;
4961 break;
4962 case 1:
4963 imm = (uint16_t) shift;
4964 imm |= imm << 16;
4965 break;
4966 case 2:
4967 case 3:
4968 imm = shift;
4969 break;
4970 default:
4971 abort();
4972 }
4973
4974 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4975 if (size == 3) {
4976 neon_load_reg64(cpu_V0, rm + pass);
4977 tcg_gen_movi_i64(cpu_V1, imm);
4978 switch (op) {
4979 case 0: /* VSHR */
4980 case 1: /* VSRA */
4981 if (u)
4982 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4983 else
ad69471c 4984 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4985 break;
ad69471c
PB
4986 case 2: /* VRSHR */
4987 case 3: /* VRSRA */
4988 if (u)
4989 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4990 else
ad69471c 4991 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4992 break;
ad69471c 4993 case 4: /* VSRI */
ad69471c
PB
4994 case 5: /* VSHL, VSLI */
4995 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4996 break;
0322b26e 4997 case 6: /* VQSHLU */
02da0b2d
PM
4998 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4999 cpu_V0, cpu_V1);
ad69471c 5000 break;
0322b26e
PM
5001 case 7: /* VQSHL */
5002 if (u) {
02da0b2d 5003 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5004 cpu_V0, cpu_V1);
5005 } else {
02da0b2d 5006 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5007 cpu_V0, cpu_V1);
5008 }
9ee6e8bb 5009 break;
9ee6e8bb 5010 }
ad69471c
PB
5011 if (op == 1 || op == 3) {
5012 /* Accumulate. */
5371cb81 5013 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5014 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5015 } else if (op == 4 || (op == 5 && u)) {
5016 /* Insert */
923e6509
CL
5017 neon_load_reg64(cpu_V1, rd + pass);
5018 uint64_t mask;
5019 if (shift < -63 || shift > 63) {
5020 mask = 0;
5021 } else {
5022 if (op == 4) {
5023 mask = 0xffffffffffffffffull >> -shift;
5024 } else {
5025 mask = 0xffffffffffffffffull << shift;
5026 }
5027 }
5028 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5029 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5030 }
5031 neon_store_reg64(cpu_V0, rd + pass);
5032 } else { /* size < 3 */
5033 /* Operands in T0 and T1. */
dd8fbd78 5034 tmp = neon_load_reg(rm, pass);
7d1b0095 5035 tmp2 = tcg_temp_new_i32();
dd8fbd78 5036 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5037 switch (op) {
5038 case 0: /* VSHR */
5039 case 1: /* VSRA */
5040 GEN_NEON_INTEGER_OP(shl);
5041 break;
5042 case 2: /* VRSHR */
5043 case 3: /* VRSRA */
5044 GEN_NEON_INTEGER_OP(rshl);
5045 break;
5046 case 4: /* VSRI */
ad69471c
PB
5047 case 5: /* VSHL, VSLI */
5048 switch (size) {
dd8fbd78
FN
5049 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5050 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5051 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5052 default: abort();
ad69471c
PB
5053 }
5054 break;
0322b26e 5055 case 6: /* VQSHLU */
ad69471c 5056 switch (size) {
0322b26e 5057 case 0:
02da0b2d
PM
5058 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5059 tmp, tmp2);
0322b26e
PM
5060 break;
5061 case 1:
02da0b2d
PM
5062 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5063 tmp, tmp2);
0322b26e
PM
5064 break;
5065 case 2:
02da0b2d
PM
5066 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5067 tmp, tmp2);
0322b26e
PM
5068 break;
5069 default:
cc13115b 5070 abort();
ad69471c
PB
5071 }
5072 break;
0322b26e 5073 case 7: /* VQSHL */
02da0b2d 5074 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5075 break;
ad69471c 5076 }
7d1b0095 5077 tcg_temp_free_i32(tmp2);
ad69471c
PB
5078
5079 if (op == 1 || op == 3) {
5080 /* Accumulate. */
dd8fbd78 5081 tmp2 = neon_load_reg(rd, pass);
5371cb81 5082 gen_neon_add(size, tmp, tmp2);
7d1b0095 5083 tcg_temp_free_i32(tmp2);
ad69471c
PB
5084 } else if (op == 4 || (op == 5 && u)) {
5085 /* Insert */
5086 switch (size) {
5087 case 0:
5088 if (op == 4)
ca9a32e4 5089 mask = 0xff >> -shift;
ad69471c 5090 else
ca9a32e4
JR
5091 mask = (uint8_t)(0xff << shift);
5092 mask |= mask << 8;
5093 mask |= mask << 16;
ad69471c
PB
5094 break;
5095 case 1:
5096 if (op == 4)
ca9a32e4 5097 mask = 0xffff >> -shift;
ad69471c 5098 else
ca9a32e4
JR
5099 mask = (uint16_t)(0xffff << shift);
5100 mask |= mask << 16;
ad69471c
PB
5101 break;
5102 case 2:
ca9a32e4
JR
5103 if (shift < -31 || shift > 31) {
5104 mask = 0;
5105 } else {
5106 if (op == 4)
5107 mask = 0xffffffffu >> -shift;
5108 else
5109 mask = 0xffffffffu << shift;
5110 }
ad69471c
PB
5111 break;
5112 default:
5113 abort();
5114 }
dd8fbd78 5115 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5116 tcg_gen_andi_i32(tmp, tmp, mask);
5117 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5118 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5119 tcg_temp_free_i32(tmp2);
ad69471c 5120 }
dd8fbd78 5121 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5122 }
5123 } /* for pass */
5124 } else if (op < 10) {
ad69471c 5125 /* Shift by immediate and narrow:
9ee6e8bb 5126 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5127 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5128 if (rm & 1) {
5129 return 1;
5130 }
9ee6e8bb
PB
5131 shift = shift - (1 << (size + 3));
5132 size++;
92cdfaeb 5133 if (size == 3) {
a7812ae4 5134 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5135 neon_load_reg64(cpu_V0, rm);
5136 neon_load_reg64(cpu_V1, rm + 1);
5137 for (pass = 0; pass < 2; pass++) {
5138 TCGv_i64 in;
5139 if (pass == 0) {
5140 in = cpu_V0;
5141 } else {
5142 in = cpu_V1;
5143 }
ad69471c 5144 if (q) {
0b36f4cd 5145 if (input_unsigned) {
92cdfaeb 5146 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5147 } else {
92cdfaeb 5148 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5149 }
ad69471c 5150 } else {
0b36f4cd 5151 if (input_unsigned) {
92cdfaeb 5152 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5153 } else {
92cdfaeb 5154 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5155 }
ad69471c 5156 }
7d1b0095 5157 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5158 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5159 neon_store_reg(rd, pass, tmp);
5160 } /* for pass */
5161 tcg_temp_free_i64(tmp64);
5162 } else {
5163 if (size == 1) {
5164 imm = (uint16_t)shift;
5165 imm |= imm << 16;
2c0262af 5166 } else {
92cdfaeb
PM
5167 /* size == 2 */
5168 imm = (uint32_t)shift;
5169 }
5170 tmp2 = tcg_const_i32(imm);
5171 tmp4 = neon_load_reg(rm + 1, 0);
5172 tmp5 = neon_load_reg(rm + 1, 1);
5173 for (pass = 0; pass < 2; pass++) {
5174 if (pass == 0) {
5175 tmp = neon_load_reg(rm, 0);
5176 } else {
5177 tmp = tmp4;
5178 }
0b36f4cd
CL
5179 gen_neon_shift_narrow(size, tmp, tmp2, q,
5180 input_unsigned);
92cdfaeb
PM
5181 if (pass == 0) {
5182 tmp3 = neon_load_reg(rm, 1);
5183 } else {
5184 tmp3 = tmp5;
5185 }
0b36f4cd
CL
5186 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5187 input_unsigned);
36aa55dc 5188 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5189 tcg_temp_free_i32(tmp);
5190 tcg_temp_free_i32(tmp3);
5191 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5192 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5193 neon_store_reg(rd, pass, tmp);
5194 } /* for pass */
c6067f04 5195 tcg_temp_free_i32(tmp2);
b75263d6 5196 }
9ee6e8bb 5197 } else if (op == 10) {
cc13115b
PM
5198 /* VSHLL, VMOVL */
5199 if (q || (rd & 1)) {
9ee6e8bb 5200 return 1;
cc13115b 5201 }
ad69471c
PB
5202 tmp = neon_load_reg(rm, 0);
5203 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5204 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5205 if (pass == 1)
5206 tmp = tmp2;
5207
5208 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5209
9ee6e8bb
PB
5210 if (shift != 0) {
5211 /* The shift is less than the width of the source
ad69471c
PB
5212 type, so we can just shift the whole register. */
5213 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5214 /* Widen the result of shift: we need to clear
5215 * the potential overflow bits resulting from
5216 * left bits of the narrow input appearing as
5217 * right bits of left the neighbour narrow
5218 * input. */
ad69471c
PB
5219 if (size < 2 || !u) {
5220 uint64_t imm64;
5221 if (size == 0) {
5222 imm = (0xffu >> (8 - shift));
5223 imm |= imm << 16;
acdf01ef 5224 } else if (size == 1) {
ad69471c 5225 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5226 } else {
5227 /* size == 2 */
5228 imm = 0xffffffff >> (32 - shift);
5229 }
5230 if (size < 2) {
5231 imm64 = imm | (((uint64_t)imm) << 32);
5232 } else {
5233 imm64 = imm;
9ee6e8bb 5234 }
acdf01ef 5235 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5236 }
5237 }
ad69471c 5238 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5239 }
f73534a5 5240 } else if (op >= 14) {
9ee6e8bb 5241 /* VCVT fixed-point. */
cc13115b
PM
5242 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5243 return 1;
5244 }
f73534a5
PM
5245 /* We have already masked out the must-be-1 top bit of imm6,
5246 * hence this 32-shift where the ARM ARM has 64-imm6.
5247 */
5248 shift = 32 - shift;
9ee6e8bb 5249 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5250 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5251 if (!(op & 1)) {
9ee6e8bb 5252 if (u)
5500b06c 5253 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5254 else
5500b06c 5255 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5256 } else {
5257 if (u)
5500b06c 5258 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5259 else
5500b06c 5260 gen_vfp_tosl(0, shift, 1);
2c0262af 5261 }
4373f3ce 5262 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5263 }
5264 } else {
9ee6e8bb
PB
5265 return 1;
5266 }
5267 } else { /* (insn & 0x00380080) == 0 */
5268 int invert;
7d80fee5
PM
5269 if (q && (rd & 1)) {
5270 return 1;
5271 }
9ee6e8bb
PB
5272
5273 op = (insn >> 8) & 0xf;
5274 /* One register and immediate. */
5275 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5276 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5277 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5278 * We choose to not special-case this and will behave as if a
5279 * valid constant encoding of 0 had been given.
5280 */
9ee6e8bb
PB
5281 switch (op) {
5282 case 0: case 1:
5283 /* no-op */
5284 break;
5285 case 2: case 3:
5286 imm <<= 8;
5287 break;
5288 case 4: case 5:
5289 imm <<= 16;
5290 break;
5291 case 6: case 7:
5292 imm <<= 24;
5293 break;
5294 case 8: case 9:
5295 imm |= imm << 16;
5296 break;
5297 case 10: case 11:
5298 imm = (imm << 8) | (imm << 24);
5299 break;
5300 case 12:
8e31209e 5301 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5302 break;
5303 case 13:
5304 imm = (imm << 16) | 0xffff;
5305 break;
5306 case 14:
5307 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5308 if (invert)
5309 imm = ~imm;
5310 break;
5311 case 15:
7d80fee5
PM
5312 if (invert) {
5313 return 1;
5314 }
9ee6e8bb
PB
5315 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5316 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5317 break;
5318 }
5319 if (invert)
5320 imm = ~imm;
5321
9ee6e8bb
PB
5322 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5323 if (op & 1 && op < 12) {
ad69471c 5324 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5325 if (invert) {
5326 /* The immediate value has already been inverted, so
5327 BIC becomes AND. */
ad69471c 5328 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5329 } else {
ad69471c 5330 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5331 }
9ee6e8bb 5332 } else {
ad69471c 5333 /* VMOV, VMVN. */
7d1b0095 5334 tmp = tcg_temp_new_i32();
9ee6e8bb 5335 if (op == 14 && invert) {
a5a14945 5336 int n;
ad69471c
PB
5337 uint32_t val;
5338 val = 0;
9ee6e8bb
PB
5339 for (n = 0; n < 4; n++) {
5340 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5341 val |= 0xff << (n * 8);
9ee6e8bb 5342 }
ad69471c
PB
5343 tcg_gen_movi_i32(tmp, val);
5344 } else {
5345 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5346 }
9ee6e8bb 5347 }
ad69471c 5348 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5349 }
5350 }
e4b3861d 5351 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5352 if (size != 3) {
5353 op = (insn >> 8) & 0xf;
5354 if ((insn & (1 << 6)) == 0) {
5355 /* Three registers of different lengths. */
5356 int src1_wide;
5357 int src2_wide;
5358 int prewiden;
695272dc
PM
5359 /* undefreq: bit 0 : UNDEF if size != 0
5360 * bit 1 : UNDEF if size == 0
5361 * bit 2 : UNDEF if U == 1
5362 * Note that [1:0] set implies 'always UNDEF'
5363 */
5364 int undefreq;
5365 /* prewiden, src1_wide, src2_wide, undefreq */
5366 static const int neon_3reg_wide[16][4] = {
5367 {1, 0, 0, 0}, /* VADDL */
5368 {1, 1, 0, 0}, /* VADDW */
5369 {1, 0, 0, 0}, /* VSUBL */
5370 {1, 1, 0, 0}, /* VSUBW */
5371 {0, 1, 1, 0}, /* VADDHN */
5372 {0, 0, 0, 0}, /* VABAL */
5373 {0, 1, 1, 0}, /* VSUBHN */
5374 {0, 0, 0, 0}, /* VABDL */
5375 {0, 0, 0, 0}, /* VMLAL */
5376 {0, 0, 0, 6}, /* VQDMLAL */
5377 {0, 0, 0, 0}, /* VMLSL */
5378 {0, 0, 0, 6}, /* VQDMLSL */
5379 {0, 0, 0, 0}, /* Integer VMULL */
5380 {0, 0, 0, 2}, /* VQDMULL */
5381 {0, 0, 0, 5}, /* Polynomial VMULL */
5382 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5383 };
5384
5385 prewiden = neon_3reg_wide[op][0];
5386 src1_wide = neon_3reg_wide[op][1];
5387 src2_wide = neon_3reg_wide[op][2];
695272dc 5388 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5389
695272dc
PM
5390 if (((undefreq & 1) && (size != 0)) ||
5391 ((undefreq & 2) && (size == 0)) ||
5392 ((undefreq & 4) && u)) {
5393 return 1;
5394 }
5395 if ((src1_wide && (rn & 1)) ||
5396 (src2_wide && (rm & 1)) ||
5397 (!src2_wide && (rd & 1))) {
ad69471c 5398 return 1;
695272dc 5399 }
ad69471c 5400
9ee6e8bb
PB
5401 /* Avoid overlapping operands. Wide source operands are
5402 always aligned so will never overlap with wide
5403 destinations in problematic ways. */
8f8e3aa4 5404 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5405 tmp = neon_load_reg(rm, 1);
5406 neon_store_scratch(2, tmp);
8f8e3aa4 5407 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5408 tmp = neon_load_reg(rn, 1);
5409 neon_store_scratch(2, tmp);
9ee6e8bb 5410 }
a50f5b91 5411 TCGV_UNUSED(tmp3);
9ee6e8bb 5412 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5413 if (src1_wide) {
5414 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5415 TCGV_UNUSED(tmp);
9ee6e8bb 5416 } else {
ad69471c 5417 if (pass == 1 && rd == rn) {
dd8fbd78 5418 tmp = neon_load_scratch(2);
9ee6e8bb 5419 } else {
ad69471c
PB
5420 tmp = neon_load_reg(rn, pass);
5421 }
5422 if (prewiden) {
5423 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5424 }
5425 }
ad69471c
PB
5426 if (src2_wide) {
5427 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5428 TCGV_UNUSED(tmp2);
9ee6e8bb 5429 } else {
ad69471c 5430 if (pass == 1 && rd == rm) {
dd8fbd78 5431 tmp2 = neon_load_scratch(2);
9ee6e8bb 5432 } else {
ad69471c
PB
5433 tmp2 = neon_load_reg(rm, pass);
5434 }
5435 if (prewiden) {
5436 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5437 }
9ee6e8bb
PB
5438 }
5439 switch (op) {
5440 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5441 gen_neon_addl(size);
9ee6e8bb 5442 break;
79b0e534 5443 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5444 gen_neon_subl(size);
9ee6e8bb
PB
5445 break;
5446 case 5: case 7: /* VABAL, VABDL */
5447 switch ((size << 1) | u) {
ad69471c
PB
5448 case 0:
5449 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5450 break;
5451 case 1:
5452 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5453 break;
5454 case 2:
5455 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5456 break;
5457 case 3:
5458 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5459 break;
5460 case 4:
5461 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5462 break;
5463 case 5:
5464 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5465 break;
9ee6e8bb
PB
5466 default: abort();
5467 }
7d1b0095
PM
5468 tcg_temp_free_i32(tmp2);
5469 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5470 break;
5471 case 8: case 9: case 10: case 11: case 12: case 13:
5472 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5473 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5474 break;
5475 case 14: /* Polynomial VMULL */
e5ca24cb 5476 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5477 tcg_temp_free_i32(tmp2);
5478 tcg_temp_free_i32(tmp);
e5ca24cb 5479 break;
695272dc
PM
5480 default: /* 15 is RESERVED: caught earlier */
5481 abort();
9ee6e8bb 5482 }
ebcd88ce
PM
5483 if (op == 13) {
5484 /* VQDMULL */
5485 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5486 neon_store_reg64(cpu_V0, rd + pass);
5487 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5488 /* Accumulate. */
ebcd88ce 5489 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5490 switch (op) {
4dc064e6
PM
5491 case 10: /* VMLSL */
5492 gen_neon_negl(cpu_V0, size);
5493 /* Fall through */
5494 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5495 gen_neon_addl(size);
9ee6e8bb
PB
5496 break;
5497 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5498 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5499 if (op == 11) {
5500 gen_neon_negl(cpu_V0, size);
5501 }
ad69471c
PB
5502 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5503 break;
9ee6e8bb
PB
5504 default:
5505 abort();
5506 }
ad69471c 5507 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5508 } else if (op == 4 || op == 6) {
5509 /* Narrowing operation. */
7d1b0095 5510 tmp = tcg_temp_new_i32();
79b0e534 5511 if (!u) {
9ee6e8bb 5512 switch (size) {
ad69471c
PB
5513 case 0:
5514 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5515 break;
5516 case 1:
5517 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5518 break;
5519 case 2:
5520 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5521 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5522 break;
9ee6e8bb
PB
5523 default: abort();
5524 }
5525 } else {
5526 switch (size) {
ad69471c
PB
5527 case 0:
5528 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5529 break;
5530 case 1:
5531 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5532 break;
5533 case 2:
5534 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5535 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5536 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5537 break;
9ee6e8bb
PB
5538 default: abort();
5539 }
5540 }
ad69471c
PB
5541 if (pass == 0) {
5542 tmp3 = tmp;
5543 } else {
5544 neon_store_reg(rd, 0, tmp3);
5545 neon_store_reg(rd, 1, tmp);
5546 }
9ee6e8bb
PB
5547 } else {
5548 /* Write back the result. */
ad69471c 5549 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5550 }
5551 }
5552 } else {
3e3326df
PM
5553 /* Two registers and a scalar. NB that for ops of this form
5554 * the ARM ARM labels bit 24 as Q, but it is in our variable
5555 * 'u', not 'q'.
5556 */
5557 if (size == 0) {
5558 return 1;
5559 }
9ee6e8bb 5560 switch (op) {
9ee6e8bb 5561 case 1: /* Float VMLA scalar */
9ee6e8bb 5562 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5563 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5564 if (size == 1) {
5565 return 1;
5566 }
5567 /* fall through */
5568 case 0: /* Integer VMLA scalar */
5569 case 4: /* Integer VMLS scalar */
5570 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5571 case 12: /* VQDMULH scalar */
5572 case 13: /* VQRDMULH scalar */
3e3326df
PM
5573 if (u && ((rd | rn) & 1)) {
5574 return 1;
5575 }
dd8fbd78
FN
5576 tmp = neon_get_scalar(size, rm);
5577 neon_store_scratch(0, tmp);
9ee6e8bb 5578 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5579 tmp = neon_load_scratch(0);
5580 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5581 if (op == 12) {
5582 if (size == 1) {
02da0b2d 5583 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5584 } else {
02da0b2d 5585 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5586 }
5587 } else if (op == 13) {
5588 if (size == 1) {
02da0b2d 5589 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5590 } else {
02da0b2d 5591 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5592 }
5593 } else if (op & 1) {
aa47cfdd
PM
5594 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5595 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5596 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5597 } else {
5598 switch (size) {
dd8fbd78
FN
5599 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5600 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5601 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5602 default: abort();
9ee6e8bb
PB
5603 }
5604 }
7d1b0095 5605 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5606 if (op < 8) {
5607 /* Accumulate. */
dd8fbd78 5608 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5609 switch (op) {
5610 case 0:
dd8fbd78 5611 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5612 break;
5613 case 1:
aa47cfdd
PM
5614 {
5615 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5616 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5617 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5618 break;
aa47cfdd 5619 }
9ee6e8bb 5620 case 4:
dd8fbd78 5621 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5622 break;
5623 case 5:
aa47cfdd
PM
5624 {
5625 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5626 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5627 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5628 break;
aa47cfdd 5629 }
9ee6e8bb
PB
5630 default:
5631 abort();
5632 }
7d1b0095 5633 tcg_temp_free_i32(tmp2);
9ee6e8bb 5634 }
dd8fbd78 5635 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5636 }
5637 break;
9ee6e8bb 5638 case 3: /* VQDMLAL scalar */
9ee6e8bb 5639 case 7: /* VQDMLSL scalar */
9ee6e8bb 5640 case 11: /* VQDMULL scalar */
3e3326df 5641 if (u == 1) {
ad69471c 5642 return 1;
3e3326df
PM
5643 }
5644 /* fall through */
5645 case 2: /* VMLAL sclar */
5646 case 6: /* VMLSL scalar */
5647 case 10: /* VMULL scalar */
5648 if (rd & 1) {
5649 return 1;
5650 }
dd8fbd78 5651 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5652 /* We need a copy of tmp2 because gen_neon_mull
5653 * deletes it during pass 0. */
7d1b0095 5654 tmp4 = tcg_temp_new_i32();
c6067f04 5655 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5656 tmp3 = neon_load_reg(rn, 1);
ad69471c 5657
9ee6e8bb 5658 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5659 if (pass == 0) {
5660 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5661 } else {
dd8fbd78 5662 tmp = tmp3;
c6067f04 5663 tmp2 = tmp4;
9ee6e8bb 5664 }
ad69471c 5665 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5666 if (op != 11) {
5667 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5668 }
9ee6e8bb 5669 switch (op) {
4dc064e6
PM
5670 case 6:
5671 gen_neon_negl(cpu_V0, size);
5672 /* Fall through */
5673 case 2:
ad69471c 5674 gen_neon_addl(size);
9ee6e8bb
PB
5675 break;
5676 case 3: case 7:
ad69471c 5677 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5678 if (op == 7) {
5679 gen_neon_negl(cpu_V0, size);
5680 }
ad69471c 5681 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5682 break;
5683 case 10:
5684 /* no-op */
5685 break;
5686 case 11:
ad69471c 5687 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5688 break;
5689 default:
5690 abort();
5691 }
ad69471c 5692 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5693 }
dd8fbd78 5694
dd8fbd78 5695
9ee6e8bb
PB
5696 break;
5697 default: /* 14 and 15 are RESERVED */
5698 return 1;
5699 }
5700 }
5701 } else { /* size == 3 */
5702 if (!u) {
5703 /* Extract. */
9ee6e8bb 5704 imm = (insn >> 8) & 0xf;
ad69471c
PB
5705
5706 if (imm > 7 && !q)
5707 return 1;
5708
52579ea1
PM
5709 if (q && ((rd | rn | rm) & 1)) {
5710 return 1;
5711 }
5712
ad69471c
PB
5713 if (imm == 0) {
5714 neon_load_reg64(cpu_V0, rn);
5715 if (q) {
5716 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5717 }
ad69471c
PB
5718 } else if (imm == 8) {
5719 neon_load_reg64(cpu_V0, rn + 1);
5720 if (q) {
5721 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5722 }
ad69471c 5723 } else if (q) {
a7812ae4 5724 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5725 if (imm < 8) {
5726 neon_load_reg64(cpu_V0, rn);
a7812ae4 5727 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5728 } else {
5729 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5730 neon_load_reg64(tmp64, rm);
ad69471c
PB
5731 }
5732 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5733 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5734 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5735 if (imm < 8) {
5736 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5737 } else {
ad69471c
PB
5738 neon_load_reg64(cpu_V1, rm + 1);
5739 imm -= 8;
9ee6e8bb 5740 }
ad69471c 5741 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5742 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5743 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5744 tcg_temp_free_i64(tmp64);
ad69471c 5745 } else {
a7812ae4 5746 /* BUGFIX */
ad69471c 5747 neon_load_reg64(cpu_V0, rn);
a7812ae4 5748 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5749 neon_load_reg64(cpu_V1, rm);
a7812ae4 5750 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5751 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5752 }
5753 neon_store_reg64(cpu_V0, rd);
5754 if (q) {
5755 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5756 }
5757 } else if ((insn & (1 << 11)) == 0) {
5758 /* Two register misc. */
5759 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5760 size = (insn >> 18) & 3;
600b828c
PM
5761 /* UNDEF for unknown op values and bad op-size combinations */
5762 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5763 return 1;
5764 }
fc2a9b37
PM
5765 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5766 q && ((rm | rd) & 1)) {
5767 return 1;
5768 }
9ee6e8bb 5769 switch (op) {
600b828c 5770 case NEON_2RM_VREV64:
9ee6e8bb 5771 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5772 tmp = neon_load_reg(rm, pass * 2);
5773 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5774 switch (size) {
dd8fbd78
FN
5775 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5776 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5777 case 2: /* no-op */ break;
5778 default: abort();
5779 }
dd8fbd78 5780 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5781 if (size == 2) {
dd8fbd78 5782 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5783 } else {
9ee6e8bb 5784 switch (size) {
dd8fbd78
FN
5785 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5786 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5787 default: abort();
5788 }
dd8fbd78 5789 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5790 }
5791 }
5792 break;
600b828c
PM
5793 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5794 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5795 for (pass = 0; pass < q + 1; pass++) {
5796 tmp = neon_load_reg(rm, pass * 2);
5797 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5798 tmp = neon_load_reg(rm, pass * 2 + 1);
5799 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5800 switch (size) {
5801 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5802 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5803 case 2: tcg_gen_add_i64(CPU_V001); break;
5804 default: abort();
5805 }
600b828c 5806 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5807 /* Accumulate. */
ad69471c
PB
5808 neon_load_reg64(cpu_V1, rd + pass);
5809 gen_neon_addl(size);
9ee6e8bb 5810 }
ad69471c 5811 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5812 }
5813 break;
600b828c 5814 case NEON_2RM_VTRN:
9ee6e8bb 5815 if (size == 2) {
a5a14945 5816 int n;
9ee6e8bb 5817 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5818 tmp = neon_load_reg(rm, n);
5819 tmp2 = neon_load_reg(rd, n + 1);
5820 neon_store_reg(rm, n, tmp2);
5821 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5822 }
5823 } else {
5824 goto elementwise;
5825 }
5826 break;
600b828c 5827 case NEON_2RM_VUZP:
02acedf9 5828 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5829 return 1;
9ee6e8bb
PB
5830 }
5831 break;
600b828c 5832 case NEON_2RM_VZIP:
d68a6f3a 5833 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5834 return 1;
9ee6e8bb
PB
5835 }
5836 break;
600b828c
PM
5837 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5838 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5839 if (rm & 1) {
5840 return 1;
5841 }
a50f5b91 5842 TCGV_UNUSED(tmp2);
9ee6e8bb 5843 for (pass = 0; pass < 2; pass++) {
ad69471c 5844 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5845 tmp = tcg_temp_new_i32();
600b828c
PM
5846 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5847 tmp, cpu_V0);
ad69471c
PB
5848 if (pass == 0) {
5849 tmp2 = tmp;
5850 } else {
5851 neon_store_reg(rd, 0, tmp2);
5852 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5853 }
9ee6e8bb
PB
5854 }
5855 break;
600b828c 5856 case NEON_2RM_VSHLL:
fc2a9b37 5857 if (q || (rd & 1)) {
9ee6e8bb 5858 return 1;
600b828c 5859 }
ad69471c
PB
5860 tmp = neon_load_reg(rm, 0);
5861 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5862 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5863 if (pass == 1)
5864 tmp = tmp2;
5865 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5866 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5867 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5868 }
5869 break;
600b828c 5870 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5871 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5872 q || (rm & 1)) {
5873 return 1;
5874 }
7d1b0095
PM
5875 tmp = tcg_temp_new_i32();
5876 tmp2 = tcg_temp_new_i32();
60011498 5877 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5878 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5879 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5880 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5881 tcg_gen_shli_i32(tmp2, tmp2, 16);
5882 tcg_gen_or_i32(tmp2, tmp2, tmp);
5883 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5884 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5885 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5886 neon_store_reg(rd, 0, tmp2);
7d1b0095 5887 tmp2 = tcg_temp_new_i32();
2d981da7 5888 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5889 tcg_gen_shli_i32(tmp2, tmp2, 16);
5890 tcg_gen_or_i32(tmp2, tmp2, tmp);
5891 neon_store_reg(rd, 1, tmp2);
7d1b0095 5892 tcg_temp_free_i32(tmp);
60011498 5893 break;
600b828c 5894 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5895 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5896 q || (rd & 1)) {
5897 return 1;
5898 }
7d1b0095 5899 tmp3 = tcg_temp_new_i32();
60011498
PB
5900 tmp = neon_load_reg(rm, 0);
5901 tmp2 = neon_load_reg(rm, 1);
5902 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5903 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5904 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5905 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5906 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5907 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5908 tcg_temp_free_i32(tmp);
60011498 5909 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5910 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5911 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5912 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5913 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5914 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5915 tcg_temp_free_i32(tmp2);
5916 tcg_temp_free_i32(tmp3);
60011498 5917 break;
9ee6e8bb
PB
5918 default:
5919 elementwise:
5920 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5921 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5922 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5923 neon_reg_offset(rm, pass));
dd8fbd78 5924 TCGV_UNUSED(tmp);
9ee6e8bb 5925 } else {
dd8fbd78 5926 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5927 }
5928 switch (op) {
600b828c 5929 case NEON_2RM_VREV32:
9ee6e8bb 5930 switch (size) {
dd8fbd78
FN
5931 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5932 case 1: gen_swap_half(tmp); break;
600b828c 5933 default: abort();
9ee6e8bb
PB
5934 }
5935 break;
600b828c 5936 case NEON_2RM_VREV16:
dd8fbd78 5937 gen_rev16(tmp);
9ee6e8bb 5938 break;
600b828c 5939 case NEON_2RM_VCLS:
9ee6e8bb 5940 switch (size) {
dd8fbd78
FN
5941 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5942 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5943 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5944 default: abort();
9ee6e8bb
PB
5945 }
5946 break;
600b828c 5947 case NEON_2RM_VCLZ:
9ee6e8bb 5948 switch (size) {
dd8fbd78
FN
5949 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5950 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5951 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5952 default: abort();
9ee6e8bb
PB
5953 }
5954 break;
600b828c 5955 case NEON_2RM_VCNT:
dd8fbd78 5956 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5957 break;
600b828c 5958 case NEON_2RM_VMVN:
dd8fbd78 5959 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5960 break;
600b828c 5961 case NEON_2RM_VQABS:
9ee6e8bb 5962 switch (size) {
02da0b2d
PM
5963 case 0:
5964 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5965 break;
5966 case 1:
5967 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5968 break;
5969 case 2:
5970 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5971 break;
600b828c 5972 default: abort();
9ee6e8bb
PB
5973 }
5974 break;
600b828c 5975 case NEON_2RM_VQNEG:
9ee6e8bb 5976 switch (size) {
02da0b2d
PM
5977 case 0:
5978 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5979 break;
5980 case 1:
5981 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5982 break;
5983 case 2:
5984 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5985 break;
600b828c 5986 default: abort();
9ee6e8bb
PB
5987 }
5988 break;
600b828c 5989 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5990 tmp2 = tcg_const_i32(0);
9ee6e8bb 5991 switch(size) {
dd8fbd78
FN
5992 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5993 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5994 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5995 default: abort();
9ee6e8bb 5996 }
dd8fbd78 5997 tcg_temp_free(tmp2);
600b828c 5998 if (op == NEON_2RM_VCLE0) {
dd8fbd78 5999 tcg_gen_not_i32(tmp, tmp);
600b828c 6000 }
9ee6e8bb 6001 break;
600b828c 6002 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6003 tmp2 = tcg_const_i32(0);
9ee6e8bb 6004 switch(size) {
dd8fbd78
FN
6005 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6006 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6007 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6008 default: abort();
9ee6e8bb 6009 }
dd8fbd78 6010 tcg_temp_free(tmp2);
600b828c 6011 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6012 tcg_gen_not_i32(tmp, tmp);
600b828c 6013 }
9ee6e8bb 6014 break;
600b828c 6015 case NEON_2RM_VCEQ0:
dd8fbd78 6016 tmp2 = tcg_const_i32(0);
9ee6e8bb 6017 switch(size) {
dd8fbd78
FN
6018 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6019 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6020 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6021 default: abort();
9ee6e8bb 6022 }
dd8fbd78 6023 tcg_temp_free(tmp2);
9ee6e8bb 6024 break;
600b828c 6025 case NEON_2RM_VABS:
9ee6e8bb 6026 switch(size) {
dd8fbd78
FN
6027 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6028 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6029 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6030 default: abort();
9ee6e8bb
PB
6031 }
6032 break;
600b828c 6033 case NEON_2RM_VNEG:
dd8fbd78
FN
6034 tmp2 = tcg_const_i32(0);
6035 gen_neon_rsb(size, tmp, tmp2);
6036 tcg_temp_free(tmp2);
9ee6e8bb 6037 break;
600b828c 6038 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6039 {
6040 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6041 tmp2 = tcg_const_i32(0);
aa47cfdd 6042 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6043 tcg_temp_free(tmp2);
aa47cfdd 6044 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6045 break;
aa47cfdd 6046 }
600b828c 6047 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6048 {
6049 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6050 tmp2 = tcg_const_i32(0);
aa47cfdd 6051 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6052 tcg_temp_free(tmp2);
aa47cfdd 6053 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6054 break;
aa47cfdd 6055 }
600b828c 6056 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6057 {
6058 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6059 tmp2 = tcg_const_i32(0);
aa47cfdd 6060 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6061 tcg_temp_free(tmp2);
aa47cfdd 6062 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6063 break;
aa47cfdd 6064 }
600b828c 6065 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6066 {
6067 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6068 tmp2 = tcg_const_i32(0);
aa47cfdd 6069 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6070 tcg_temp_free(tmp2);
aa47cfdd 6071 tcg_temp_free_ptr(fpstatus);
0e326109 6072 break;
aa47cfdd 6073 }
600b828c 6074 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6075 {
6076 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6077 tmp2 = tcg_const_i32(0);
aa47cfdd 6078 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6079 tcg_temp_free(tmp2);
aa47cfdd 6080 tcg_temp_free_ptr(fpstatus);
0e326109 6081 break;
aa47cfdd 6082 }
600b828c 6083 case NEON_2RM_VABS_F:
4373f3ce 6084 gen_vfp_abs(0);
9ee6e8bb 6085 break;
600b828c 6086 case NEON_2RM_VNEG_F:
4373f3ce 6087 gen_vfp_neg(0);
9ee6e8bb 6088 break;
600b828c 6089 case NEON_2RM_VSWP:
dd8fbd78
FN
6090 tmp2 = neon_load_reg(rd, pass);
6091 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6092 break;
600b828c 6093 case NEON_2RM_VTRN:
dd8fbd78 6094 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6095 switch (size) {
dd8fbd78
FN
6096 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6097 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6098 default: abort();
9ee6e8bb 6099 }
dd8fbd78 6100 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6101 break;
600b828c 6102 case NEON_2RM_VRECPE:
dd8fbd78 6103 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6104 break;
600b828c 6105 case NEON_2RM_VRSQRTE:
dd8fbd78 6106 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6107 break;
600b828c 6108 case NEON_2RM_VRECPE_F:
4373f3ce 6109 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6110 break;
600b828c 6111 case NEON_2RM_VRSQRTE_F:
4373f3ce 6112 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6113 break;
600b828c 6114 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6115 gen_vfp_sito(0, 1);
9ee6e8bb 6116 break;
600b828c 6117 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6118 gen_vfp_uito(0, 1);
9ee6e8bb 6119 break;
600b828c 6120 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6121 gen_vfp_tosiz(0, 1);
9ee6e8bb 6122 break;
600b828c 6123 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6124 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6125 break;
6126 default:
600b828c
PM
6127 /* Reserved op values were caught by the
6128 * neon_2rm_sizes[] check earlier.
6129 */
6130 abort();
9ee6e8bb 6131 }
600b828c 6132 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6133 tcg_gen_st_f32(cpu_F0s, cpu_env,
6134 neon_reg_offset(rd, pass));
9ee6e8bb 6135 } else {
dd8fbd78 6136 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6137 }
6138 }
6139 break;
6140 }
6141 } else if ((insn & (1 << 10)) == 0) {
6142 /* VTBL, VTBX. */
56907d77
PM
6143 int n = ((insn >> 8) & 3) + 1;
6144 if ((rn + n) > 32) {
6145 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6146 * helper function running off the end of the register file.
6147 */
6148 return 1;
6149 }
6150 n <<= 3;
9ee6e8bb 6151 if (insn & (1 << 6)) {
8f8e3aa4 6152 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6153 } else {
7d1b0095 6154 tmp = tcg_temp_new_i32();
8f8e3aa4 6155 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6156 }
8f8e3aa4 6157 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6158 tmp4 = tcg_const_i32(rn);
6159 tmp5 = tcg_const_i32(n);
9ef39277 6160 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6161 tcg_temp_free_i32(tmp);
9ee6e8bb 6162 if (insn & (1 << 6)) {
8f8e3aa4 6163 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6164 } else {
7d1b0095 6165 tmp = tcg_temp_new_i32();
8f8e3aa4 6166 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6167 }
8f8e3aa4 6168 tmp3 = neon_load_reg(rm, 1);
9ef39277 6169 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6170 tcg_temp_free_i32(tmp5);
6171 tcg_temp_free_i32(tmp4);
8f8e3aa4 6172 neon_store_reg(rd, 0, tmp2);
3018f259 6173 neon_store_reg(rd, 1, tmp3);
7d1b0095 6174 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6175 } else if ((insn & 0x380) == 0) {
6176 /* VDUP */
133da6aa
JR
6177 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6178 return 1;
6179 }
9ee6e8bb 6180 if (insn & (1 << 19)) {
dd8fbd78 6181 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6182 } else {
dd8fbd78 6183 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6184 }
6185 if (insn & (1 << 16)) {
dd8fbd78 6186 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6187 } else if (insn & (1 << 17)) {
6188 if ((insn >> 18) & 1)
dd8fbd78 6189 gen_neon_dup_high16(tmp);
9ee6e8bb 6190 else
dd8fbd78 6191 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6192 }
6193 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6194 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6195 tcg_gen_mov_i32(tmp2, tmp);
6196 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6197 }
7d1b0095 6198 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6199 } else {
6200 return 1;
6201 }
6202 }
6203 }
6204 return 0;
6205}
6206
0ecb72a5 6207static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6208{
4b6a83fb
PM
6209 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6210 const ARMCPRegInfo *ri;
6211 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6212
6213 cpnum = (insn >> 8) & 0xf;
6214 if (arm_feature(env, ARM_FEATURE_XSCALE)
6215 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6216 return 1;
6217
4b6a83fb 6218 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6219 switch (cpnum) {
6220 case 0:
6221 case 1:
6222 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6223 return disas_iwmmxt_insn(env, s, insn);
6224 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6225 return disas_dsp_insn(env, s, insn);
6226 }
6227 return 1;
6228 case 10:
6229 case 11:
6230 return disas_vfp_insn (env, s, insn);
4b6a83fb
PM
6231 default:
6232 break;
6233 }
6234
6235 /* Otherwise treat as a generic register access */
6236 is64 = (insn & (1 << 25)) == 0;
6237 if (!is64 && ((insn & (1 << 4)) == 0)) {
6238 /* cdp */
6239 return 1;
6240 }
6241
6242 crm = insn & 0xf;
6243 if (is64) {
6244 crn = 0;
6245 opc1 = (insn >> 4) & 0xf;
6246 opc2 = 0;
6247 rt2 = (insn >> 16) & 0xf;
6248 } else {
6249 crn = (insn >> 16) & 0xf;
6250 opc1 = (insn >> 21) & 7;
6251 opc2 = (insn >> 5) & 7;
6252 rt2 = 0;
6253 }
6254 isread = (insn >> 20) & 1;
6255 rt = (insn >> 12) & 0xf;
6256
6257 ri = get_arm_cp_reginfo(cpu,
6258 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6259 if (ri) {
6260 /* Check access permissions */
6261 if (!cp_access_ok(env, ri, isread)) {
6262 return 1;
6263 }
6264
6265 /* Handle special cases first */
6266 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6267 case ARM_CP_NOP:
6268 return 0;
6269 case ARM_CP_WFI:
6270 if (isread) {
6271 return 1;
6272 }
6273 gen_set_pc_im(s->pc);
6274 s->is_jmp = DISAS_WFI;
2bee5105 6275 return 0;
4b6a83fb
PM
6276 default:
6277 break;
6278 }
6279
6280 if (isread) {
6281 /* Read */
6282 if (is64) {
6283 TCGv_i64 tmp64;
6284 TCGv_i32 tmp;
6285 if (ri->type & ARM_CP_CONST) {
6286 tmp64 = tcg_const_i64(ri->resetvalue);
6287 } else if (ri->readfn) {
6288 TCGv_ptr tmpptr;
6289 gen_set_pc_im(s->pc);
6290 tmp64 = tcg_temp_new_i64();
6291 tmpptr = tcg_const_ptr(ri);
6292 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6293 tcg_temp_free_ptr(tmpptr);
6294 } else {
6295 tmp64 = tcg_temp_new_i64();
6296 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6297 }
6298 tmp = tcg_temp_new_i32();
6299 tcg_gen_trunc_i64_i32(tmp, tmp64);
6300 store_reg(s, rt, tmp);
6301 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6302 tmp = tcg_temp_new_i32();
4b6a83fb 6303 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6304 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6305 store_reg(s, rt2, tmp);
6306 } else {
6307 TCGv tmp;
6308 if (ri->type & ARM_CP_CONST) {
6309 tmp = tcg_const_i32(ri->resetvalue);
6310 } else if (ri->readfn) {
6311 TCGv_ptr tmpptr;
6312 gen_set_pc_im(s->pc);
6313 tmp = tcg_temp_new_i32();
6314 tmpptr = tcg_const_ptr(ri);
6315 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6316 tcg_temp_free_ptr(tmpptr);
6317 } else {
6318 tmp = load_cpu_offset(ri->fieldoffset);
6319 }
6320 if (rt == 15) {
6321 /* Destination register of r15 for 32 bit loads sets
6322 * the condition codes from the high 4 bits of the value
6323 */
6324 gen_set_nzcv(tmp);
6325 tcg_temp_free_i32(tmp);
6326 } else {
6327 store_reg(s, rt, tmp);
6328 }
6329 }
6330 } else {
6331 /* Write */
6332 if (ri->type & ARM_CP_CONST) {
6333 /* If not forbidden by access permissions, treat as WI */
6334 return 0;
6335 }
6336
6337 if (is64) {
6338 TCGv tmplo, tmphi;
6339 TCGv_i64 tmp64 = tcg_temp_new_i64();
6340 tmplo = load_reg(s, rt);
6341 tmphi = load_reg(s, rt2);
6342 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6343 tcg_temp_free_i32(tmplo);
6344 tcg_temp_free_i32(tmphi);
6345 if (ri->writefn) {
6346 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6347 gen_set_pc_im(s->pc);
6348 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6349 tcg_temp_free_ptr(tmpptr);
6350 } else {
6351 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6352 }
6353 tcg_temp_free_i64(tmp64);
6354 } else {
6355 if (ri->writefn) {
6356 TCGv tmp;
6357 TCGv_ptr tmpptr;
6358 gen_set_pc_im(s->pc);
6359 tmp = load_reg(s, rt);
6360 tmpptr = tcg_const_ptr(ri);
6361 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6362 tcg_temp_free_ptr(tmpptr);
6363 tcg_temp_free_i32(tmp);
6364 } else {
6365 TCGv tmp = load_reg(s, rt);
6366 store_cpu_offset(tmp, ri->fieldoffset);
6367 }
6368 }
6369 /* We default to ending the TB on a coprocessor register write,
6370 * but allow this to be suppressed by the register definition
6371 * (usually only necessary to work around guest bugs).
6372 */
6373 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6374 gen_lookup_tb(s);
6375 }
6376 }
6377 return 0;
6378 }
6379
4a9a539f 6380 return 1;
9ee6e8bb
PB
6381}
6382
5e3f878a
PB
6383
6384/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6385static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6386{
6387 TCGv tmp;
7d1b0095 6388 tmp = tcg_temp_new_i32();
5e3f878a
PB
6389 tcg_gen_trunc_i64_i32(tmp, val);
6390 store_reg(s, rlow, tmp);
7d1b0095 6391 tmp = tcg_temp_new_i32();
5e3f878a
PB
6392 tcg_gen_shri_i64(val, val, 32);
6393 tcg_gen_trunc_i64_i32(tmp, val);
6394 store_reg(s, rhigh, tmp);
6395}
6396
6397/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6398static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6399{
a7812ae4 6400 TCGv_i64 tmp;
5e3f878a
PB
6401 TCGv tmp2;
6402
36aa55dc 6403 /* Load value and extend to 64 bits. */
a7812ae4 6404 tmp = tcg_temp_new_i64();
5e3f878a
PB
6405 tmp2 = load_reg(s, rlow);
6406 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6407 tcg_temp_free_i32(tmp2);
5e3f878a 6408 tcg_gen_add_i64(val, val, tmp);
b75263d6 6409 tcg_temp_free_i64(tmp);
5e3f878a
PB
6410}
6411
6412/* load and add a 64-bit value from a register pair. */
a7812ae4 6413static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6414{
a7812ae4 6415 TCGv_i64 tmp;
36aa55dc
PB
6416 TCGv tmpl;
6417 TCGv tmph;
5e3f878a
PB
6418
6419 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6420 tmpl = load_reg(s, rlow);
6421 tmph = load_reg(s, rhigh);
a7812ae4 6422 tmp = tcg_temp_new_i64();
36aa55dc 6423 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6424 tcg_temp_free_i32(tmpl);
6425 tcg_temp_free_i32(tmph);
5e3f878a 6426 tcg_gen_add_i64(val, val, tmp);
b75263d6 6427 tcg_temp_free_i64(tmp);
5e3f878a
PB
6428}
6429
6430/* Set N and Z flags from a 64-bit value. */
a7812ae4 6431static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6432{
7d1b0095 6433 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6434 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6435 gen_logic_CC(tmp);
7d1b0095 6436 tcg_temp_free_i32(tmp);
5e3f878a
PB
6437}
6438
426f5abc
PB
6439/* Load/Store exclusive instructions are implemented by remembering
6440 the value/address loaded, and seeing if these are the same
b90372ad 6441 when the store is performed. This should be sufficient to implement
426f5abc
PB
6442 the architecturally mandated semantics, and avoids having to monitor
6443 regular stores.
6444
6445 In system emulation mode only one CPU will be running at once, so
6446 this sequence is effectively atomic. In user emulation mode we
6447 throw an exception and handle the atomic operation elsewhere. */
/* Emit code for LDREX{,B,H,D}: load rt (and rt2 for size 3) from addr,
 * latch the loaded value(s) and the address in the exclusive monitor.
 * size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        /* For size 3 this is only the low word; the high word follows. */
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Remember the loaded value for the matching store-exclusive. */
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        /* ldrexd: second word comes from addr + 4, goes to rt2, and is
         * latched in cpu_exclusive_high.
         */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    /* Arm the monitor for this address. */
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
6479
/* CLREX: disarm the exclusive monitor. -1 never matches a real
 * (word-aligned) exclusive address.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6484
6485#ifdef CONFIG_USER_ONLY
/* STREX{,B,H,D} in user-mode emulation: the atomic compare-and-store is
 * performed outside generated code, so just record the operands
 * (size and the three register numbers packed into cpu_exclusive_info)
 * and raise EXCP_STREX for the host-side handler.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
6494#else
/* STREX{,B,H,D} in system emulation: compare the monitor state inline and
 * either perform the store (Rd = 0) or fail (Rd = 1). Only one CPU runs at
 * a time here, so the load/compare/store sequence is effectively atomic.
 * size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* The monitor must be armed for exactly this address. */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Reload the memory and compare against the value the LDREX saw. */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Doubleword: the high word must also still match. */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        /* Second word of a strexd. */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);  /* success */
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);  /* failure */
    gen_set_label(done_label);
    /* A store-exclusive disarms the monitor whether it passed or failed. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6562#endif
6563
0ecb72a5 6564static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6565{
6566 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6567 TCGv tmp;
3670669c 6568 TCGv tmp2;
6ddbc6e4 6569 TCGv tmp3;
b0109805 6570 TCGv addr;
a7812ae4 6571 TCGv_i64 tmp64;
9ee6e8bb 6572
d31dd73e 6573 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6574 s->pc += 4;
6575
6576 /* M variants do not implement ARM mode. */
6577 if (IS_M(env))
6578 goto illegal_op;
6579 cond = insn >> 28;
6580 if (cond == 0xf){
be5e7a76
DES
6581 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6582 * choose to UNDEF. In ARMv5 and above the space is used
6583 * for miscellaneous unconditional instructions.
6584 */
6585 ARCH(5);
6586
9ee6e8bb
PB
6587 /* Unconditional instructions. */
6588 if (((insn >> 25) & 7) == 1) {
6589 /* NEON Data processing. */
6590 if (!arm_feature(env, ARM_FEATURE_NEON))
6591 goto illegal_op;
6592
6593 if (disas_neon_data_insn(env, s, insn))
6594 goto illegal_op;
6595 return;
6596 }
6597 if ((insn & 0x0f100000) == 0x04000000) {
6598 /* NEON load/store. */
6599 if (!arm_feature(env, ARM_FEATURE_NEON))
6600 goto illegal_op;
6601
6602 if (disas_neon_ls_insn(env, s, insn))
6603 goto illegal_op;
6604 return;
6605 }
3d185e5d
PM
6606 if (((insn & 0x0f30f000) == 0x0510f000) ||
6607 ((insn & 0x0f30f010) == 0x0710f000)) {
6608 if ((insn & (1 << 22)) == 0) {
6609 /* PLDW; v7MP */
6610 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6611 goto illegal_op;
6612 }
6613 }
6614 /* Otherwise PLD; v5TE+ */
be5e7a76 6615 ARCH(5TE);
3d185e5d
PM
6616 return;
6617 }
6618 if (((insn & 0x0f70f000) == 0x0450f000) ||
6619 ((insn & 0x0f70f010) == 0x0650f000)) {
6620 ARCH(7);
6621 return; /* PLI; V7 */
6622 }
6623 if (((insn & 0x0f700000) == 0x04100000) ||
6624 ((insn & 0x0f700010) == 0x06100000)) {
6625 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6626 goto illegal_op;
6627 }
6628 return; /* v7MP: Unallocated memory hint: must NOP */
6629 }
6630
6631 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6632 ARCH(6);
6633 /* setend */
10962fd5
PM
6634 if (((insn >> 9) & 1) != s->bswap_code) {
6635 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6636 goto illegal_op;
6637 }
6638 return;
6639 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6640 switch ((insn >> 4) & 0xf) {
6641 case 1: /* clrex */
6642 ARCH(6K);
426f5abc 6643 gen_clrex(s);
9ee6e8bb
PB
6644 return;
6645 case 4: /* dsb */
6646 case 5: /* dmb */
6647 case 6: /* isb */
6648 ARCH(7);
6649 /* We don't emulate caches so these are a no-op. */
6650 return;
6651 default:
6652 goto illegal_op;
6653 }
6654 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6655 /* srs */
c67b6b71 6656 int32_t offset;
9ee6e8bb
PB
6657 if (IS_USER(s))
6658 goto illegal_op;
6659 ARCH(6);
6660 op1 = (insn & 0x1f);
7d1b0095 6661 addr = tcg_temp_new_i32();
39ea3d4e
PM
6662 tmp = tcg_const_i32(op1);
6663 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6664 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6665 i = (insn >> 23) & 3;
6666 switch (i) {
6667 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6668 case 1: offset = 0; break; /* IA */
6669 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6670 case 3: offset = 4; break; /* IB */
6671 default: abort();
6672 }
6673 if (offset)
b0109805
PB
6674 tcg_gen_addi_i32(addr, addr, offset);
6675 tmp = load_reg(s, 14);
6676 gen_st32(tmp, addr, 0);
c67b6b71 6677 tmp = load_cpu_field(spsr);
b0109805
PB
6678 tcg_gen_addi_i32(addr, addr, 4);
6679 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6680 if (insn & (1 << 21)) {
6681 /* Base writeback. */
6682 switch (i) {
6683 case 0: offset = -8; break;
c67b6b71
FN
6684 case 1: offset = 4; break;
6685 case 2: offset = -4; break;
9ee6e8bb
PB
6686 case 3: offset = 0; break;
6687 default: abort();
6688 }
6689 if (offset)
c67b6b71 6690 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6691 tmp = tcg_const_i32(op1);
6692 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6693 tcg_temp_free_i32(tmp);
7d1b0095 6694 tcg_temp_free_i32(addr);
b0109805 6695 } else {
7d1b0095 6696 tcg_temp_free_i32(addr);
9ee6e8bb 6697 }
a990f58f 6698 return;
ea825eee 6699 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6700 /* rfe */
c67b6b71 6701 int32_t offset;
9ee6e8bb
PB
6702 if (IS_USER(s))
6703 goto illegal_op;
6704 ARCH(6);
6705 rn = (insn >> 16) & 0xf;
b0109805 6706 addr = load_reg(s, rn);
9ee6e8bb
PB
6707 i = (insn >> 23) & 3;
6708 switch (i) {
b0109805 6709 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6710 case 1: offset = 0; break; /* IA */
6711 case 2: offset = -8; break; /* DB */
b0109805 6712 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6713 default: abort();
6714 }
6715 if (offset)
b0109805
PB
6716 tcg_gen_addi_i32(addr, addr, offset);
6717 /* Load PC into tmp and CPSR into tmp2. */
6718 tmp = gen_ld32(addr, 0);
6719 tcg_gen_addi_i32(addr, addr, 4);
6720 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6721 if (insn & (1 << 21)) {
6722 /* Base writeback. */
6723 switch (i) {
b0109805 6724 case 0: offset = -8; break;
c67b6b71
FN
6725 case 1: offset = 4; break;
6726 case 2: offset = -4; break;
b0109805 6727 case 3: offset = 0; break;
9ee6e8bb
PB
6728 default: abort();
6729 }
6730 if (offset)
b0109805
PB
6731 tcg_gen_addi_i32(addr, addr, offset);
6732 store_reg(s, rn, addr);
6733 } else {
7d1b0095 6734 tcg_temp_free_i32(addr);
9ee6e8bb 6735 }
b0109805 6736 gen_rfe(s, tmp, tmp2);
c67b6b71 6737 return;
9ee6e8bb
PB
6738 } else if ((insn & 0x0e000000) == 0x0a000000) {
6739 /* branch link and change to thumb (blx <offset>) */
6740 int32_t offset;
6741
6742 val = (uint32_t)s->pc;
7d1b0095 6743 tmp = tcg_temp_new_i32();
d9ba4830
PB
6744 tcg_gen_movi_i32(tmp, val);
6745 store_reg(s, 14, tmp);
9ee6e8bb
PB
6746 /* Sign-extend the 24-bit offset */
6747 offset = (((int32_t)insn) << 8) >> 8;
6748 /* offset * 4 + bit24 * 2 + (thumb bit) */
6749 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6750 /* pipeline offset */
6751 val += 4;
be5e7a76 6752 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6753 gen_bx_im(s, val);
9ee6e8bb
PB
6754 return;
6755 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6756 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6757 /* iWMMXt register transfer. */
6758 if (env->cp15.c15_cpar & (1 << 1))
6759 if (!disas_iwmmxt_insn(env, s, insn))
6760 return;
6761 }
6762 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6763 /* Coprocessor double register transfer. */
be5e7a76 6764 ARCH(5TE);
9ee6e8bb
PB
6765 } else if ((insn & 0x0f000010) == 0x0e000010) {
6766 /* Additional coprocessor register transfer. */
7997d92f 6767 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6768 uint32_t mask;
6769 uint32_t val;
6770 /* cps (privileged) */
6771 if (IS_USER(s))
6772 return;
6773 mask = val = 0;
6774 if (insn & (1 << 19)) {
6775 if (insn & (1 << 8))
6776 mask |= CPSR_A;
6777 if (insn & (1 << 7))
6778 mask |= CPSR_I;
6779 if (insn & (1 << 6))
6780 mask |= CPSR_F;
6781 if (insn & (1 << 18))
6782 val |= mask;
6783 }
7997d92f 6784 if (insn & (1 << 17)) {
9ee6e8bb
PB
6785 mask |= CPSR_M;
6786 val |= (insn & 0x1f);
6787 }
6788 if (mask) {
2fbac54b 6789 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6790 }
6791 return;
6792 }
6793 goto illegal_op;
6794 }
6795 if (cond != 0xe) {
6796 /* if not always execute, we generate a conditional jump to
6797 next instruction */
6798 s->condlabel = gen_new_label();
d9ba4830 6799 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6800 s->condjmp = 1;
6801 }
6802 if ((insn & 0x0f900000) == 0x03000000) {
6803 if ((insn & (1 << 21)) == 0) {
6804 ARCH(6T2);
6805 rd = (insn >> 12) & 0xf;
6806 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6807 if ((insn & (1 << 22)) == 0) {
6808 /* MOVW */
7d1b0095 6809 tmp = tcg_temp_new_i32();
5e3f878a 6810 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6811 } else {
6812 /* MOVT */
5e3f878a 6813 tmp = load_reg(s, rd);
86831435 6814 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6815 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6816 }
5e3f878a 6817 store_reg(s, rd, tmp);
9ee6e8bb
PB
6818 } else {
6819 if (((insn >> 12) & 0xf) != 0xf)
6820 goto illegal_op;
6821 if (((insn >> 16) & 0xf) == 0) {
6822 gen_nop_hint(s, insn & 0xff);
6823 } else {
6824 /* CPSR = immediate */
6825 val = insn & 0xff;
6826 shift = ((insn >> 8) & 0xf) * 2;
6827 if (shift)
6828 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6829 i = ((insn & (1 << 22)) != 0);
2fbac54b 6830 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6831 goto illegal_op;
6832 }
6833 }
6834 } else if ((insn & 0x0f900000) == 0x01000000
6835 && (insn & 0x00000090) != 0x00000090) {
6836 /* miscellaneous instructions */
6837 op1 = (insn >> 21) & 3;
6838 sh = (insn >> 4) & 0xf;
6839 rm = insn & 0xf;
6840 switch (sh) {
6841 case 0x0: /* move program status register */
6842 if (op1 & 1) {
6843 /* PSR = reg */
2fbac54b 6844 tmp = load_reg(s, rm);
9ee6e8bb 6845 i = ((op1 & 2) != 0);
2fbac54b 6846 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6847 goto illegal_op;
6848 } else {
6849 /* reg = PSR */
6850 rd = (insn >> 12) & 0xf;
6851 if (op1 & 2) {
6852 if (IS_USER(s))
6853 goto illegal_op;
d9ba4830 6854 tmp = load_cpu_field(spsr);
9ee6e8bb 6855 } else {
7d1b0095 6856 tmp = tcg_temp_new_i32();
9ef39277 6857 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6858 }
d9ba4830 6859 store_reg(s, rd, tmp);
9ee6e8bb
PB
6860 }
6861 break;
6862 case 0x1:
6863 if (op1 == 1) {
6864 /* branch/exchange thumb (bx). */
be5e7a76 6865 ARCH(4T);
d9ba4830
PB
6866 tmp = load_reg(s, rm);
6867 gen_bx(s, tmp);
9ee6e8bb
PB
6868 } else if (op1 == 3) {
6869 /* clz */
be5e7a76 6870 ARCH(5);
9ee6e8bb 6871 rd = (insn >> 12) & 0xf;
1497c961
PB
6872 tmp = load_reg(s, rm);
6873 gen_helper_clz(tmp, tmp);
6874 store_reg(s, rd, tmp);
9ee6e8bb
PB
6875 } else {
6876 goto illegal_op;
6877 }
6878 break;
6879 case 0x2:
6880 if (op1 == 1) {
6881 ARCH(5J); /* bxj */
6882 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6883 tmp = load_reg(s, rm);
6884 gen_bx(s, tmp);
9ee6e8bb
PB
6885 } else {
6886 goto illegal_op;
6887 }
6888 break;
6889 case 0x3:
6890 if (op1 != 1)
6891 goto illegal_op;
6892
be5e7a76 6893 ARCH(5);
9ee6e8bb 6894 /* branch link/exchange thumb (blx) */
d9ba4830 6895 tmp = load_reg(s, rm);
7d1b0095 6896 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6897 tcg_gen_movi_i32(tmp2, s->pc);
6898 store_reg(s, 14, tmp2);
6899 gen_bx(s, tmp);
9ee6e8bb
PB
6900 break;
6901 case 0x5: /* saturating add/subtract */
be5e7a76 6902 ARCH(5TE);
9ee6e8bb
PB
6903 rd = (insn >> 12) & 0xf;
6904 rn = (insn >> 16) & 0xf;
b40d0353 6905 tmp = load_reg(s, rm);
5e3f878a 6906 tmp2 = load_reg(s, rn);
9ee6e8bb 6907 if (op1 & 2)
9ef39277 6908 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6909 if (op1 & 1)
9ef39277 6910 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6911 else
9ef39277 6912 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6913 tcg_temp_free_i32(tmp2);
5e3f878a 6914 store_reg(s, rd, tmp);
9ee6e8bb 6915 break;
49e14940
AL
6916 case 7:
6917 /* SMC instruction (op1 == 3)
6918 and undefined instructions (op1 == 0 || op1 == 2)
6919 will trap */
6920 if (op1 != 1) {
6921 goto illegal_op;
6922 }
6923 /* bkpt */
be5e7a76 6924 ARCH(5);
bc4a0de0 6925 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6926 break;
6927 case 0x8: /* signed multiply */
6928 case 0xa:
6929 case 0xc:
6930 case 0xe:
be5e7a76 6931 ARCH(5TE);
9ee6e8bb
PB
6932 rs = (insn >> 8) & 0xf;
6933 rn = (insn >> 12) & 0xf;
6934 rd = (insn >> 16) & 0xf;
6935 if (op1 == 1) {
6936 /* (32 * 16) >> 16 */
5e3f878a
PB
6937 tmp = load_reg(s, rm);
6938 tmp2 = load_reg(s, rs);
9ee6e8bb 6939 if (sh & 4)
5e3f878a 6940 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6941 else
5e3f878a 6942 gen_sxth(tmp2);
a7812ae4
PB
6943 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6944 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6945 tmp = tcg_temp_new_i32();
a7812ae4 6946 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6947 tcg_temp_free_i64(tmp64);
9ee6e8bb 6948 if ((sh & 2) == 0) {
5e3f878a 6949 tmp2 = load_reg(s, rn);
9ef39277 6950 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6951 tcg_temp_free_i32(tmp2);
9ee6e8bb 6952 }
5e3f878a 6953 store_reg(s, rd, tmp);
9ee6e8bb
PB
6954 } else {
6955 /* 16 * 16 */
5e3f878a
PB
6956 tmp = load_reg(s, rm);
6957 tmp2 = load_reg(s, rs);
6958 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6959 tcg_temp_free_i32(tmp2);
9ee6e8bb 6960 if (op1 == 2) {
a7812ae4
PB
6961 tmp64 = tcg_temp_new_i64();
6962 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6963 tcg_temp_free_i32(tmp);
a7812ae4
PB
6964 gen_addq(s, tmp64, rn, rd);
6965 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6966 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6967 } else {
6968 if (op1 == 0) {
5e3f878a 6969 tmp2 = load_reg(s, rn);
9ef39277 6970 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6971 tcg_temp_free_i32(tmp2);
9ee6e8bb 6972 }
5e3f878a 6973 store_reg(s, rd, tmp);
9ee6e8bb
PB
6974 }
6975 }
6976 break;
6977 default:
6978 goto illegal_op;
6979 }
6980 } else if (((insn & 0x0e000000) == 0 &&
6981 (insn & 0x00000090) != 0x90) ||
6982 ((insn & 0x0e000000) == (1 << 25))) {
6983 int set_cc, logic_cc, shiftop;
6984
6985 op1 = (insn >> 21) & 0xf;
6986 set_cc = (insn >> 20) & 1;
6987 logic_cc = table_logic_cc[op1] & set_cc;
6988
6989 /* data processing instruction */
6990 if (insn & (1 << 25)) {
6991 /* immediate operand */
6992 val = insn & 0xff;
6993 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6994 if (shift) {
9ee6e8bb 6995 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6996 }
7d1b0095 6997 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6998 tcg_gen_movi_i32(tmp2, val);
6999 if (logic_cc && shift) {
7000 gen_set_CF_bit31(tmp2);
7001 }
9ee6e8bb
PB
7002 } else {
7003 /* register */
7004 rm = (insn) & 0xf;
e9bb4aa9 7005 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7006 shiftop = (insn >> 5) & 3;
7007 if (!(insn & (1 << 4))) {
7008 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7009 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7010 } else {
7011 rs = (insn >> 8) & 0xf;
8984bd2e 7012 tmp = load_reg(s, rs);
e9bb4aa9 7013 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7014 }
7015 }
7016 if (op1 != 0x0f && op1 != 0x0d) {
7017 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7018 tmp = load_reg(s, rn);
7019 } else {
7020 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7021 }
7022 rd = (insn >> 12) & 0xf;
7023 switch(op1) {
7024 case 0x00:
e9bb4aa9
JR
7025 tcg_gen_and_i32(tmp, tmp, tmp2);
7026 if (logic_cc) {
7027 gen_logic_CC(tmp);
7028 }
21aeb343 7029 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7030 break;
7031 case 0x01:
e9bb4aa9
JR
7032 tcg_gen_xor_i32(tmp, tmp, tmp2);
7033 if (logic_cc) {
7034 gen_logic_CC(tmp);
7035 }
21aeb343 7036 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7037 break;
7038 case 0x02:
7039 if (set_cc && rd == 15) {
7040 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7041 if (IS_USER(s)) {
9ee6e8bb 7042 goto illegal_op;
e9bb4aa9 7043 }
72485ec4 7044 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7045 gen_exception_return(s, tmp);
9ee6e8bb 7046 } else {
e9bb4aa9 7047 if (set_cc) {
72485ec4 7048 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7049 } else {
7050 tcg_gen_sub_i32(tmp, tmp, tmp2);
7051 }
21aeb343 7052 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7053 }
7054 break;
7055 case 0x03:
e9bb4aa9 7056 if (set_cc) {
72485ec4 7057 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7058 } else {
7059 tcg_gen_sub_i32(tmp, tmp2, tmp);
7060 }
21aeb343 7061 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7062 break;
7063 case 0x04:
e9bb4aa9 7064 if (set_cc) {
72485ec4 7065 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7066 } else {
7067 tcg_gen_add_i32(tmp, tmp, tmp2);
7068 }
21aeb343 7069 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7070 break;
7071 case 0x05:
e9bb4aa9 7072 if (set_cc) {
9ef39277 7073 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
e9bb4aa9
JR
7074 } else {
7075 gen_add_carry(tmp, tmp, tmp2);
7076 }
21aeb343 7077 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7078 break;
7079 case 0x06:
e9bb4aa9 7080 if (set_cc) {
9ef39277 7081 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
e9bb4aa9
JR
7082 } else {
7083 gen_sub_carry(tmp, tmp, tmp2);
7084 }
21aeb343 7085 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7086 break;
7087 case 0x07:
e9bb4aa9 7088 if (set_cc) {
9ef39277 7089 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
e9bb4aa9
JR
7090 } else {
7091 gen_sub_carry(tmp, tmp2, tmp);
7092 }
21aeb343 7093 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7094 break;
7095 case 0x08:
7096 if (set_cc) {
e9bb4aa9
JR
7097 tcg_gen_and_i32(tmp, tmp, tmp2);
7098 gen_logic_CC(tmp);
9ee6e8bb 7099 }
7d1b0095 7100 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7101 break;
7102 case 0x09:
7103 if (set_cc) {
e9bb4aa9
JR
7104 tcg_gen_xor_i32(tmp, tmp, tmp2);
7105 gen_logic_CC(tmp);
9ee6e8bb 7106 }
7d1b0095 7107 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7108 break;
7109 case 0x0a:
7110 if (set_cc) {
72485ec4 7111 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7112 }
7d1b0095 7113 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7114 break;
7115 case 0x0b:
7116 if (set_cc) {
72485ec4 7117 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7118 }
7d1b0095 7119 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7120 break;
7121 case 0x0c:
e9bb4aa9
JR
7122 tcg_gen_or_i32(tmp, tmp, tmp2);
7123 if (logic_cc) {
7124 gen_logic_CC(tmp);
7125 }
21aeb343 7126 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7127 break;
7128 case 0x0d:
7129 if (logic_cc && rd == 15) {
7130 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7131 if (IS_USER(s)) {
9ee6e8bb 7132 goto illegal_op;
e9bb4aa9
JR
7133 }
7134 gen_exception_return(s, tmp2);
9ee6e8bb 7135 } else {
e9bb4aa9
JR
7136 if (logic_cc) {
7137 gen_logic_CC(tmp2);
7138 }
21aeb343 7139 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7140 }
7141 break;
7142 case 0x0e:
f669df27 7143 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7144 if (logic_cc) {
7145 gen_logic_CC(tmp);
7146 }
21aeb343 7147 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7148 break;
7149 default:
7150 case 0x0f:
e9bb4aa9
JR
7151 tcg_gen_not_i32(tmp2, tmp2);
7152 if (logic_cc) {
7153 gen_logic_CC(tmp2);
7154 }
21aeb343 7155 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7156 break;
7157 }
e9bb4aa9 7158 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7159 tcg_temp_free_i32(tmp2);
e9bb4aa9 7160 }
9ee6e8bb
PB
7161 } else {
7162 /* other instructions */
7163 op1 = (insn >> 24) & 0xf;
7164 switch(op1) {
7165 case 0x0:
7166 case 0x1:
7167 /* multiplies, extra load/stores */
7168 sh = (insn >> 5) & 3;
7169 if (sh == 0) {
7170 if (op1 == 0x0) {
7171 rd = (insn >> 16) & 0xf;
7172 rn = (insn >> 12) & 0xf;
7173 rs = (insn >> 8) & 0xf;
7174 rm = (insn) & 0xf;
7175 op1 = (insn >> 20) & 0xf;
7176 switch (op1) {
7177 case 0: case 1: case 2: case 3: case 6:
7178 /* 32 bit mul */
5e3f878a
PB
7179 tmp = load_reg(s, rs);
7180 tmp2 = load_reg(s, rm);
7181 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7182 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7183 if (insn & (1 << 22)) {
7184 /* Subtract (mls) */
7185 ARCH(6T2);
5e3f878a
PB
7186 tmp2 = load_reg(s, rn);
7187 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7188 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7189 } else if (insn & (1 << 21)) {
7190 /* Add */
5e3f878a
PB
7191 tmp2 = load_reg(s, rn);
7192 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7193 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7194 }
7195 if (insn & (1 << 20))
5e3f878a
PB
7196 gen_logic_CC(tmp);
7197 store_reg(s, rd, tmp);
9ee6e8bb 7198 break;
8aac08b1
AJ
7199 case 4:
7200 /* 64 bit mul double accumulate (UMAAL) */
7201 ARCH(6);
7202 tmp = load_reg(s, rs);
7203 tmp2 = load_reg(s, rm);
7204 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7205 gen_addq_lo(s, tmp64, rn);
7206 gen_addq_lo(s, tmp64, rd);
7207 gen_storeq_reg(s, rn, rd, tmp64);
7208 tcg_temp_free_i64(tmp64);
7209 break;
7210 case 8: case 9: case 10: case 11:
7211 case 12: case 13: case 14: case 15:
7212 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7213 tmp = load_reg(s, rs);
7214 tmp2 = load_reg(s, rm);
8aac08b1 7215 if (insn & (1 << 22)) {
a7812ae4 7216 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7217 } else {
a7812ae4 7218 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7219 }
7220 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7221 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7222 }
8aac08b1 7223 if (insn & (1 << 20)) {
a7812ae4 7224 gen_logicq_cc(tmp64);
8aac08b1 7225 }
a7812ae4 7226 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7227 tcg_temp_free_i64(tmp64);
9ee6e8bb 7228 break;
8aac08b1
AJ
7229 default:
7230 goto illegal_op;
9ee6e8bb
PB
7231 }
7232 } else {
7233 rn = (insn >> 16) & 0xf;
7234 rd = (insn >> 12) & 0xf;
7235 if (insn & (1 << 23)) {
7236 /* load/store exclusive */
86753403
PB
7237 op1 = (insn >> 21) & 0x3;
7238 if (op1)
a47f43d2 7239 ARCH(6K);
86753403
PB
7240 else
7241 ARCH(6);
3174f8e9 7242 addr = tcg_temp_local_new_i32();
98a46317 7243 load_reg_var(s, addr, rn);
9ee6e8bb 7244 if (insn & (1 << 20)) {
86753403
PB
7245 switch (op1) {
7246 case 0: /* ldrex */
426f5abc 7247 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7248 break;
7249 case 1: /* ldrexd */
426f5abc 7250 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7251 break;
7252 case 2: /* ldrexb */
426f5abc 7253 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7254 break;
7255 case 3: /* ldrexh */
426f5abc 7256 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7257 break;
7258 default:
7259 abort();
7260 }
9ee6e8bb
PB
7261 } else {
7262 rm = insn & 0xf;
86753403
PB
7263 switch (op1) {
7264 case 0: /* strex */
426f5abc 7265 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7266 break;
7267 case 1: /* strexd */
502e64fe 7268 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7269 break;
7270 case 2: /* strexb */
426f5abc 7271 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7272 break;
7273 case 3: /* strexh */
426f5abc 7274 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7275 break;
7276 default:
7277 abort();
7278 }
9ee6e8bb 7279 }
3174f8e9 7280 tcg_temp_free(addr);
9ee6e8bb
PB
7281 } else {
7282 /* SWP instruction */
7283 rm = (insn) & 0xf;
7284
8984bd2e
PB
7285 /* ??? This is not really atomic. However we know
7286 we never have multiple CPUs running in parallel,
7287 so it is good enough. */
7288 addr = load_reg(s, rn);
7289 tmp = load_reg(s, rm);
9ee6e8bb 7290 if (insn & (1 << 22)) {
8984bd2e
PB
7291 tmp2 = gen_ld8u(addr, IS_USER(s));
7292 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7293 } else {
8984bd2e
PB
7294 tmp2 = gen_ld32(addr, IS_USER(s));
7295 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7296 }
7d1b0095 7297 tcg_temp_free_i32(addr);
8984bd2e 7298 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7299 }
7300 }
7301 } else {
7302 int address_offset;
7303 int load;
7304 /* Misc load/store */
7305 rn = (insn >> 16) & 0xf;
7306 rd = (insn >> 12) & 0xf;
b0109805 7307 addr = load_reg(s, rn);
9ee6e8bb 7308 if (insn & (1 << 24))
b0109805 7309 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7310 address_offset = 0;
7311 if (insn & (1 << 20)) {
7312 /* load */
7313 switch(sh) {
7314 case 1:
b0109805 7315 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7316 break;
7317 case 2:
b0109805 7318 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7319 break;
7320 default:
7321 case 3:
b0109805 7322 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7323 break;
7324 }
7325 load = 1;
7326 } else if (sh & 2) {
be5e7a76 7327 ARCH(5TE);
9ee6e8bb
PB
7328 /* doubleword */
7329 if (sh & 1) {
7330 /* store */
b0109805
PB
7331 tmp = load_reg(s, rd);
7332 gen_st32(tmp, addr, IS_USER(s));
7333 tcg_gen_addi_i32(addr, addr, 4);
7334 tmp = load_reg(s, rd + 1);
7335 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7336 load = 0;
7337 } else {
7338 /* load */
b0109805
PB
7339 tmp = gen_ld32(addr, IS_USER(s));
7340 store_reg(s, rd, tmp);
7341 tcg_gen_addi_i32(addr, addr, 4);
7342 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7343 rd++;
7344 load = 1;
7345 }
7346 address_offset = -4;
7347 } else {
7348 /* store */
b0109805
PB
7349 tmp = load_reg(s, rd);
7350 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7351 load = 0;
7352 }
7353 /* Perform base writeback before the loaded value to
7354 ensure correct behavior with overlapping index registers.
7355 ldrd with base writeback is is undefined if the
7356 destination and index registers overlap. */
7357 if (!(insn & (1 << 24))) {
b0109805
PB
7358 gen_add_datah_offset(s, insn, address_offset, addr);
7359 store_reg(s, rn, addr);
9ee6e8bb
PB
7360 } else if (insn & (1 << 21)) {
7361 if (address_offset)
b0109805
PB
7362 tcg_gen_addi_i32(addr, addr, address_offset);
7363 store_reg(s, rn, addr);
7364 } else {
7d1b0095 7365 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7366 }
7367 if (load) {
7368 /* Complete the load. */
b0109805 7369 store_reg(s, rd, tmp);
9ee6e8bb
PB
7370 }
7371 }
7372 break;
7373 case 0x4:
7374 case 0x5:
7375 goto do_ldst;
7376 case 0x6:
7377 case 0x7:
7378 if (insn & (1 << 4)) {
7379 ARCH(6);
7380 /* Armv6 Media instructions. */
7381 rm = insn & 0xf;
7382 rn = (insn >> 16) & 0xf;
2c0262af 7383 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7384 rs = (insn >> 8) & 0xf;
7385 switch ((insn >> 23) & 3) {
7386 case 0: /* Parallel add/subtract. */
7387 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7388 tmp = load_reg(s, rn);
7389 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7390 sh = (insn >> 5) & 7;
7391 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7392 goto illegal_op;
6ddbc6e4 7393 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7394 tcg_temp_free_i32(tmp2);
6ddbc6e4 7395 store_reg(s, rd, tmp);
9ee6e8bb
PB
7396 break;
7397 case 1:
7398 if ((insn & 0x00700020) == 0) {
6c95676b 7399 /* Halfword pack. */
3670669c
PB
7400 tmp = load_reg(s, rn);
7401 tmp2 = load_reg(s, rm);
9ee6e8bb 7402 shift = (insn >> 7) & 0x1f;
3670669c
PB
7403 if (insn & (1 << 6)) {
7404 /* pkhtb */
22478e79
AZ
7405 if (shift == 0)
7406 shift = 31;
7407 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7408 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7409 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7410 } else {
7411 /* pkhbt */
22478e79
AZ
7412 if (shift)
7413 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7414 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7415 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7416 }
7417 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7418 tcg_temp_free_i32(tmp2);
3670669c 7419 store_reg(s, rd, tmp);
9ee6e8bb
PB
7420 } else if ((insn & 0x00200020) == 0x00200000) {
7421 /* [us]sat */
6ddbc6e4 7422 tmp = load_reg(s, rm);
9ee6e8bb
PB
7423 shift = (insn >> 7) & 0x1f;
7424 if (insn & (1 << 6)) {
7425 if (shift == 0)
7426 shift = 31;
6ddbc6e4 7427 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7428 } else {
6ddbc6e4 7429 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7430 }
7431 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7432 tmp2 = tcg_const_i32(sh);
7433 if (insn & (1 << 22))
9ef39277 7434 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7435 else
9ef39277 7436 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7437 tcg_temp_free_i32(tmp2);
6ddbc6e4 7438 store_reg(s, rd, tmp);
9ee6e8bb
PB
7439 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7440 /* [us]sat16 */
6ddbc6e4 7441 tmp = load_reg(s, rm);
9ee6e8bb 7442 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7443 tmp2 = tcg_const_i32(sh);
7444 if (insn & (1 << 22))
9ef39277 7445 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7446 else
9ef39277 7447 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7448 tcg_temp_free_i32(tmp2);
6ddbc6e4 7449 store_reg(s, rd, tmp);
9ee6e8bb
PB
7450 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7451 /* Select bytes. */
6ddbc6e4
PB
7452 tmp = load_reg(s, rn);
7453 tmp2 = load_reg(s, rm);
7d1b0095 7454 tmp3 = tcg_temp_new_i32();
0ecb72a5 7455 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7456 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7457 tcg_temp_free_i32(tmp3);
7458 tcg_temp_free_i32(tmp2);
6ddbc6e4 7459 store_reg(s, rd, tmp);
9ee6e8bb 7460 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7461 tmp = load_reg(s, rm);
9ee6e8bb 7462 shift = (insn >> 10) & 3;
1301f322 7463 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7464 rotate, a shift is sufficient. */
7465 if (shift != 0)
f669df27 7466 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7467 op1 = (insn >> 20) & 7;
7468 switch (op1) {
5e3f878a
PB
7469 case 0: gen_sxtb16(tmp); break;
7470 case 2: gen_sxtb(tmp); break;
7471 case 3: gen_sxth(tmp); break;
7472 case 4: gen_uxtb16(tmp); break;
7473 case 6: gen_uxtb(tmp); break;
7474 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7475 default: goto illegal_op;
7476 }
7477 if (rn != 15) {
5e3f878a 7478 tmp2 = load_reg(s, rn);
9ee6e8bb 7479 if ((op1 & 3) == 0) {
5e3f878a 7480 gen_add16(tmp, tmp2);
9ee6e8bb 7481 } else {
5e3f878a 7482 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7483 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7484 }
7485 }
6c95676b 7486 store_reg(s, rd, tmp);
9ee6e8bb
PB
7487 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7488 /* rev */
b0109805 7489 tmp = load_reg(s, rm);
9ee6e8bb
PB
7490 if (insn & (1 << 22)) {
7491 if (insn & (1 << 7)) {
b0109805 7492 gen_revsh(tmp);
9ee6e8bb
PB
7493 } else {
7494 ARCH(6T2);
b0109805 7495 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7496 }
7497 } else {
7498 if (insn & (1 << 7))
b0109805 7499 gen_rev16(tmp);
9ee6e8bb 7500 else
66896cb8 7501 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7502 }
b0109805 7503 store_reg(s, rd, tmp);
9ee6e8bb
PB
7504 } else {
7505 goto illegal_op;
7506 }
7507 break;
7508 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7509 switch ((insn >> 20) & 0x7) {
7510 case 5:
7511 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7512 /* op2 not 00x or 11x : UNDEF */
7513 goto illegal_op;
7514 }
838fa72d
AJ
7515 /* Signed multiply most significant [accumulate].
7516 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7517 tmp = load_reg(s, rm);
7518 tmp2 = load_reg(s, rs);
a7812ae4 7519 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7520
955a7dd5 7521 if (rd != 15) {
838fa72d 7522 tmp = load_reg(s, rd);
9ee6e8bb 7523 if (insn & (1 << 6)) {
838fa72d 7524 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7525 } else {
838fa72d 7526 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7527 }
7528 }
838fa72d
AJ
7529 if (insn & (1 << 5)) {
7530 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7531 }
7532 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7533 tmp = tcg_temp_new_i32();
838fa72d
AJ
7534 tcg_gen_trunc_i64_i32(tmp, tmp64);
7535 tcg_temp_free_i64(tmp64);
955a7dd5 7536 store_reg(s, rn, tmp);
41e9564d
PM
7537 break;
7538 case 0:
7539 case 4:
7540 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7541 if (insn & (1 << 7)) {
7542 goto illegal_op;
7543 }
7544 tmp = load_reg(s, rm);
7545 tmp2 = load_reg(s, rs);
9ee6e8bb 7546 if (insn & (1 << 5))
5e3f878a
PB
7547 gen_swap_half(tmp2);
7548 gen_smul_dual(tmp, tmp2);
5e3f878a 7549 if (insn & (1 << 6)) {
e1d177b9 7550 /* This subtraction cannot overflow. */
5e3f878a
PB
7551 tcg_gen_sub_i32(tmp, tmp, tmp2);
7552 } else {
e1d177b9
PM
7553 /* This addition cannot overflow 32 bits;
7554 * however it may overflow considered as a signed
7555 * operation, in which case we must set the Q flag.
7556 */
9ef39277 7557 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7558 }
7d1b0095 7559 tcg_temp_free_i32(tmp2);
9ee6e8bb 7560 if (insn & (1 << 22)) {
5e3f878a 7561 /* smlald, smlsld */
a7812ae4
PB
7562 tmp64 = tcg_temp_new_i64();
7563 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7564 tcg_temp_free_i32(tmp);
a7812ae4
PB
7565 gen_addq(s, tmp64, rd, rn);
7566 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7567 tcg_temp_free_i64(tmp64);
9ee6e8bb 7568 } else {
5e3f878a 7569 /* smuad, smusd, smlad, smlsd */
22478e79 7570 if (rd != 15)
9ee6e8bb 7571 {
22478e79 7572 tmp2 = load_reg(s, rd);
9ef39277 7573 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7574 tcg_temp_free_i32(tmp2);
9ee6e8bb 7575 }
22478e79 7576 store_reg(s, rn, tmp);
9ee6e8bb 7577 }
41e9564d 7578 break;
b8b8ea05
PM
7579 case 1:
7580 case 3:
7581 /* SDIV, UDIV */
7582 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7583 goto illegal_op;
7584 }
7585 if (((insn >> 5) & 7) || (rd != 15)) {
7586 goto illegal_op;
7587 }
7588 tmp = load_reg(s, rm);
7589 tmp2 = load_reg(s, rs);
7590 if (insn & (1 << 21)) {
7591 gen_helper_udiv(tmp, tmp, tmp2);
7592 } else {
7593 gen_helper_sdiv(tmp, tmp, tmp2);
7594 }
7595 tcg_temp_free_i32(tmp2);
7596 store_reg(s, rn, tmp);
7597 break;
41e9564d
PM
7598 default:
7599 goto illegal_op;
9ee6e8bb
PB
7600 }
7601 break;
7602 case 3:
7603 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7604 switch (op1) {
7605 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7606 ARCH(6);
7607 tmp = load_reg(s, rm);
7608 tmp2 = load_reg(s, rs);
7609 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7610 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7611 if (rd != 15) {
7612 tmp2 = load_reg(s, rd);
6ddbc6e4 7613 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7614 tcg_temp_free_i32(tmp2);
9ee6e8bb 7615 }
ded9d295 7616 store_reg(s, rn, tmp);
9ee6e8bb
PB
7617 break;
7618 case 0x20: case 0x24: case 0x28: case 0x2c:
7619 /* Bitfield insert/clear. */
7620 ARCH(6T2);
7621 shift = (insn >> 7) & 0x1f;
7622 i = (insn >> 16) & 0x1f;
7623 i = i + 1 - shift;
7624 if (rm == 15) {
7d1b0095 7625 tmp = tcg_temp_new_i32();
5e3f878a 7626 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7627 } else {
5e3f878a 7628 tmp = load_reg(s, rm);
9ee6e8bb
PB
7629 }
7630 if (i != 32) {
5e3f878a 7631 tmp2 = load_reg(s, rd);
d593c48e 7632 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7633 tcg_temp_free_i32(tmp2);
9ee6e8bb 7634 }
5e3f878a 7635 store_reg(s, rd, tmp);
9ee6e8bb
PB
7636 break;
7637 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7638 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7639 ARCH(6T2);
5e3f878a 7640 tmp = load_reg(s, rm);
9ee6e8bb
PB
7641 shift = (insn >> 7) & 0x1f;
7642 i = ((insn >> 16) & 0x1f) + 1;
7643 if (shift + i > 32)
7644 goto illegal_op;
7645 if (i < 32) {
7646 if (op1 & 0x20) {
5e3f878a 7647 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7648 } else {
5e3f878a 7649 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7650 }
7651 }
5e3f878a 7652 store_reg(s, rd, tmp);
9ee6e8bb
PB
7653 break;
7654 default:
7655 goto illegal_op;
7656 }
7657 break;
7658 }
7659 break;
7660 }
7661 do_ldst:
7662 /* Check for undefined extension instructions
7663 * per the ARM Bible IE:
7664 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7665 */
7666 sh = (0xf << 20) | (0xf << 4);
7667 if (op1 == 0x7 && ((insn & sh) == sh))
7668 {
7669 goto illegal_op;
7670 }
7671 /* load/store byte/word */
7672 rn = (insn >> 16) & 0xf;
7673 rd = (insn >> 12) & 0xf;
b0109805 7674 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7675 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7676 if (insn & (1 << 24))
b0109805 7677 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7678 if (insn & (1 << 20)) {
7679 /* load */
9ee6e8bb 7680 if (insn & (1 << 22)) {
b0109805 7681 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7682 } else {
b0109805 7683 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7684 }
9ee6e8bb
PB
7685 } else {
7686 /* store */
b0109805 7687 tmp = load_reg(s, rd);
9ee6e8bb 7688 if (insn & (1 << 22))
b0109805 7689 gen_st8(tmp, tmp2, i);
9ee6e8bb 7690 else
b0109805 7691 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7692 }
7693 if (!(insn & (1 << 24))) {
b0109805
PB
7694 gen_add_data_offset(s, insn, tmp2);
7695 store_reg(s, rn, tmp2);
7696 } else if (insn & (1 << 21)) {
7697 store_reg(s, rn, tmp2);
7698 } else {
7d1b0095 7699 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7700 }
7701 if (insn & (1 << 20)) {
7702 /* Complete the load. */
be5e7a76 7703 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7704 }
7705 break;
7706 case 0x08:
7707 case 0x09:
7708 {
7709 int j, n, user, loaded_base;
b0109805 7710 TCGv loaded_var;
9ee6e8bb
PB
7711 /* load/store multiple words */
7712 /* XXX: store correct base if write back */
7713 user = 0;
7714 if (insn & (1 << 22)) {
7715 if (IS_USER(s))
7716 goto illegal_op; /* only usable in supervisor mode */
7717
7718 if ((insn & (1 << 15)) == 0)
7719 user = 1;
7720 }
7721 rn = (insn >> 16) & 0xf;
b0109805 7722 addr = load_reg(s, rn);
9ee6e8bb
PB
7723
7724 /* compute total size */
7725 loaded_base = 0;
a50f5b91 7726 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7727 n = 0;
7728 for(i=0;i<16;i++) {
7729 if (insn & (1 << i))
7730 n++;
7731 }
7732 /* XXX: test invalid n == 0 case ? */
7733 if (insn & (1 << 23)) {
7734 if (insn & (1 << 24)) {
7735 /* pre increment */
b0109805 7736 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7737 } else {
7738 /* post increment */
7739 }
7740 } else {
7741 if (insn & (1 << 24)) {
7742 /* pre decrement */
b0109805 7743 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7744 } else {
7745 /* post decrement */
7746 if (n != 1)
b0109805 7747 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7748 }
7749 }
7750 j = 0;
7751 for(i=0;i<16;i++) {
7752 if (insn & (1 << i)) {
7753 if (insn & (1 << 20)) {
7754 /* load */
b0109805 7755 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7756 if (user) {
b75263d6 7757 tmp2 = tcg_const_i32(i);
1ce94f81 7758 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7759 tcg_temp_free_i32(tmp2);
7d1b0095 7760 tcg_temp_free_i32(tmp);
9ee6e8bb 7761 } else if (i == rn) {
b0109805 7762 loaded_var = tmp;
9ee6e8bb
PB
7763 loaded_base = 1;
7764 } else {
be5e7a76 7765 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7766 }
7767 } else {
7768 /* store */
7769 if (i == 15) {
7770 /* special case: r15 = PC + 8 */
7771 val = (long)s->pc + 4;
7d1b0095 7772 tmp = tcg_temp_new_i32();
b0109805 7773 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7774 } else if (user) {
7d1b0095 7775 tmp = tcg_temp_new_i32();
b75263d6 7776 tmp2 = tcg_const_i32(i);
9ef39277 7777 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7778 tcg_temp_free_i32(tmp2);
9ee6e8bb 7779 } else {
b0109805 7780 tmp = load_reg(s, i);
9ee6e8bb 7781 }
b0109805 7782 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7783 }
7784 j++;
7785 /* no need to add after the last transfer */
7786 if (j != n)
b0109805 7787 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7788 }
7789 }
7790 if (insn & (1 << 21)) {
7791 /* write back */
7792 if (insn & (1 << 23)) {
7793 if (insn & (1 << 24)) {
7794 /* pre increment */
7795 } else {
7796 /* post increment */
b0109805 7797 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7798 }
7799 } else {
7800 if (insn & (1 << 24)) {
7801 /* pre decrement */
7802 if (n != 1)
b0109805 7803 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7804 } else {
7805 /* post decrement */
b0109805 7806 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7807 }
7808 }
b0109805
PB
7809 store_reg(s, rn, addr);
7810 } else {
7d1b0095 7811 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7812 }
7813 if (loaded_base) {
b0109805 7814 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7815 }
7816 if ((insn & (1 << 22)) && !user) {
7817 /* Restore CPSR from SPSR. */
d9ba4830
PB
7818 tmp = load_cpu_field(spsr);
7819 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7820 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7821 s->is_jmp = DISAS_UPDATE;
7822 }
7823 }
7824 break;
7825 case 0xa:
7826 case 0xb:
7827 {
7828 int32_t offset;
7829
7830 /* branch (and link) */
7831 val = (int32_t)s->pc;
7832 if (insn & (1 << 24)) {
7d1b0095 7833 tmp = tcg_temp_new_i32();
5e3f878a
PB
7834 tcg_gen_movi_i32(tmp, val);
7835 store_reg(s, 14, tmp);
9ee6e8bb
PB
7836 }
7837 offset = (((int32_t)insn << 8) >> 8);
7838 val += (offset << 2) + 4;
7839 gen_jmp(s, val);
7840 }
7841 break;
7842 case 0xc:
7843 case 0xd:
7844 case 0xe:
7845 /* Coprocessor. */
7846 if (disas_coproc_insn(env, s, insn))
7847 goto illegal_op;
7848 break;
7849 case 0xf:
7850 /* swi */
5e3f878a 7851 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7852 s->is_jmp = DISAS_SWI;
7853 break;
7854 default:
7855 illegal_op:
bc4a0de0 7856 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7857 break;
7858 }
7859 }
7860}
7861
/* Predicate: does Thumb-2 data-processing opcode OP belong to the
   logical group (AND/BIC/ORR/ORN/EOR/...)?  The logical ops occupy
   the opcode values below 8; arithmetic ops start at 8. */
static int
thumb2_logic_op(int op)
{
    return op <= 7;
}
7868
7869/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7870 then set condition code flags based on the result of the operation.
7871 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7872 to the high bit of T1.
7873 Returns zero if the opcode is valid. */
7874
7875static int
396e467c 7876gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7877{
7878 int logic_cc;
7879
7880 logic_cc = 0;
7881 switch (op) {
7882 case 0: /* and */
396e467c 7883 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7884 logic_cc = conds;
7885 break;
7886 case 1: /* bic */
f669df27 7887 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7888 logic_cc = conds;
7889 break;
7890 case 2: /* orr */
396e467c 7891 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7892 logic_cc = conds;
7893 break;
7894 case 3: /* orn */
29501f1b 7895 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7896 logic_cc = conds;
7897 break;
7898 case 4: /* eor */
396e467c 7899 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7900 logic_cc = conds;
7901 break;
7902 case 8: /* add */
7903 if (conds)
72485ec4 7904 gen_add_CC(t0, t0, t1);
9ee6e8bb 7905 else
396e467c 7906 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7907 break;
7908 case 10: /* adc */
7909 if (conds)
9ef39277 7910 gen_helper_adc_cc(t0, cpu_env, t0, t1);
9ee6e8bb 7911 else
396e467c 7912 gen_adc(t0, t1);
9ee6e8bb
PB
7913 break;
7914 case 11: /* sbc */
7915 if (conds)
9ef39277 7916 gen_helper_sbc_cc(t0, cpu_env, t0, t1);
9ee6e8bb 7917 else
396e467c 7918 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7919 break;
7920 case 13: /* sub */
7921 if (conds)
72485ec4 7922 gen_sub_CC(t0, t0, t1);
9ee6e8bb 7923 else
396e467c 7924 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7925 break;
7926 case 14: /* rsb */
7927 if (conds)
72485ec4 7928 gen_sub_CC(t0, t1, t0);
9ee6e8bb 7929 else
396e467c 7930 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7931 break;
7932 default: /* 5, 6, 7, 9, 12, 15. */
7933 return 1;
7934 }
7935 if (logic_cc) {
396e467c 7936 gen_logic_CC(t0);
9ee6e8bb 7937 if (shifter_out)
396e467c 7938 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7939 }
7940 return 0;
7941}
7942
7943/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7944 is not legal. */
0ecb72a5 7945static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 7946{
b0109805 7947 uint32_t insn, imm, shift, offset;
9ee6e8bb 7948 uint32_t rd, rn, rm, rs;
b26eefb6 7949 TCGv tmp;
6ddbc6e4
PB
7950 TCGv tmp2;
7951 TCGv tmp3;
b0109805 7952 TCGv addr;
a7812ae4 7953 TCGv_i64 tmp64;
9ee6e8bb
PB
7954 int op;
7955 int shiftop;
7956 int conds;
7957 int logic_cc;
7958
7959 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7960 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7961 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7962 16-bit instructions to get correct prefetch abort behavior. */
7963 insn = insn_hw1;
7964 if ((insn & (1 << 12)) == 0) {
be5e7a76 7965 ARCH(5);
9ee6e8bb
PB
7966 /* Second half of blx. */
7967 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7968 tmp = load_reg(s, 14);
7969 tcg_gen_addi_i32(tmp, tmp, offset);
7970 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7971
7d1b0095 7972 tmp2 = tcg_temp_new_i32();
b0109805 7973 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7974 store_reg(s, 14, tmp2);
7975 gen_bx(s, tmp);
9ee6e8bb
PB
7976 return 0;
7977 }
7978 if (insn & (1 << 11)) {
7979 /* Second half of bl. */
7980 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7981 tmp = load_reg(s, 14);
6a0d8a1d 7982 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7983
7d1b0095 7984 tmp2 = tcg_temp_new_i32();
b0109805 7985 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7986 store_reg(s, 14, tmp2);
7987 gen_bx(s, tmp);
9ee6e8bb
PB
7988 return 0;
7989 }
7990 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7991 /* Instruction spans a page boundary. Implement it as two
7992 16-bit instructions in case the second half causes an
7993 prefetch abort. */
7994 offset = ((int32_t)insn << 21) >> 9;
396e467c 7995 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7996 return 0;
7997 }
7998 /* Fall through to 32-bit decode. */
7999 }
8000
d31dd73e 8001 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8002 s->pc += 2;
8003 insn |= (uint32_t)insn_hw1 << 16;
8004
8005 if ((insn & 0xf800e800) != 0xf000e800) {
8006 ARCH(6T2);
8007 }
8008
8009 rn = (insn >> 16) & 0xf;
8010 rs = (insn >> 12) & 0xf;
8011 rd = (insn >> 8) & 0xf;
8012 rm = insn & 0xf;
8013 switch ((insn >> 25) & 0xf) {
8014 case 0: case 1: case 2: case 3:
8015 /* 16-bit instructions. Should never happen. */
8016 abort();
8017 case 4:
8018 if (insn & (1 << 22)) {
8019 /* Other load/store, table branch. */
8020 if (insn & 0x01200000) {
8021 /* Load/store doubleword. */
8022 if (rn == 15) {
7d1b0095 8023 addr = tcg_temp_new_i32();
b0109805 8024 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8025 } else {
b0109805 8026 addr = load_reg(s, rn);
9ee6e8bb
PB
8027 }
8028 offset = (insn & 0xff) * 4;
8029 if ((insn & (1 << 23)) == 0)
8030 offset = -offset;
8031 if (insn & (1 << 24)) {
b0109805 8032 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8033 offset = 0;
8034 }
8035 if (insn & (1 << 20)) {
8036 /* ldrd */
b0109805
PB
8037 tmp = gen_ld32(addr, IS_USER(s));
8038 store_reg(s, rs, tmp);
8039 tcg_gen_addi_i32(addr, addr, 4);
8040 tmp = gen_ld32(addr, IS_USER(s));
8041 store_reg(s, rd, tmp);
9ee6e8bb
PB
8042 } else {
8043 /* strd */
b0109805
PB
8044 tmp = load_reg(s, rs);
8045 gen_st32(tmp, addr, IS_USER(s));
8046 tcg_gen_addi_i32(addr, addr, 4);
8047 tmp = load_reg(s, rd);
8048 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8049 }
8050 if (insn & (1 << 21)) {
8051 /* Base writeback. */
8052 if (rn == 15)
8053 goto illegal_op;
b0109805
PB
8054 tcg_gen_addi_i32(addr, addr, offset - 4);
8055 store_reg(s, rn, addr);
8056 } else {
7d1b0095 8057 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8058 }
8059 } else if ((insn & (1 << 23)) == 0) {
8060 /* Load/store exclusive word. */
3174f8e9 8061 addr = tcg_temp_local_new();
98a46317 8062 load_reg_var(s, addr, rn);
426f5abc 8063 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8064 if (insn & (1 << 20)) {
426f5abc 8065 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8066 } else {
426f5abc 8067 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8068 }
3174f8e9 8069 tcg_temp_free(addr);
9ee6e8bb
PB
8070 } else if ((insn & (1 << 6)) == 0) {
8071 /* Table Branch. */
8072 if (rn == 15) {
7d1b0095 8073 addr = tcg_temp_new_i32();
b0109805 8074 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8075 } else {
b0109805 8076 addr = load_reg(s, rn);
9ee6e8bb 8077 }
b26eefb6 8078 tmp = load_reg(s, rm);
b0109805 8079 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8080 if (insn & (1 << 4)) {
8081 /* tbh */
b0109805 8082 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8083 tcg_temp_free_i32(tmp);
b0109805 8084 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8085 } else { /* tbb */
7d1b0095 8086 tcg_temp_free_i32(tmp);
b0109805 8087 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8088 }
7d1b0095 8089 tcg_temp_free_i32(addr);
b0109805
PB
8090 tcg_gen_shli_i32(tmp, tmp, 1);
8091 tcg_gen_addi_i32(tmp, tmp, s->pc);
8092 store_reg(s, 15, tmp);
9ee6e8bb
PB
8093 } else {
8094 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8095 ARCH(7);
9ee6e8bb 8096 op = (insn >> 4) & 0x3;
426f5abc
PB
8097 if (op == 2) {
8098 goto illegal_op;
8099 }
3174f8e9 8100 addr = tcg_temp_local_new();
98a46317 8101 load_reg_var(s, addr, rn);
9ee6e8bb 8102 if (insn & (1 << 20)) {
426f5abc 8103 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8104 } else {
426f5abc 8105 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8106 }
3174f8e9 8107 tcg_temp_free(addr);
9ee6e8bb
PB
8108 }
8109 } else {
8110 /* Load/store multiple, RFE, SRS. */
8111 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8112 /* Not available in user mode. */
b0109805 8113 if (IS_USER(s))
9ee6e8bb
PB
8114 goto illegal_op;
8115 if (insn & (1 << 20)) {
8116 /* rfe */
b0109805
PB
8117 addr = load_reg(s, rn);
8118 if ((insn & (1 << 24)) == 0)
8119 tcg_gen_addi_i32(addr, addr, -8);
8120 /* Load PC into tmp and CPSR into tmp2. */
8121 tmp = gen_ld32(addr, 0);
8122 tcg_gen_addi_i32(addr, addr, 4);
8123 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8124 if (insn & (1 << 21)) {
8125 /* Base writeback. */
b0109805
PB
8126 if (insn & (1 << 24)) {
8127 tcg_gen_addi_i32(addr, addr, 4);
8128 } else {
8129 tcg_gen_addi_i32(addr, addr, -4);
8130 }
8131 store_reg(s, rn, addr);
8132 } else {
7d1b0095 8133 tcg_temp_free_i32(addr);
9ee6e8bb 8134 }
b0109805 8135 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8136 } else {
8137 /* srs */
8138 op = (insn & 0x1f);
7d1b0095 8139 addr = tcg_temp_new_i32();
39ea3d4e
PM
8140 tmp = tcg_const_i32(op);
8141 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8142 tcg_temp_free_i32(tmp);
9ee6e8bb 8143 if ((insn & (1 << 24)) == 0) {
b0109805 8144 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8145 }
b0109805
PB
8146 tmp = load_reg(s, 14);
8147 gen_st32(tmp, addr, 0);
8148 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8149 tmp = tcg_temp_new_i32();
9ef39277 8150 gen_helper_cpsr_read(tmp, cpu_env);
b0109805 8151 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8152 if (insn & (1 << 21)) {
8153 if ((insn & (1 << 24)) == 0) {
b0109805 8154 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8155 } else {
b0109805 8156 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8157 }
39ea3d4e
PM
8158 tmp = tcg_const_i32(op);
8159 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8160 tcg_temp_free_i32(tmp);
b0109805 8161 } else {
7d1b0095 8162 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8163 }
8164 }
8165 } else {
5856d44e
YO
8166 int i, loaded_base = 0;
8167 TCGv loaded_var;
9ee6e8bb 8168 /* Load/store multiple. */
b0109805 8169 addr = load_reg(s, rn);
9ee6e8bb
PB
8170 offset = 0;
8171 for (i = 0; i < 16; i++) {
8172 if (insn & (1 << i))
8173 offset += 4;
8174 }
8175 if (insn & (1 << 24)) {
b0109805 8176 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8177 }
8178
5856d44e 8179 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8180 for (i = 0; i < 16; i++) {
8181 if ((insn & (1 << i)) == 0)
8182 continue;
8183 if (insn & (1 << 20)) {
8184 /* Load. */
b0109805 8185 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8186 if (i == 15) {
b0109805 8187 gen_bx(s, tmp);
5856d44e
YO
8188 } else if (i == rn) {
8189 loaded_var = tmp;
8190 loaded_base = 1;
9ee6e8bb 8191 } else {
b0109805 8192 store_reg(s, i, tmp);
9ee6e8bb
PB
8193 }
8194 } else {
8195 /* Store. */
b0109805
PB
8196 tmp = load_reg(s, i);
8197 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8198 }
b0109805 8199 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8200 }
5856d44e
YO
8201 if (loaded_base) {
8202 store_reg(s, rn, loaded_var);
8203 }
9ee6e8bb
PB
8204 if (insn & (1 << 21)) {
8205 /* Base register writeback. */
8206 if (insn & (1 << 24)) {
b0109805 8207 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8208 }
8209 /* Fault if writeback register is in register list. */
8210 if (insn & (1 << rn))
8211 goto illegal_op;
b0109805
PB
8212 store_reg(s, rn, addr);
8213 } else {
7d1b0095 8214 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8215 }
8216 }
8217 }
8218 break;
2af9ab77
JB
8219 case 5:
8220
9ee6e8bb 8221 op = (insn >> 21) & 0xf;
2af9ab77
JB
8222 if (op == 6) {
8223 /* Halfword pack. */
8224 tmp = load_reg(s, rn);
8225 tmp2 = load_reg(s, rm);
8226 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8227 if (insn & (1 << 5)) {
8228 /* pkhtb */
8229 if (shift == 0)
8230 shift = 31;
8231 tcg_gen_sari_i32(tmp2, tmp2, shift);
8232 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8233 tcg_gen_ext16u_i32(tmp2, tmp2);
8234 } else {
8235 /* pkhbt */
8236 if (shift)
8237 tcg_gen_shli_i32(tmp2, tmp2, shift);
8238 tcg_gen_ext16u_i32(tmp, tmp);
8239 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8240 }
8241 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8242 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8243 store_reg(s, rd, tmp);
8244 } else {
2af9ab77
JB
8245 /* Data processing register constant shift. */
8246 if (rn == 15) {
7d1b0095 8247 tmp = tcg_temp_new_i32();
2af9ab77
JB
8248 tcg_gen_movi_i32(tmp, 0);
8249 } else {
8250 tmp = load_reg(s, rn);
8251 }
8252 tmp2 = load_reg(s, rm);
8253
8254 shiftop = (insn >> 4) & 3;
8255 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8256 conds = (insn & (1 << 20)) != 0;
8257 logic_cc = (conds && thumb2_logic_op(op));
8258 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8259 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8260 goto illegal_op;
7d1b0095 8261 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8262 if (rd != 15) {
8263 store_reg(s, rd, tmp);
8264 } else {
7d1b0095 8265 tcg_temp_free_i32(tmp);
2af9ab77 8266 }
3174f8e9 8267 }
9ee6e8bb
PB
8268 break;
8269 case 13: /* Misc data processing. */
8270 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8271 if (op < 4 && (insn & 0xf000) != 0xf000)
8272 goto illegal_op;
8273 switch (op) {
8274 case 0: /* Register controlled shift. */
8984bd2e
PB
8275 tmp = load_reg(s, rn);
8276 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8277 if ((insn & 0x70) != 0)
8278 goto illegal_op;
8279 op = (insn >> 21) & 3;
8984bd2e
PB
8280 logic_cc = (insn & (1 << 20)) != 0;
8281 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8282 if (logic_cc)
8283 gen_logic_CC(tmp);
21aeb343 8284 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8285 break;
8286 case 1: /* Sign/zero extend. */
5e3f878a 8287 tmp = load_reg(s, rm);
9ee6e8bb 8288 shift = (insn >> 4) & 3;
1301f322 8289 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8290 rotate, a shift is sufficient. */
8291 if (shift != 0)
f669df27 8292 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8293 op = (insn >> 20) & 7;
8294 switch (op) {
5e3f878a
PB
8295 case 0: gen_sxth(tmp); break;
8296 case 1: gen_uxth(tmp); break;
8297 case 2: gen_sxtb16(tmp); break;
8298 case 3: gen_uxtb16(tmp); break;
8299 case 4: gen_sxtb(tmp); break;
8300 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8301 default: goto illegal_op;
8302 }
8303 if (rn != 15) {
5e3f878a 8304 tmp2 = load_reg(s, rn);
9ee6e8bb 8305 if ((op >> 1) == 1) {
5e3f878a 8306 gen_add16(tmp, tmp2);
9ee6e8bb 8307 } else {
5e3f878a 8308 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8309 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8310 }
8311 }
5e3f878a 8312 store_reg(s, rd, tmp);
9ee6e8bb
PB
8313 break;
8314 case 2: /* SIMD add/subtract. */
8315 op = (insn >> 20) & 7;
8316 shift = (insn >> 4) & 7;
8317 if ((op & 3) == 3 || (shift & 3) == 3)
8318 goto illegal_op;
6ddbc6e4
PB
8319 tmp = load_reg(s, rn);
8320 tmp2 = load_reg(s, rm);
8321 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8322 tcg_temp_free_i32(tmp2);
6ddbc6e4 8323 store_reg(s, rd, tmp);
9ee6e8bb
PB
8324 break;
8325 case 3: /* Other data processing. */
8326 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8327 if (op < 4) {
8328 /* Saturating add/subtract. */
d9ba4830
PB
8329 tmp = load_reg(s, rn);
8330 tmp2 = load_reg(s, rm);
9ee6e8bb 8331 if (op & 1)
9ef39277 8332 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8333 if (op & 2)
9ef39277 8334 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8335 else
9ef39277 8336 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8337 tcg_temp_free_i32(tmp2);
9ee6e8bb 8338 } else {
d9ba4830 8339 tmp = load_reg(s, rn);
9ee6e8bb
PB
8340 switch (op) {
8341 case 0x0a: /* rbit */
d9ba4830 8342 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8343 break;
8344 case 0x08: /* rev */
66896cb8 8345 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8346 break;
8347 case 0x09: /* rev16 */
d9ba4830 8348 gen_rev16(tmp);
9ee6e8bb
PB
8349 break;
8350 case 0x0b: /* revsh */
d9ba4830 8351 gen_revsh(tmp);
9ee6e8bb
PB
8352 break;
8353 case 0x10: /* sel */
d9ba4830 8354 tmp2 = load_reg(s, rm);
7d1b0095 8355 tmp3 = tcg_temp_new_i32();
0ecb72a5 8356 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8357 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8358 tcg_temp_free_i32(tmp3);
8359 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8360 break;
8361 case 0x18: /* clz */
d9ba4830 8362 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8363 break;
8364 default:
8365 goto illegal_op;
8366 }
8367 }
d9ba4830 8368 store_reg(s, rd, tmp);
9ee6e8bb
PB
8369 break;
8370 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8371 op = (insn >> 4) & 0xf;
d9ba4830
PB
8372 tmp = load_reg(s, rn);
8373 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8374 switch ((insn >> 20) & 7) {
8375 case 0: /* 32 x 32 -> 32 */
d9ba4830 8376 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8377 tcg_temp_free_i32(tmp2);
9ee6e8bb 8378 if (rs != 15) {
d9ba4830 8379 tmp2 = load_reg(s, rs);
9ee6e8bb 8380 if (op)
d9ba4830 8381 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8382 else
d9ba4830 8383 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8384 tcg_temp_free_i32(tmp2);
9ee6e8bb 8385 }
9ee6e8bb
PB
8386 break;
8387 case 1: /* 16 x 16 -> 32 */
d9ba4830 8388 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8389 tcg_temp_free_i32(tmp2);
9ee6e8bb 8390 if (rs != 15) {
d9ba4830 8391 tmp2 = load_reg(s, rs);
9ef39277 8392 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8393 tcg_temp_free_i32(tmp2);
9ee6e8bb 8394 }
9ee6e8bb
PB
8395 break;
8396 case 2: /* Dual multiply add. */
8397 case 4: /* Dual multiply subtract. */
8398 if (op)
d9ba4830
PB
8399 gen_swap_half(tmp2);
8400 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8401 if (insn & (1 << 22)) {
e1d177b9 8402 /* This subtraction cannot overflow. */
d9ba4830 8403 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8404 } else {
e1d177b9
PM
8405 /* This addition cannot overflow 32 bits;
8406 * however it may overflow considered as a signed
8407 * operation, in which case we must set the Q flag.
8408 */
9ef39277 8409 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8410 }
7d1b0095 8411 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8412 if (rs != 15)
8413 {
d9ba4830 8414 tmp2 = load_reg(s, rs);
9ef39277 8415 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8416 tcg_temp_free_i32(tmp2);
9ee6e8bb 8417 }
9ee6e8bb
PB
8418 break;
8419 case 3: /* 32 * 16 -> 32msb */
8420 if (op)
d9ba4830 8421 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8422 else
d9ba4830 8423 gen_sxth(tmp2);
a7812ae4
PB
8424 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8425 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8426 tmp = tcg_temp_new_i32();
a7812ae4 8427 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8428 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8429 if (rs != 15)
8430 {
d9ba4830 8431 tmp2 = load_reg(s, rs);
9ef39277 8432 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8433 tcg_temp_free_i32(tmp2);
9ee6e8bb 8434 }
9ee6e8bb 8435 break;
838fa72d
AJ
8436 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8437 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8438 if (rs != 15) {
838fa72d
AJ
8439 tmp = load_reg(s, rs);
8440 if (insn & (1 << 20)) {
8441 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8442 } else {
838fa72d 8443 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8444 }
2c0262af 8445 }
838fa72d
AJ
8446 if (insn & (1 << 4)) {
8447 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8448 }
8449 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8450 tmp = tcg_temp_new_i32();
838fa72d
AJ
8451 tcg_gen_trunc_i64_i32(tmp, tmp64);
8452 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8453 break;
8454 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8455 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8456 tcg_temp_free_i32(tmp2);
9ee6e8bb 8457 if (rs != 15) {
d9ba4830
PB
8458 tmp2 = load_reg(s, rs);
8459 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8460 tcg_temp_free_i32(tmp2);
5fd46862 8461 }
9ee6e8bb 8462 break;
2c0262af 8463 }
d9ba4830 8464 store_reg(s, rd, tmp);
2c0262af 8465 break;
9ee6e8bb
PB
8466 case 6: case 7: /* 64-bit multiply, Divide. */
8467 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8468 tmp = load_reg(s, rn);
8469 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8470 if ((op & 0x50) == 0x10) {
8471 /* sdiv, udiv */
47789990 8472 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8473 goto illegal_op;
47789990 8474 }
9ee6e8bb 8475 if (op & 0x20)
5e3f878a 8476 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8477 else
5e3f878a 8478 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8479 tcg_temp_free_i32(tmp2);
5e3f878a 8480 store_reg(s, rd, tmp);
9ee6e8bb
PB
8481 } else if ((op & 0xe) == 0xc) {
8482 /* Dual multiply accumulate long. */
8483 if (op & 1)
5e3f878a
PB
8484 gen_swap_half(tmp2);
8485 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8486 if (op & 0x10) {
5e3f878a 8487 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8488 } else {
5e3f878a 8489 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8490 }
7d1b0095 8491 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8492 /* BUGFIX */
8493 tmp64 = tcg_temp_new_i64();
8494 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8495 tcg_temp_free_i32(tmp);
a7812ae4
PB
8496 gen_addq(s, tmp64, rs, rd);
8497 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8498 tcg_temp_free_i64(tmp64);
2c0262af 8499 } else {
9ee6e8bb
PB
8500 if (op & 0x20) {
8501 /* Unsigned 64-bit multiply */
a7812ae4 8502 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8503 } else {
9ee6e8bb
PB
8504 if (op & 8) {
8505 /* smlalxy */
5e3f878a 8506 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8507 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8508 tmp64 = tcg_temp_new_i64();
8509 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8510 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8511 } else {
8512 /* Signed 64-bit multiply */
a7812ae4 8513 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8514 }
b5ff1b31 8515 }
9ee6e8bb
PB
8516 if (op & 4) {
8517 /* umaal */
a7812ae4
PB
8518 gen_addq_lo(s, tmp64, rs);
8519 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8520 } else if (op & 0x40) {
8521 /* 64-bit accumulate. */
a7812ae4 8522 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8523 }
a7812ae4 8524 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8525 tcg_temp_free_i64(tmp64);
5fd46862 8526 }
2c0262af 8527 break;
9ee6e8bb
PB
8528 }
8529 break;
8530 case 6: case 7: case 14: case 15:
8531 /* Coprocessor. */
8532 if (((insn >> 24) & 3) == 3) {
8533 /* Translate into the equivalent ARM encoding. */
f06053e3 8534 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8535 if (disas_neon_data_insn(env, s, insn))
8536 goto illegal_op;
8537 } else {
8538 if (insn & (1 << 28))
8539 goto illegal_op;
8540 if (disas_coproc_insn (env, s, insn))
8541 goto illegal_op;
8542 }
8543 break;
8544 case 8: case 9: case 10: case 11:
8545 if (insn & (1 << 15)) {
8546 /* Branches, misc control. */
8547 if (insn & 0x5000) {
8548 /* Unconditional branch. */
8549 /* signextend(hw1[10:0]) -> offset[:12]. */
8550 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8551 /* hw1[10:0] -> offset[11:1]. */
8552 offset |= (insn & 0x7ff) << 1;
8553 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8554 offset[24:22] already have the same value because of the
8555 sign extension above. */
8556 offset ^= ((~insn) & (1 << 13)) << 10;
8557 offset ^= ((~insn) & (1 << 11)) << 11;
8558
9ee6e8bb
PB
8559 if (insn & (1 << 14)) {
8560 /* Branch and link. */
3174f8e9 8561 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8562 }
3b46e624 8563
b0109805 8564 offset += s->pc;
9ee6e8bb
PB
8565 if (insn & (1 << 12)) {
8566 /* b/bl */
b0109805 8567 gen_jmp(s, offset);
9ee6e8bb
PB
8568 } else {
8569 /* blx */
b0109805 8570 offset &= ~(uint32_t)2;
be5e7a76 8571 /* thumb2 bx, no need to check */
b0109805 8572 gen_bx_im(s, offset);
2c0262af 8573 }
9ee6e8bb
PB
8574 } else if (((insn >> 23) & 7) == 7) {
8575 /* Misc control */
8576 if (insn & (1 << 13))
8577 goto illegal_op;
8578
8579 if (insn & (1 << 26)) {
8580 /* Secure monitor call (v6Z) */
8581 goto illegal_op; /* not implemented. */
2c0262af 8582 } else {
9ee6e8bb
PB
8583 op = (insn >> 20) & 7;
8584 switch (op) {
8585 case 0: /* msr cpsr. */
8586 if (IS_M(env)) {
8984bd2e
PB
8587 tmp = load_reg(s, rn);
8588 addr = tcg_const_i32(insn & 0xff);
8589 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8590 tcg_temp_free_i32(addr);
7d1b0095 8591 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8592 gen_lookup_tb(s);
8593 break;
8594 }
8595 /* fall through */
8596 case 1: /* msr spsr. */
8597 if (IS_M(env))
8598 goto illegal_op;
2fbac54b
FN
8599 tmp = load_reg(s, rn);
8600 if (gen_set_psr(s,
9ee6e8bb 8601 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8602 op == 1, tmp))
9ee6e8bb
PB
8603 goto illegal_op;
8604 break;
8605 case 2: /* cps, nop-hint. */
8606 if (((insn >> 8) & 7) == 0) {
8607 gen_nop_hint(s, insn & 0xff);
8608 }
8609 /* Implemented as NOP in user mode. */
8610 if (IS_USER(s))
8611 break;
8612 offset = 0;
8613 imm = 0;
8614 if (insn & (1 << 10)) {
8615 if (insn & (1 << 7))
8616 offset |= CPSR_A;
8617 if (insn & (1 << 6))
8618 offset |= CPSR_I;
8619 if (insn & (1 << 5))
8620 offset |= CPSR_F;
8621 if (insn & (1 << 9))
8622 imm = CPSR_A | CPSR_I | CPSR_F;
8623 }
8624 if (insn & (1 << 8)) {
8625 offset |= 0x1f;
8626 imm |= (insn & 0x1f);
8627 }
8628 if (offset) {
2fbac54b 8629 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8630 }
8631 break;
8632 case 3: /* Special control operations. */
426f5abc 8633 ARCH(7);
9ee6e8bb
PB
8634 op = (insn >> 4) & 0xf;
8635 switch (op) {
8636 case 2: /* clrex */
426f5abc 8637 gen_clrex(s);
9ee6e8bb
PB
8638 break;
8639 case 4: /* dsb */
8640 case 5: /* dmb */
8641 case 6: /* isb */
8642 /* These execute as NOPs. */
9ee6e8bb
PB
8643 break;
8644 default:
8645 goto illegal_op;
8646 }
8647 break;
8648 case 4: /* bxj */
8649 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8650 tmp = load_reg(s, rn);
8651 gen_bx(s, tmp);
9ee6e8bb
PB
8652 break;
8653 case 5: /* Exception return. */
b8b45b68
RV
8654 if (IS_USER(s)) {
8655 goto illegal_op;
8656 }
8657 if (rn != 14 || rd != 15) {
8658 goto illegal_op;
8659 }
8660 tmp = load_reg(s, rn);
8661 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8662 gen_exception_return(s, tmp);
8663 break;
9ee6e8bb 8664 case 6: /* mrs cpsr. */
7d1b0095 8665 tmp = tcg_temp_new_i32();
9ee6e8bb 8666 if (IS_M(env)) {
8984bd2e
PB
8667 addr = tcg_const_i32(insn & 0xff);
8668 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8669 tcg_temp_free_i32(addr);
9ee6e8bb 8670 } else {
9ef39277 8671 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8672 }
8984bd2e 8673 store_reg(s, rd, tmp);
9ee6e8bb
PB
8674 break;
8675 case 7: /* mrs spsr. */
8676 /* Not accessible in user mode. */
8677 if (IS_USER(s) || IS_M(env))
8678 goto illegal_op;
d9ba4830
PB
8679 tmp = load_cpu_field(spsr);
8680 store_reg(s, rd, tmp);
9ee6e8bb 8681 break;
2c0262af
FB
8682 }
8683 }
9ee6e8bb
PB
8684 } else {
8685 /* Conditional branch. */
8686 op = (insn >> 22) & 0xf;
8687 /* Generate a conditional jump to next instruction. */
8688 s->condlabel = gen_new_label();
d9ba4830 8689 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8690 s->condjmp = 1;
8691
8692 /* offset[11:1] = insn[10:0] */
8693 offset = (insn & 0x7ff) << 1;
8694 /* offset[17:12] = insn[21:16]. */
8695 offset |= (insn & 0x003f0000) >> 4;
8696 /* offset[31:20] = insn[26]. */
8697 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8698 /* offset[18] = insn[13]. */
8699 offset |= (insn & (1 << 13)) << 5;
8700 /* offset[19] = insn[11]. */
8701 offset |= (insn & (1 << 11)) << 8;
8702
8703 /* jump to the offset */
b0109805 8704 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8705 }
8706 } else {
8707 /* Data processing immediate. */
8708 if (insn & (1 << 25)) {
8709 if (insn & (1 << 24)) {
8710 if (insn & (1 << 20))
8711 goto illegal_op;
8712 /* Bitfield/Saturate. */
8713 op = (insn >> 21) & 7;
8714 imm = insn & 0x1f;
8715 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8716 if (rn == 15) {
7d1b0095 8717 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8718 tcg_gen_movi_i32(tmp, 0);
8719 } else {
8720 tmp = load_reg(s, rn);
8721 }
9ee6e8bb
PB
8722 switch (op) {
8723 case 2: /* Signed bitfield extract. */
8724 imm++;
8725 if (shift + imm > 32)
8726 goto illegal_op;
8727 if (imm < 32)
6ddbc6e4 8728 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8729 break;
8730 case 6: /* Unsigned bitfield extract. */
8731 imm++;
8732 if (shift + imm > 32)
8733 goto illegal_op;
8734 if (imm < 32)
6ddbc6e4 8735 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8736 break;
8737 case 3: /* Bitfield insert/clear. */
8738 if (imm < shift)
8739 goto illegal_op;
8740 imm = imm + 1 - shift;
8741 if (imm != 32) {
6ddbc6e4 8742 tmp2 = load_reg(s, rd);
d593c48e 8743 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8744 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8745 }
8746 break;
8747 case 7:
8748 goto illegal_op;
8749 default: /* Saturate. */
9ee6e8bb
PB
8750 if (shift) {
8751 if (op & 1)
6ddbc6e4 8752 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8753 else
6ddbc6e4 8754 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8755 }
6ddbc6e4 8756 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8757 if (op & 4) {
8758 /* Unsigned. */
9ee6e8bb 8759 if ((op & 1) && shift == 0)
9ef39277 8760 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8761 else
9ef39277 8762 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8763 } else {
9ee6e8bb 8764 /* Signed. */
9ee6e8bb 8765 if ((op & 1) && shift == 0)
9ef39277 8766 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8767 else
9ef39277 8768 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8769 }
b75263d6 8770 tcg_temp_free_i32(tmp2);
9ee6e8bb 8771 break;
2c0262af 8772 }
6ddbc6e4 8773 store_reg(s, rd, tmp);
9ee6e8bb
PB
8774 } else {
8775 imm = ((insn & 0x04000000) >> 15)
8776 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8777 if (insn & (1 << 22)) {
8778 /* 16-bit immediate. */
8779 imm |= (insn >> 4) & 0xf000;
8780 if (insn & (1 << 23)) {
8781 /* movt */
5e3f878a 8782 tmp = load_reg(s, rd);
86831435 8783 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8784 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8785 } else {
9ee6e8bb 8786 /* movw */
7d1b0095 8787 tmp = tcg_temp_new_i32();
5e3f878a 8788 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8789 }
8790 } else {
9ee6e8bb
PB
8791 /* Add/sub 12-bit immediate. */
8792 if (rn == 15) {
b0109805 8793 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8794 if (insn & (1 << 23))
b0109805 8795 offset -= imm;
9ee6e8bb 8796 else
b0109805 8797 offset += imm;
7d1b0095 8798 tmp = tcg_temp_new_i32();
5e3f878a 8799 tcg_gen_movi_i32(tmp, offset);
2c0262af 8800 } else {
5e3f878a 8801 tmp = load_reg(s, rn);
9ee6e8bb 8802 if (insn & (1 << 23))
5e3f878a 8803 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8804 else
5e3f878a 8805 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8806 }
9ee6e8bb 8807 }
5e3f878a 8808 store_reg(s, rd, tmp);
191abaa2 8809 }
9ee6e8bb
PB
8810 } else {
8811 int shifter_out = 0;
8812 /* modified 12-bit immediate. */
8813 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8814 imm = (insn & 0xff);
8815 switch (shift) {
8816 case 0: /* XY */
8817 /* Nothing to do. */
8818 break;
8819 case 1: /* 00XY00XY */
8820 imm |= imm << 16;
8821 break;
8822 case 2: /* XY00XY00 */
8823 imm |= imm << 16;
8824 imm <<= 8;
8825 break;
8826 case 3: /* XYXYXYXY */
8827 imm |= imm << 16;
8828 imm |= imm << 8;
8829 break;
8830 default: /* Rotated constant. */
8831 shift = (shift << 1) | (imm >> 7);
8832 imm |= 0x80;
8833 imm = imm << (32 - shift);
8834 shifter_out = 1;
8835 break;
b5ff1b31 8836 }
7d1b0095 8837 tmp2 = tcg_temp_new_i32();
3174f8e9 8838 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8839 rn = (insn >> 16) & 0xf;
3174f8e9 8840 if (rn == 15) {
7d1b0095 8841 tmp = tcg_temp_new_i32();
3174f8e9
FN
8842 tcg_gen_movi_i32(tmp, 0);
8843 } else {
8844 tmp = load_reg(s, rn);
8845 }
9ee6e8bb
PB
8846 op = (insn >> 21) & 0xf;
8847 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8848 shifter_out, tmp, tmp2))
9ee6e8bb 8849 goto illegal_op;
7d1b0095 8850 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8851 rd = (insn >> 8) & 0xf;
8852 if (rd != 15) {
3174f8e9
FN
8853 store_reg(s, rd, tmp);
8854 } else {
7d1b0095 8855 tcg_temp_free_i32(tmp);
2c0262af 8856 }
2c0262af 8857 }
9ee6e8bb
PB
8858 }
8859 break;
8860 case 12: /* Load/store single data item. */
8861 {
8862 int postinc = 0;
8863 int writeback = 0;
b0109805 8864 int user;
9ee6e8bb
PB
8865 if ((insn & 0x01100000) == 0x01000000) {
8866 if (disas_neon_ls_insn(env, s, insn))
c1713132 8867 goto illegal_op;
9ee6e8bb
PB
8868 break;
8869 }
a2fdc890
PM
8870 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8871 if (rs == 15) {
8872 if (!(insn & (1 << 20))) {
8873 goto illegal_op;
8874 }
8875 if (op != 2) {
8876 /* Byte or halfword load space with dest == r15 : memory hints.
8877 * Catch them early so we don't emit pointless addressing code.
8878 * This space is a mix of:
8879 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8880 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8881 * cores)
8882 * unallocated hints, which must be treated as NOPs
8883 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8884 * which is easiest for the decoding logic
8885 * Some space which must UNDEF
8886 */
8887 int op1 = (insn >> 23) & 3;
8888 int op2 = (insn >> 6) & 0x3f;
8889 if (op & 2) {
8890 goto illegal_op;
8891 }
8892 if (rn == 15) {
02afbf64
PM
8893 /* UNPREDICTABLE, unallocated hint or
8894 * PLD/PLDW/PLI (literal)
8895 */
a2fdc890
PM
8896 return 0;
8897 }
8898 if (op1 & 1) {
02afbf64 8899 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8900 }
8901 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 8902 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8903 }
8904 /* UNDEF space, or an UNPREDICTABLE */
8905 return 1;
8906 }
8907 }
b0109805 8908 user = IS_USER(s);
9ee6e8bb 8909 if (rn == 15) {
7d1b0095 8910 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8911 /* PC relative. */
8912 /* s->pc has already been incremented by 4. */
8913 imm = s->pc & 0xfffffffc;
8914 if (insn & (1 << 23))
8915 imm += insn & 0xfff;
8916 else
8917 imm -= insn & 0xfff;
b0109805 8918 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8919 } else {
b0109805 8920 addr = load_reg(s, rn);
9ee6e8bb
PB
8921 if (insn & (1 << 23)) {
8922 /* Positive offset. */
8923 imm = insn & 0xfff;
b0109805 8924 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8925 } else {
9ee6e8bb 8926 imm = insn & 0xff;
2a0308c5
PM
8927 switch ((insn >> 8) & 0xf) {
8928 case 0x0: /* Shifted Register. */
9ee6e8bb 8929 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8930 if (shift > 3) {
8931 tcg_temp_free_i32(addr);
18c9b560 8932 goto illegal_op;
2a0308c5 8933 }
b26eefb6 8934 tmp = load_reg(s, rm);
9ee6e8bb 8935 if (shift)
b26eefb6 8936 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8937 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8938 tcg_temp_free_i32(tmp);
9ee6e8bb 8939 break;
2a0308c5 8940 case 0xc: /* Negative offset. */
b0109805 8941 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8942 break;
2a0308c5 8943 case 0xe: /* User privilege. */
b0109805
PB
8944 tcg_gen_addi_i32(addr, addr, imm);
8945 user = 1;
9ee6e8bb 8946 break;
2a0308c5 8947 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8948 imm = -imm;
8949 /* Fall through. */
2a0308c5 8950 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8951 postinc = 1;
8952 writeback = 1;
8953 break;
2a0308c5 8954 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8955 imm = -imm;
8956 /* Fall through. */
2a0308c5 8957 case 0xf: /* Pre-increment. */
b0109805 8958 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8959 writeback = 1;
8960 break;
8961 default:
2a0308c5 8962 tcg_temp_free_i32(addr);
b7bcbe95 8963 goto illegal_op;
9ee6e8bb
PB
8964 }
8965 }
8966 }
9ee6e8bb
PB
8967 if (insn & (1 << 20)) {
8968 /* Load. */
a2fdc890
PM
8969 switch (op) {
8970 case 0: tmp = gen_ld8u(addr, user); break;
8971 case 4: tmp = gen_ld8s(addr, user); break;
8972 case 1: tmp = gen_ld16u(addr, user); break;
8973 case 5: tmp = gen_ld16s(addr, user); break;
8974 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8975 default:
8976 tcg_temp_free_i32(addr);
8977 goto illegal_op;
a2fdc890
PM
8978 }
8979 if (rs == 15) {
8980 gen_bx(s, tmp);
9ee6e8bb 8981 } else {
a2fdc890 8982 store_reg(s, rs, tmp);
9ee6e8bb
PB
8983 }
8984 } else {
8985 /* Store. */
b0109805 8986 tmp = load_reg(s, rs);
9ee6e8bb 8987 switch (op) {
b0109805
PB
8988 case 0: gen_st8(tmp, addr, user); break;
8989 case 1: gen_st16(tmp, addr, user); break;
8990 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8991 default:
8992 tcg_temp_free_i32(addr);
8993 goto illegal_op;
b7bcbe95 8994 }
2c0262af 8995 }
9ee6e8bb 8996 if (postinc)
b0109805
PB
8997 tcg_gen_addi_i32(addr, addr, imm);
8998 if (writeback) {
8999 store_reg(s, rn, addr);
9000 } else {
7d1b0095 9001 tcg_temp_free_i32(addr);
b0109805 9002 }
9ee6e8bb
PB
9003 }
9004 break;
9005 default:
9006 goto illegal_op;
2c0262af 9007 }
9ee6e8bb
PB
9008 return 0;
9009illegal_op:
9010 return 1;
2c0262af
FB
9011}
9012
0ecb72a5 9013static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9014{
9015 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9016 int32_t offset;
9017 int i;
b26eefb6 9018 TCGv tmp;
d9ba4830 9019 TCGv tmp2;
b0109805 9020 TCGv addr;
99c475ab 9021
9ee6e8bb
PB
9022 if (s->condexec_mask) {
9023 cond = s->condexec_cond;
bedd2912
JB
9024 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9025 s->condlabel = gen_new_label();
9026 gen_test_cc(cond ^ 1, s->condlabel);
9027 s->condjmp = 1;
9028 }
9ee6e8bb
PB
9029 }
9030
d31dd73e 9031 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9032 s->pc += 2;
b5ff1b31 9033
99c475ab
FB
9034 switch (insn >> 12) {
9035 case 0: case 1:
396e467c 9036
99c475ab
FB
9037 rd = insn & 7;
9038 op = (insn >> 11) & 3;
9039 if (op == 3) {
9040 /* add/subtract */
9041 rn = (insn >> 3) & 7;
396e467c 9042 tmp = load_reg(s, rn);
99c475ab
FB
9043 if (insn & (1 << 10)) {
9044 /* immediate */
7d1b0095 9045 tmp2 = tcg_temp_new_i32();
396e467c 9046 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9047 } else {
9048 /* reg */
9049 rm = (insn >> 6) & 7;
396e467c 9050 tmp2 = load_reg(s, rm);
99c475ab 9051 }
9ee6e8bb
PB
9052 if (insn & (1 << 9)) {
9053 if (s->condexec_mask)
396e467c 9054 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9055 else
72485ec4 9056 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9057 } else {
9058 if (s->condexec_mask)
396e467c 9059 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9060 else
72485ec4 9061 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9062 }
7d1b0095 9063 tcg_temp_free_i32(tmp2);
396e467c 9064 store_reg(s, rd, tmp);
99c475ab
FB
9065 } else {
9066 /* shift immediate */
9067 rm = (insn >> 3) & 7;
9068 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9069 tmp = load_reg(s, rm);
9070 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9071 if (!s->condexec_mask)
9072 gen_logic_CC(tmp);
9073 store_reg(s, rd, tmp);
99c475ab
FB
9074 }
9075 break;
9076 case 2: case 3:
9077 /* arithmetic large immediate */
9078 op = (insn >> 11) & 3;
9079 rd = (insn >> 8) & 0x7;
396e467c 9080 if (op == 0) { /* mov */
7d1b0095 9081 tmp = tcg_temp_new_i32();
396e467c 9082 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9083 if (!s->condexec_mask)
396e467c
FN
9084 gen_logic_CC(tmp);
9085 store_reg(s, rd, tmp);
9086 } else {
9087 tmp = load_reg(s, rd);
7d1b0095 9088 tmp2 = tcg_temp_new_i32();
396e467c
FN
9089 tcg_gen_movi_i32(tmp2, insn & 0xff);
9090 switch (op) {
9091 case 1: /* cmp */
72485ec4 9092 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9093 tcg_temp_free_i32(tmp);
9094 tcg_temp_free_i32(tmp2);
396e467c
FN
9095 break;
9096 case 2: /* add */
9097 if (s->condexec_mask)
9098 tcg_gen_add_i32(tmp, tmp, tmp2);
9099 else
72485ec4 9100 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9101 tcg_temp_free_i32(tmp2);
396e467c
FN
9102 store_reg(s, rd, tmp);
9103 break;
9104 case 3: /* sub */
9105 if (s->condexec_mask)
9106 tcg_gen_sub_i32(tmp, tmp, tmp2);
9107 else
72485ec4 9108 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9109 tcg_temp_free_i32(tmp2);
396e467c
FN
9110 store_reg(s, rd, tmp);
9111 break;
9112 }
99c475ab 9113 }
99c475ab
FB
9114 break;
9115 case 4:
9116 if (insn & (1 << 11)) {
9117 rd = (insn >> 8) & 7;
5899f386
FB
9118 /* load pc-relative. Bit 1 of PC is ignored. */
9119 val = s->pc + 2 + ((insn & 0xff) * 4);
9120 val &= ~(uint32_t)2;
7d1b0095 9121 addr = tcg_temp_new_i32();
b0109805
PB
9122 tcg_gen_movi_i32(addr, val);
9123 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9124 tcg_temp_free_i32(addr);
b0109805 9125 store_reg(s, rd, tmp);
99c475ab
FB
9126 break;
9127 }
9128 if (insn & (1 << 10)) {
9129 /* data processing extended or blx */
9130 rd = (insn & 7) | ((insn >> 4) & 8);
9131 rm = (insn >> 3) & 0xf;
9132 op = (insn >> 8) & 3;
9133 switch (op) {
9134 case 0: /* add */
396e467c
FN
9135 tmp = load_reg(s, rd);
9136 tmp2 = load_reg(s, rm);
9137 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9138 tcg_temp_free_i32(tmp2);
396e467c 9139 store_reg(s, rd, tmp);
99c475ab
FB
9140 break;
9141 case 1: /* cmp */
396e467c
FN
9142 tmp = load_reg(s, rd);
9143 tmp2 = load_reg(s, rm);
72485ec4 9144 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9145 tcg_temp_free_i32(tmp2);
9146 tcg_temp_free_i32(tmp);
99c475ab
FB
9147 break;
9148 case 2: /* mov/cpy */
396e467c
FN
9149 tmp = load_reg(s, rm);
9150 store_reg(s, rd, tmp);
99c475ab
FB
9151 break;
9152 case 3:/* branch [and link] exchange thumb register */
b0109805 9153 tmp = load_reg(s, rm);
99c475ab 9154 if (insn & (1 << 7)) {
be5e7a76 9155 ARCH(5);
99c475ab 9156 val = (uint32_t)s->pc | 1;
7d1b0095 9157 tmp2 = tcg_temp_new_i32();
b0109805
PB
9158 tcg_gen_movi_i32(tmp2, val);
9159 store_reg(s, 14, tmp2);
99c475ab 9160 }
be5e7a76 9161 /* already thumb, no need to check */
d9ba4830 9162 gen_bx(s, tmp);
99c475ab
FB
9163 break;
9164 }
9165 break;
9166 }
9167
9168 /* data processing register */
9169 rd = insn & 7;
9170 rm = (insn >> 3) & 7;
9171 op = (insn >> 6) & 0xf;
9172 if (op == 2 || op == 3 || op == 4 || op == 7) {
9173 /* the shift/rotate ops want the operands backwards */
9174 val = rm;
9175 rm = rd;
9176 rd = val;
9177 val = 1;
9178 } else {
9179 val = 0;
9180 }
9181
396e467c 9182 if (op == 9) { /* neg */
7d1b0095 9183 tmp = tcg_temp_new_i32();
396e467c
FN
9184 tcg_gen_movi_i32(tmp, 0);
9185 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9186 tmp = load_reg(s, rd);
9187 } else {
9188 TCGV_UNUSED(tmp);
9189 }
99c475ab 9190
396e467c 9191 tmp2 = load_reg(s, rm);
5899f386 9192 switch (op) {
99c475ab 9193 case 0x0: /* and */
396e467c 9194 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9195 if (!s->condexec_mask)
396e467c 9196 gen_logic_CC(tmp);
99c475ab
FB
9197 break;
9198 case 0x1: /* eor */
396e467c 9199 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9200 if (!s->condexec_mask)
396e467c 9201 gen_logic_CC(tmp);
99c475ab
FB
9202 break;
9203 case 0x2: /* lsl */
9ee6e8bb 9204 if (s->condexec_mask) {
365af80e 9205 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9206 } else {
9ef39277 9207 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9208 gen_logic_CC(tmp2);
9ee6e8bb 9209 }
99c475ab
FB
9210 break;
9211 case 0x3: /* lsr */
9ee6e8bb 9212 if (s->condexec_mask) {
365af80e 9213 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9214 } else {
9ef39277 9215 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9216 gen_logic_CC(tmp2);
9ee6e8bb 9217 }
99c475ab
FB
9218 break;
9219 case 0x4: /* asr */
9ee6e8bb 9220 if (s->condexec_mask) {
365af80e 9221 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9222 } else {
9ef39277 9223 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9224 gen_logic_CC(tmp2);
9ee6e8bb 9225 }
99c475ab
FB
9226 break;
9227 case 0x5: /* adc */
9ee6e8bb 9228 if (s->condexec_mask)
396e467c 9229 gen_adc(tmp, tmp2);
9ee6e8bb 9230 else
9ef39277 9231 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
99c475ab
FB
9232 break;
9233 case 0x6: /* sbc */
9ee6e8bb 9234 if (s->condexec_mask)
396e467c 9235 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9236 else
9ef39277 9237 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
99c475ab
FB
9238 break;
9239 case 0x7: /* ror */
9ee6e8bb 9240 if (s->condexec_mask) {
f669df27
AJ
9241 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9242 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9243 } else {
9ef39277 9244 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9245 gen_logic_CC(tmp2);
9ee6e8bb 9246 }
99c475ab
FB
9247 break;
9248 case 0x8: /* tst */
396e467c
FN
9249 tcg_gen_and_i32(tmp, tmp, tmp2);
9250 gen_logic_CC(tmp);
99c475ab 9251 rd = 16;
5899f386 9252 break;
99c475ab 9253 case 0x9: /* neg */
9ee6e8bb 9254 if (s->condexec_mask)
396e467c 9255 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9256 else
72485ec4 9257 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9258 break;
9259 case 0xa: /* cmp */
72485ec4 9260 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9261 rd = 16;
9262 break;
9263 case 0xb: /* cmn */
72485ec4 9264 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9265 rd = 16;
9266 break;
9267 case 0xc: /* orr */
396e467c 9268 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9269 if (!s->condexec_mask)
396e467c 9270 gen_logic_CC(tmp);
99c475ab
FB
9271 break;
9272 case 0xd: /* mul */
7b2919a0 9273 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9274 if (!s->condexec_mask)
396e467c 9275 gen_logic_CC(tmp);
99c475ab
FB
9276 break;
9277 case 0xe: /* bic */
f669df27 9278 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9279 if (!s->condexec_mask)
396e467c 9280 gen_logic_CC(tmp);
99c475ab
FB
9281 break;
9282 case 0xf: /* mvn */
396e467c 9283 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9284 if (!s->condexec_mask)
396e467c 9285 gen_logic_CC(tmp2);
99c475ab 9286 val = 1;
5899f386 9287 rm = rd;
99c475ab
FB
9288 break;
9289 }
9290 if (rd != 16) {
396e467c
FN
9291 if (val) {
9292 store_reg(s, rm, tmp2);
9293 if (op != 0xf)
7d1b0095 9294 tcg_temp_free_i32(tmp);
396e467c
FN
9295 } else {
9296 store_reg(s, rd, tmp);
7d1b0095 9297 tcg_temp_free_i32(tmp2);
396e467c
FN
9298 }
9299 } else {
7d1b0095
PM
9300 tcg_temp_free_i32(tmp);
9301 tcg_temp_free_i32(tmp2);
99c475ab
FB
9302 }
9303 break;
9304
9305 case 5:
9306 /* load/store register offset. */
9307 rd = insn & 7;
9308 rn = (insn >> 3) & 7;
9309 rm = (insn >> 6) & 7;
9310 op = (insn >> 9) & 7;
b0109805 9311 addr = load_reg(s, rn);
b26eefb6 9312 tmp = load_reg(s, rm);
b0109805 9313 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9314 tcg_temp_free_i32(tmp);
99c475ab
FB
9315
9316 if (op < 3) /* store */
b0109805 9317 tmp = load_reg(s, rd);
99c475ab
FB
9318
9319 switch (op) {
9320 case 0: /* str */
b0109805 9321 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9322 break;
9323 case 1: /* strh */
b0109805 9324 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9325 break;
9326 case 2: /* strb */
b0109805 9327 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9328 break;
9329 case 3: /* ldrsb */
b0109805 9330 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9331 break;
9332 case 4: /* ldr */
b0109805 9333 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9334 break;
9335 case 5: /* ldrh */
b0109805 9336 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9337 break;
9338 case 6: /* ldrb */
b0109805 9339 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9340 break;
9341 case 7: /* ldrsh */
b0109805 9342 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9343 break;
9344 }
9345 if (op >= 3) /* load */
b0109805 9346 store_reg(s, rd, tmp);
7d1b0095 9347 tcg_temp_free_i32(addr);
99c475ab
FB
9348 break;
9349
9350 case 6:
9351 /* load/store word immediate offset */
9352 rd = insn & 7;
9353 rn = (insn >> 3) & 7;
b0109805 9354 addr = load_reg(s, rn);
99c475ab 9355 val = (insn >> 4) & 0x7c;
b0109805 9356 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9357
9358 if (insn & (1 << 11)) {
9359 /* load */
b0109805
PB
9360 tmp = gen_ld32(addr, IS_USER(s));
9361 store_reg(s, rd, tmp);
99c475ab
FB
9362 } else {
9363 /* store */
b0109805
PB
9364 tmp = load_reg(s, rd);
9365 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9366 }
7d1b0095 9367 tcg_temp_free_i32(addr);
99c475ab
FB
9368 break;
9369
9370 case 7:
9371 /* load/store byte immediate offset */
9372 rd = insn & 7;
9373 rn = (insn >> 3) & 7;
b0109805 9374 addr = load_reg(s, rn);
99c475ab 9375 val = (insn >> 6) & 0x1f;
b0109805 9376 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9377
9378 if (insn & (1 << 11)) {
9379 /* load */
b0109805
PB
9380 tmp = gen_ld8u(addr, IS_USER(s));
9381 store_reg(s, rd, tmp);
99c475ab
FB
9382 } else {
9383 /* store */
b0109805
PB
9384 tmp = load_reg(s, rd);
9385 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9386 }
7d1b0095 9387 tcg_temp_free_i32(addr);
99c475ab
FB
9388 break;
9389
9390 case 8:
9391 /* load/store halfword immediate offset */
9392 rd = insn & 7;
9393 rn = (insn >> 3) & 7;
b0109805 9394 addr = load_reg(s, rn);
99c475ab 9395 val = (insn >> 5) & 0x3e;
b0109805 9396 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9397
9398 if (insn & (1 << 11)) {
9399 /* load */
b0109805
PB
9400 tmp = gen_ld16u(addr, IS_USER(s));
9401 store_reg(s, rd, tmp);
99c475ab
FB
9402 } else {
9403 /* store */
b0109805
PB
9404 tmp = load_reg(s, rd);
9405 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9406 }
7d1b0095 9407 tcg_temp_free_i32(addr);
99c475ab
FB
9408 break;
9409
9410 case 9:
9411 /* load/store from stack */
9412 rd = (insn >> 8) & 7;
b0109805 9413 addr = load_reg(s, 13);
99c475ab 9414 val = (insn & 0xff) * 4;
b0109805 9415 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9416
9417 if (insn & (1 << 11)) {
9418 /* load */
b0109805
PB
9419 tmp = gen_ld32(addr, IS_USER(s));
9420 store_reg(s, rd, tmp);
99c475ab
FB
9421 } else {
9422 /* store */
b0109805
PB
9423 tmp = load_reg(s, rd);
9424 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9425 }
7d1b0095 9426 tcg_temp_free_i32(addr);
99c475ab
FB
9427 break;
9428
9429 case 10:
9430 /* add to high reg */
9431 rd = (insn >> 8) & 7;
5899f386
FB
9432 if (insn & (1 << 11)) {
9433 /* SP */
5e3f878a 9434 tmp = load_reg(s, 13);
5899f386
FB
9435 } else {
9436 /* PC. bit 1 is ignored. */
7d1b0095 9437 tmp = tcg_temp_new_i32();
5e3f878a 9438 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9439 }
99c475ab 9440 val = (insn & 0xff) * 4;
5e3f878a
PB
9441 tcg_gen_addi_i32(tmp, tmp, val);
9442 store_reg(s, rd, tmp);
99c475ab
FB
9443 break;
9444
9445 case 11:
9446 /* misc */
9447 op = (insn >> 8) & 0xf;
9448 switch (op) {
9449 case 0:
9450 /* adjust stack pointer */
b26eefb6 9451 tmp = load_reg(s, 13);
99c475ab
FB
9452 val = (insn & 0x7f) * 4;
9453 if (insn & (1 << 7))
6a0d8a1d 9454 val = -(int32_t)val;
b26eefb6
PB
9455 tcg_gen_addi_i32(tmp, tmp, val);
9456 store_reg(s, 13, tmp);
99c475ab
FB
9457 break;
9458
9ee6e8bb
PB
9459 case 2: /* sign/zero extend. */
9460 ARCH(6);
9461 rd = insn & 7;
9462 rm = (insn >> 3) & 7;
b0109805 9463 tmp = load_reg(s, rm);
9ee6e8bb 9464 switch ((insn >> 6) & 3) {
b0109805
PB
9465 case 0: gen_sxth(tmp); break;
9466 case 1: gen_sxtb(tmp); break;
9467 case 2: gen_uxth(tmp); break;
9468 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9469 }
b0109805 9470 store_reg(s, rd, tmp);
9ee6e8bb 9471 break;
99c475ab
FB
9472 case 4: case 5: case 0xc: case 0xd:
9473 /* push/pop */
b0109805 9474 addr = load_reg(s, 13);
5899f386
FB
9475 if (insn & (1 << 8))
9476 offset = 4;
99c475ab 9477 else
5899f386
FB
9478 offset = 0;
9479 for (i = 0; i < 8; i++) {
9480 if (insn & (1 << i))
9481 offset += 4;
9482 }
9483 if ((insn & (1 << 11)) == 0) {
b0109805 9484 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9485 }
99c475ab
FB
9486 for (i = 0; i < 8; i++) {
9487 if (insn & (1 << i)) {
9488 if (insn & (1 << 11)) {
9489 /* pop */
b0109805
PB
9490 tmp = gen_ld32(addr, IS_USER(s));
9491 store_reg(s, i, tmp);
99c475ab
FB
9492 } else {
9493 /* push */
b0109805
PB
9494 tmp = load_reg(s, i);
9495 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9496 }
5899f386 9497 /* advance to the next address. */
b0109805 9498 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9499 }
9500 }
a50f5b91 9501 TCGV_UNUSED(tmp);
99c475ab
FB
9502 if (insn & (1 << 8)) {
9503 if (insn & (1 << 11)) {
9504 /* pop pc */
b0109805 9505 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9506 /* don't set the pc until the rest of the instruction
9507 has completed */
9508 } else {
9509 /* push lr */
b0109805
PB
9510 tmp = load_reg(s, 14);
9511 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9512 }
b0109805 9513 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9514 }
5899f386 9515 if ((insn & (1 << 11)) == 0) {
b0109805 9516 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9517 }
99c475ab 9518 /* write back the new stack pointer */
b0109805 9519 store_reg(s, 13, addr);
99c475ab 9520 /* set the new PC value */
be5e7a76
DES
9521 if ((insn & 0x0900) == 0x0900) {
9522 store_reg_from_load(env, s, 15, tmp);
9523 }
99c475ab
FB
9524 break;
9525
9ee6e8bb
PB
9526 case 1: case 3: case 9: case 11: /* czb */
9527 rm = insn & 7;
d9ba4830 9528 tmp = load_reg(s, rm);
9ee6e8bb
PB
9529 s->condlabel = gen_new_label();
9530 s->condjmp = 1;
9531 if (insn & (1 << 11))
cb63669a 9532 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9533 else
cb63669a 9534 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9535 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9536 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9537 val = (uint32_t)s->pc + 2;
9538 val += offset;
9539 gen_jmp(s, val);
9540 break;
9541
9542 case 15: /* IT, nop-hint. */
9543 if ((insn & 0xf) == 0) {
9544 gen_nop_hint(s, (insn >> 4) & 0xf);
9545 break;
9546 }
9547 /* If Then. */
9548 s->condexec_cond = (insn >> 4) & 0xe;
9549 s->condexec_mask = insn & 0x1f;
9550 /* No actual code generated for this insn, just setup state. */
9551 break;
9552
06c949e6 9553 case 0xe: /* bkpt */
be5e7a76 9554 ARCH(5);
bc4a0de0 9555 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9556 break;
9557
9ee6e8bb
PB
9558 case 0xa: /* rev */
9559 ARCH(6);
9560 rn = (insn >> 3) & 0x7;
9561 rd = insn & 0x7;
b0109805 9562 tmp = load_reg(s, rn);
9ee6e8bb 9563 switch ((insn >> 6) & 3) {
66896cb8 9564 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9565 case 1: gen_rev16(tmp); break;
9566 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9567 default: goto illegal_op;
9568 }
b0109805 9569 store_reg(s, rd, tmp);
9ee6e8bb
PB
9570 break;
9571
d9e028c1
PM
9572 case 6:
9573 switch ((insn >> 5) & 7) {
9574 case 2:
9575 /* setend */
9576 ARCH(6);
10962fd5
PM
9577 if (((insn >> 3) & 1) != s->bswap_code) {
9578 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9579 goto illegal_op;
9580 }
9ee6e8bb 9581 break;
d9e028c1
PM
9582 case 3:
9583 /* cps */
9584 ARCH(6);
9585 if (IS_USER(s)) {
9586 break;
8984bd2e 9587 }
d9e028c1
PM
9588 if (IS_M(env)) {
9589 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9590 /* FAULTMASK */
9591 if (insn & 1) {
9592 addr = tcg_const_i32(19);
9593 gen_helper_v7m_msr(cpu_env, addr, tmp);
9594 tcg_temp_free_i32(addr);
9595 }
9596 /* PRIMASK */
9597 if (insn & 2) {
9598 addr = tcg_const_i32(16);
9599 gen_helper_v7m_msr(cpu_env, addr, tmp);
9600 tcg_temp_free_i32(addr);
9601 }
9602 tcg_temp_free_i32(tmp);
9603 gen_lookup_tb(s);
9604 } else {
9605 if (insn & (1 << 4)) {
9606 shift = CPSR_A | CPSR_I | CPSR_F;
9607 } else {
9608 shift = 0;
9609 }
9610 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9611 }
d9e028c1
PM
9612 break;
9613 default:
9614 goto undef;
9ee6e8bb
PB
9615 }
9616 break;
9617
99c475ab
FB
9618 default:
9619 goto undef;
9620 }
9621 break;
9622
9623 case 12:
a7d3970d 9624 {
99c475ab 9625 /* load/store multiple */
a7d3970d
PM
9626 TCGv loaded_var;
9627 TCGV_UNUSED(loaded_var);
99c475ab 9628 rn = (insn >> 8) & 0x7;
b0109805 9629 addr = load_reg(s, rn);
99c475ab
FB
9630 for (i = 0; i < 8; i++) {
9631 if (insn & (1 << i)) {
99c475ab
FB
9632 if (insn & (1 << 11)) {
9633 /* load */
b0109805 9634 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9635 if (i == rn) {
9636 loaded_var = tmp;
9637 } else {
9638 store_reg(s, i, tmp);
9639 }
99c475ab
FB
9640 } else {
9641 /* store */
b0109805
PB
9642 tmp = load_reg(s, i);
9643 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9644 }
5899f386 9645 /* advance to the next address */
b0109805 9646 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9647 }
9648 }
b0109805 9649 if ((insn & (1 << rn)) == 0) {
a7d3970d 9650 /* base reg not in list: base register writeback */
b0109805
PB
9651 store_reg(s, rn, addr);
9652 } else {
a7d3970d
PM
9653 /* base reg in list: if load, complete it now */
9654 if (insn & (1 << 11)) {
9655 store_reg(s, rn, loaded_var);
9656 }
7d1b0095 9657 tcg_temp_free_i32(addr);
b0109805 9658 }
99c475ab 9659 break;
a7d3970d 9660 }
99c475ab
FB
9661 case 13:
9662 /* conditional branch or swi */
9663 cond = (insn >> 8) & 0xf;
9664 if (cond == 0xe)
9665 goto undef;
9666
9667 if (cond == 0xf) {
9668 /* swi */
422ebf69 9669 gen_set_pc_im(s->pc);
9ee6e8bb 9670 s->is_jmp = DISAS_SWI;
99c475ab
FB
9671 break;
9672 }
9673 /* generate a conditional jump to next instruction */
e50e6a20 9674 s->condlabel = gen_new_label();
d9ba4830 9675 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9676 s->condjmp = 1;
99c475ab
FB
9677
9678 /* jump to the offset */
5899f386 9679 val = (uint32_t)s->pc + 2;
99c475ab 9680 offset = ((int32_t)insn << 24) >> 24;
5899f386 9681 val += offset << 1;
8aaca4c0 9682 gen_jmp(s, val);
99c475ab
FB
9683 break;
9684
9685 case 14:
358bf29e 9686 if (insn & (1 << 11)) {
9ee6e8bb
PB
9687 if (disas_thumb2_insn(env, s, insn))
9688 goto undef32;
358bf29e
PB
9689 break;
9690 }
9ee6e8bb 9691 /* unconditional branch */
99c475ab
FB
9692 val = (uint32_t)s->pc;
9693 offset = ((int32_t)insn << 21) >> 21;
9694 val += (offset << 1) + 2;
8aaca4c0 9695 gen_jmp(s, val);
99c475ab
FB
9696 break;
9697
9698 case 15:
9ee6e8bb 9699 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9700 goto undef32;
9ee6e8bb 9701 break;
99c475ab
FB
9702 }
9703 return;
9ee6e8bb 9704undef32:
bc4a0de0 9705 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9706 return;
9707illegal_op:
99c475ab 9708undef:
bc4a0de0 9709 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9710}
9711
2c0262af
FB
9712/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9713 basic block 'tb'. If search_pc is TRUE, also generate PC
9714 information for each intermediate instruction. */
/* Translate a block of guest code starting at tb->pc into TCG ops.
 *
 * env       - CPU state of the ARM core being translated.
 * tb        - the TranslationBlock being filled in.
 * search_pc - if nonzero, also record per-instruction side tables
 *             (guest PC, condexec bits, icount) so that guest state
 *             can later be reconstructed mid-block by
 *             restore_state_to_opc().
 *
 * Translation stops at the end of the guest page, at the opcode buffer
 * limit, at max_insns (icount), or when an instruction ends the block.
 */
static inline void gen_intermediate_code_internal(CPUARMState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Seed the disassembly context from the TB flags: Thumb/ARM mode,
       byte-swapped code, IT (condexec) state, privilege level and the
       VFP/Neon configuration all come from tb->flags.  */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    /* Scratch TCG temporaries shared by the FP/Neon translation code.  */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        /* M-profile exception-return magic addresses.  */
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            /* Fill the per-op side tables; pad any gap created by ops
               emitted since the last instruction boundary.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine after each Thumb insn.  */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        /* Close the skip-label of a conditional insn that did not end
           the block.  */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns ++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        /* The not-taken path of a final conditional insn falls through
           to the next sequential instruction.  */
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Pad the tail of the side table out to the last op emitted.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
9989
/* Generate TCG intermediate code for basic block 'tb' without
 * recording per-instruction PC search information.
 */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
9994
/* Generate TCG intermediate code for basic block 'tb', additionally
 * recording the per-instruction side tables (PC, condexec bits, icount)
 * used by restore_state_to_opc().
 */
void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
9999
/* Printable names for the CPSR mode field (CPSR bits [3:0], with bit 4
 * implied set), indexed by (psr & 0xf) in cpu_dump_state().  "???"
 * marks encodings with no architected mode.  The table is fully
 * const-qualified so both the pointers and the strings live in
 * read-only data.
 */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 10004
0ecb72a5 10005void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10006 int flags)
2c0262af
FB
10007{
10008 int i;
b5ff1b31 10009 uint32_t psr;
2c0262af
FB
10010
10011 for(i=0;i<16;i++) {
7fe48483 10012 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10013 if ((i % 4) == 3)
7fe48483 10014 cpu_fprintf(f, "\n");
2c0262af 10015 else
7fe48483 10016 cpu_fprintf(f, " ");
2c0262af 10017 }
b5ff1b31 10018 psr = cpsr_read(env);
687fa640
TS
10019 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10020 psr,
b5ff1b31
FB
10021 psr & (1 << 31) ? 'N' : '-',
10022 psr & (1 << 30) ? 'Z' : '-',
10023 psr & (1 << 29) ? 'C' : '-',
10024 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10025 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10026 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10027
f2617cfc
PM
10028 if (flags & CPU_DUMP_FPU) {
10029 int numvfpregs = 0;
10030 if (arm_feature(env, ARM_FEATURE_VFP)) {
10031 numvfpregs += 16;
10032 }
10033 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10034 numvfpregs += 16;
10035 }
10036 for (i = 0; i < numvfpregs; i++) {
10037 uint64_t v = float64_val(env->vfp.regs[i]);
10038 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10039 i * 2, (uint32_t)v,
10040 i * 2 + 1, (uint32_t)(v >> 32),
10041 i, v);
10042 }
10043 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10044 }
2c0262af 10045}
a6b025d3 10046
/* Restore the guest-visible CPU state (PC and Thumb IT/condexec bits)
 * to the start of the instruction at index pc_pos within 'tb', using
 * the side tables recorded during a search_pc retranslation.
 */
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}