target-arm/translate.c
2c0262af 1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af 7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
2c0262af 28#include "disas.h"
57fec1fe 29#include "tcg-op.h"
79383c9c 30#include "qemu-log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76 36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb 40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af 48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20 52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
b90372ad 56 /* Thumb-2 conditional execution bits. */
9ee6e8bb 57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
d8fd2954 62 int bswap_code;
b5ff1b31 63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22 67 int vec_len;
68 int vec_stride;
2c0262af 69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb 79/* These instructions trap after executing, so defer them until after the
b90372ad 80 conditional execution state has been updated. */
9ee6e8bb
PB
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
66c374de 88static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
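/* Flag representation used throughout this file: cpu_NF and cpu_VF hold the
   N and V flags in their bit 31, cpu_ZF is zero exactly when the Z flag is
   set, and cpu_CF holds the C flag as 0 or 1. */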
426f5abc
PB
89static TCGv_i32 cpu_exclusive_addr;
90static TCGv_i32 cpu_exclusive_val;
91static TCGv_i32 cpu_exclusive_high;
92#ifdef CONFIG_USER_ONLY
93static TCGv_i32 cpu_exclusive_test;
94static TCGv_i32 cpu_exclusive_info;
95#endif
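/* cpu_exclusive_addr/val/high track the most recent LDREX so that a later
   STREX can be checked; the _test/_info globals exist only for the
   user-mode-only exclusive-access implementation. */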
ad69471c 96
b26eefb6 97/* FIXME: These should be removed. */
a7812ae4
PB
98static TCGv cpu_F0s, cpu_F1s;
99static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 100
2e70f6ef
PB
101#include "gen-icount.h"
102
155c3eac
FN
103static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106
b26eefb6
PB
107/* initialize TCG globals. */
108void arm_translate_init(void)
109{
155c3eac
FN
110 int i;
111
a7812ae4
PB
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113
155c3eac
FN
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 116 offsetof(CPUARMState, regs[i]),
155c3eac
FN
117 regnames[i]);
118 }
66c374de
AJ
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
123
426f5abc 124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
130#ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 135#endif
155c3eac 136
a7812ae4 137#define GEN_HELPER 2
7b59220e 138#include "helper.h"
b26eefb6
PB
139}
140
d9ba4830
PB
141static inline TCGv load_cpu_offset(int offset)
142{
7d1b0095 143 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
146}
147
0ecb72a5 148#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830
PB
149
150static inline void store_cpu_offset(TCGv var, int offset)
151{
152 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 153 tcg_temp_free_i32(var);
d9ba4830
PB
154}
155
156#define store_cpu_field(var, name) \
0ecb72a5 157 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 158
b26eefb6
PB
159/* Set a variable to the value of a CPU register. */
160static void load_reg_var(DisasContext *s, TCGv var, int reg)
161{
162 if (reg == 15) {
163 uint32_t addr;
b90372ad 164 /* normally, since we have already updated PC, we only need to add the length of one more insn */
b26eefb6
PB
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
155c3eac 171 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
172 }
173}
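/* Architecturally a read of r15 yields the address of the current insn plus
   8 in ARM state or plus 4 in Thumb state; s->pc already points past the
   current insn, hence the extra 4 or 2 above. */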
174
175/* Create a new temporary and set it to the value of a CPU register. */
176static inline TCGv load_reg(DisasContext *s, int reg)
177{
7d1b0095 178 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
179 load_reg_var(s, tmp, reg);
180 return tmp;
181}
182
183/* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
185static void store_reg(DisasContext *s, int reg, TCGv var)
186{
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
190 }
155c3eac 191 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 192 tcg_temp_free_i32(var);
b26eefb6
PB
193}
194
b26eefb6 195/* Value extensions. */
86831435
PB
196#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
198#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
200
1497c961
PB
201#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 203
b26eefb6 204
b75263d6
JR
205static inline void gen_set_cpsr(TCGv var, uint32_t mask)
206{
207 TCGv tmp_mask = tcg_const_i32(mask);
1ce94f81 208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
209 tcg_temp_free_i32(tmp_mask);
210}
d9ba4830
PB
211/* Set NZCV flags from the high 4 bits of var. */
212#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
213
214static void gen_exception(int excp)
215{
7d1b0095 216 TCGv tmp = tcg_temp_new_i32();
d9ba4830 217 tcg_gen_movi_i32(tmp, excp);
1ce94f81 218 gen_helper_exception(cpu_env, tmp);
7d1b0095 219 tcg_temp_free_i32(tmp);
d9ba4830
PB
220}
221
3670669c
PB
222static void gen_smul_dual(TCGv a, TCGv b)
223{
7d1b0095
PM
224 TCGv tmp1 = tcg_temp_new_i32();
225 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
3670669c 228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 229 tcg_temp_free_i32(tmp2);
3670669c
PB
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
7d1b0095 234 tcg_temp_free_i32(tmp1);
3670669c
PB
235}
236
237/* Byteswap each halfword. */
238static void gen_rev16(TCGv var)
239{
7d1b0095 240 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
7d1b0095 246 tcg_temp_free_i32(tmp);
3670669c
PB
247}
248
249/* Byteswap low halfword and sign extend. */
250static void gen_revsh(TCGv var)
251{
1a855029
AJ
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
3670669c
PB
255}
256
257/* Unsigned bitfield extract. */
258static void gen_ubfx(TCGv var, int shift, uint32_t mask)
259{
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
263}
264
265/* Signed bitfield extract. */
266static void gen_sbfx(TCGv var, int shift, int width)
267{
268 uint32_t signbit;
269
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
277 }
278}
279
838fa72d
AJ
280/* Return (b << 32) + a. Mark inputs as dead */
281static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 282{
838fa72d
AJ
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
284
285 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 286 tcg_temp_free_i32(b);
838fa72d
AJ
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_add_i64(a, tmp64, a);
289
290 tcg_temp_free_i64(tmp64);
291 return a;
292}
293
294/* Return (b << 32) - a. Mark inputs as dead. */
295static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
296{
297 TCGv_i64 tmp64 = tcg_temp_new_i64();
298
299 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 300 tcg_temp_free_i32(b);
838fa72d
AJ
301 tcg_gen_shli_i64(tmp64, tmp64, 32);
302 tcg_gen_sub_i64(a, tmp64, a);
303
304 tcg_temp_free_i64(tmp64);
305 return a;
3670669c
PB
306}
307
8f01245e
PB
308/* FIXME: Most targets have native widening multiplication.
309 It would be good to use that instead of a full wide multiply. */
5e3f878a 310/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 311static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 312{
a7812ae4
PB
313 TCGv_i64 tmp1 = tcg_temp_new_i64();
314 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
315
316 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 317 tcg_temp_free_i32(a);
5e3f878a 318 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 319 tcg_temp_free_i32(b);
5e3f878a 320 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 321 tcg_temp_free_i64(tmp2);
5e3f878a
PB
322 return tmp1;
323}
324
a7812ae4 325static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 326{
a7812ae4
PB
327 TCGv_i64 tmp1 = tcg_temp_new_i64();
328 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
329
330 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 331 tcg_temp_free_i32(a);
5e3f878a 332 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 333 tcg_temp_free_i32(b);
5e3f878a 334 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 335 tcg_temp_free_i64(tmp2);
5e3f878a
PB
336 return tmp1;
337}
338
8f01245e
PB
339/* Swap low and high halfwords. */
340static void gen_swap_half(TCGv var)
341{
7d1b0095 342 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
343 tcg_gen_shri_i32(tmp, var, 16);
344 tcg_gen_shli_i32(var, var, 16);
345 tcg_gen_or_i32(var, var, tmp);
7d1b0095 346 tcg_temp_free_i32(tmp);
8f01245e
PB
347}
348
b26eefb6
PB
349/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
350 tmp = (t0 ^ t1) & 0x8000;
351 t0 &= ~0x8000;
352 t1 &= ~0x8000;
353 t0 = (t0 + t1) ^ tmp;
354 */
355
356static void gen_add16(TCGv t0, TCGv t1)
357{
7d1b0095 358 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
359 tcg_gen_xor_i32(tmp, t0, t1);
360 tcg_gen_andi_i32(tmp, tmp, 0x8000);
361 tcg_gen_andi_i32(t0, t0, ~0x8000);
362 tcg_gen_andi_i32(t1, t1, ~0x8000);
363 tcg_gen_add_i32(t0, t0, t1);
364 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
365 tcg_temp_free_i32(tmp);
366 tcg_temp_free_i32(t1);
b26eefb6
PB
367}
368
369/* Set CF to the top bit of var. */
370static void gen_set_CF_bit31(TCGv var)
371{
66c374de 372 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
373}
374
375/* Set N and Z flags from var. */
376static inline void gen_logic_CC(TCGv var)
377{
66c374de
AJ
378 tcg_gen_mov_i32(cpu_NF, var);
379 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
380}
381
382/* T0 += T1 + CF. */
396e467c 383static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 384{
396e467c 385 tcg_gen_add_i32(t0, t0, t1);
66c374de 386 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
387}
388
e9bb4aa9
JR
389/* dest = T0 + T1 + CF. */
390static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
391{
e9bb4aa9 392 tcg_gen_add_i32(dest, t0, t1);
66c374de 393 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
394}
395
3670669c
PB
396/* dest = T0 - T1 + CF - 1. */
397static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
398{
3670669c 399 tcg_gen_sub_i32(dest, t0, t1);
66c374de 400 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 401 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
402}
403
72485ec4
AJ
404/* dest = T0 + T1. Compute C, N, V and Z flags */
405static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
406{
407 TCGv tmp;
408 tcg_gen_add_i32(cpu_NF, t0, t1);
409 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
410 tcg_gen_setcond_i32(TCG_COND_LTU, cpu_CF, cpu_NF, t0);
411 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
412 tmp = tcg_temp_new_i32();
413 tcg_gen_xor_i32(tmp, t0, t1);
414 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
415 tcg_temp_free_i32(tmp);
416 tcg_gen_mov_i32(dest, cpu_NF);
417}
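/* Overflow for the addition above: V = (result ^ t0) & ~(t0 ^ t1), i.e. the
   operands had the same sign but the result's sign differs.  For the
   subtraction below it is V = (result ^ t0) & (t0 ^ t1), and C is set to
   !borrow (t0 >= t1 unsigned), matching the ARM carry convention for SUB/CMP. */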
418
419/* dest = T0 - T1. Compute C, N, V and Z flags */
420static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
421{
422 TCGv tmp;
423 tcg_gen_sub_i32(cpu_NF, t0, t1);
424 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
425 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
426 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
427 tmp = tcg_temp_new_i32();
428 tcg_gen_xor_i32(tmp, t0, t1);
429 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
430 tcg_temp_free_i32(tmp);
431 tcg_gen_mov_i32(dest, cpu_NF);
432}
433
365af80e
AJ
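/* Register-specified shifts use only the bottom byte of the shift register.
   The movcond below makes shift amounts of 32 or more produce 0 for LSL/LSR
   without a branch; SAR instead clamps the amount to 31, preserving the sign. */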
434#define GEN_SHIFT(name) \
435static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
436{ \
437 TCGv tmp1, tmp2, tmp3; \
438 tmp1 = tcg_temp_new_i32(); \
439 tcg_gen_andi_i32(tmp1, t1, 0xff); \
440 tmp2 = tcg_const_i32(0); \
441 tmp3 = tcg_const_i32(0x1f); \
442 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
443 tcg_temp_free_i32(tmp3); \
444 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
445 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
446 tcg_temp_free_i32(tmp2); \
447 tcg_temp_free_i32(tmp1); \
448}
449GEN_SHIFT(shl)
450GEN_SHIFT(shr)
451#undef GEN_SHIFT
452
453static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
454{
455 TCGv tmp1, tmp2;
456 tmp1 = tcg_temp_new_i32();
457 tcg_gen_andi_i32(tmp1, t1, 0xff);
458 tmp2 = tcg_const_i32(0x1f);
459 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
460 tcg_temp_free_i32(tmp2);
461 tcg_gen_sar_i32(dest, t0, tmp1);
462 tcg_temp_free_i32(tmp1);
463}
464
36c91fd1
PM
465static void tcg_gen_abs_i32(TCGv dest, TCGv src)
466{
467 TCGv c0 = tcg_const_i32(0);
468 TCGv tmp = tcg_temp_new_i32();
469 tcg_gen_neg_i32(tmp, src);
470 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
471 tcg_temp_free_i32(c0);
472 tcg_temp_free_i32(tmp);
473}
ad69471c 474
9a119ff6 475static void shifter_out_im(TCGv var, int shift)
b26eefb6 476{
9a119ff6 477 if (shift == 0) {
66c374de 478 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 479 } else {
66c374de
AJ
480 tcg_gen_shri_i32(cpu_CF, var, shift);
481 if (shift != 31) {
482 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
483 }
9a119ff6 484 }
9a119ff6 485}
b26eefb6 486
9a119ff6
PB
487/* Shift by immediate. Includes special handling for shift == 0. */
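/* In the immediate-shift encoding a shift amount of 0 means LSR #32 or
   ASR #32 for those shift types, and RRX (rotate right by one bit through
   the carry) for ROR; only LSL #0 is a plain move. */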
488static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
489{
490 switch (shiftop) {
491 case 0: /* LSL */
492 if (shift != 0) {
493 if (flags)
494 shifter_out_im(var, 32 - shift);
495 tcg_gen_shli_i32(var, var, shift);
496 }
497 break;
498 case 1: /* LSR */
499 if (shift == 0) {
500 if (flags) {
66c374de 501 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
502 }
503 tcg_gen_movi_i32(var, 0);
504 } else {
505 if (flags)
506 shifter_out_im(var, shift - 1);
507 tcg_gen_shri_i32(var, var, shift);
508 }
509 break;
510 case 2: /* ASR */
511 if (shift == 0)
512 shift = 32;
513 if (flags)
514 shifter_out_im(var, shift - 1);
515 if (shift == 32)
516 shift = 31;
517 tcg_gen_sari_i32(var, var, shift);
518 break;
519 case 3: /* ROR/RRX */
520 if (shift != 0) {
521 if (flags)
522 shifter_out_im(var, shift - 1);
f669df27 523 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 524 } else {
66c374de 525 TCGv tmp = tcg_temp_new_i32();
b6348f29 526 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
527 if (flags)
528 shifter_out_im(var, 0);
529 tcg_gen_shri_i32(var, var, 1);
b26eefb6 530 tcg_gen_or_i32(var, var, tmp);
7d1b0095 531 tcg_temp_free_i32(tmp);
b26eefb6
PB
532 }
533 }
534}
535
8984bd2e
PB
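/* Shift by register.  When the flags are needed the work is done by helpers,
   which also compute the shifter carry-out; otherwise the branch-free
   generators above are used (ROR only needs its amount masked to 5 bits). */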
536static inline void gen_arm_shift_reg(TCGv var, int shiftop,
537 TCGv shift, int flags)
538{
539 if (flags) {
540 switch (shiftop) {
9ef39277
BS
541 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
542 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
543 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
544 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
545 }
546 } else {
547 switch (shiftop) {
365af80e
AJ
548 case 0:
549 gen_shl(var, var, shift);
550 break;
551 case 1:
552 gen_shr(var, var, shift);
553 break;
554 case 2:
555 gen_sar(var, var, shift);
556 break;
f669df27
AJ
557 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
558 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
559 }
560 }
7d1b0095 561 tcg_temp_free_i32(shift);
8984bd2e
PB
562}
563
6ddbc6e4
PB
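/* Parallel add/subtract.  One opcode field selects the variant prefix
   (s = signed modulo with GE flags, q = signed saturating, sh = signed
   halving, u = unsigned modulo with GE flags, uq = unsigned saturating,
   uh = unsigned halving) and the other selects the operation; ARM and
   Thumb-2 swap which field is which, hence the two decoders below.  Only
   the s/u forms need the pointer to the GE flags. */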
564#define PAS_OP(pfx) \
565 switch (op2) { \
566 case 0: gen_pas_helper(glue(pfx,add16)); break; \
567 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
568 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
569 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
570 case 4: gen_pas_helper(glue(pfx,add8)); break; \
571 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
572 }
d9ba4830 573static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 574{
a7812ae4 575 TCGv_ptr tmp;
6ddbc6e4
PB
576
577 switch (op1) {
578#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
579 case 1:
a7812ae4 580 tmp = tcg_temp_new_ptr();
0ecb72a5 581 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 582 PAS_OP(s)
b75263d6 583 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
584 break;
585 case 5:
a7812ae4 586 tmp = tcg_temp_new_ptr();
0ecb72a5 587 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 588 PAS_OP(u)
b75263d6 589 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
590 break;
591#undef gen_pas_helper
592#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
593 case 2:
594 PAS_OP(q);
595 break;
596 case 3:
597 PAS_OP(sh);
598 break;
599 case 6:
600 PAS_OP(uq);
601 break;
602 case 7:
603 PAS_OP(uh);
604 break;
605#undef gen_pas_helper
606 }
607}
9ee6e8bb
PB
608#undef PAS_OP
609
6ddbc6e4
PB
610/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
611#define PAS_OP(pfx) \
ed89a2f1 612 switch (op1) { \
6ddbc6e4
PB
613 case 0: gen_pas_helper(glue(pfx,add8)); break; \
614 case 1: gen_pas_helper(glue(pfx,add16)); break; \
615 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
616 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
617 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
618 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
619 }
d9ba4830 620static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 621{
a7812ae4 622 TCGv_ptr tmp;
6ddbc6e4 623
ed89a2f1 624 switch (op2) {
6ddbc6e4
PB
625#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
626 case 0:
a7812ae4 627 tmp = tcg_temp_new_ptr();
0ecb72a5 628 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 629 PAS_OP(s)
b75263d6 630 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
631 break;
632 case 4:
a7812ae4 633 tmp = tcg_temp_new_ptr();
0ecb72a5 634 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 635 PAS_OP(u)
b75263d6 636 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
637 break;
638#undef gen_pas_helper
639#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
640 case 1:
641 PAS_OP(q);
642 break;
643 case 2:
644 PAS_OP(sh);
645 break;
646 case 5:
647 PAS_OP(uq);
648 break;
649 case 6:
650 PAS_OP(uh);
651 break;
652#undef gen_pas_helper
653 }
654}
9ee6e8bb
PB
655#undef PAS_OP
656
d9ba4830
PB
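/* Emit a branch to 'label' that is taken when condition code 'cc' holds,
   evaluated from the cached NF/ZF/CF/VF variables; callers typically pass
   the inverse condition when they want to skip an instruction. */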
657static void gen_test_cc(int cc, int label)
658{
659 TCGv tmp;
d9ba4830
PB
660 int inv;
661
d9ba4830
PB
662 switch (cc) {
663 case 0: /* eq: Z */
66c374de 664 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
665 break;
666 case 1: /* ne: !Z */
66c374de 667 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
668 break;
669 case 2: /* cs: C */
66c374de 670 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
671 break;
672 case 3: /* cc: !C */
66c374de 673 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
674 break;
675 case 4: /* mi: N */
66c374de 676 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
677 break;
678 case 5: /* pl: !N */
66c374de 679 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
680 break;
681 case 6: /* vs: V */
66c374de 682 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
683 break;
684 case 7: /* vc: !V */
66c374de 685 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
686 break;
687 case 8: /* hi: C && !Z */
688 inv = gen_new_label();
66c374de
AJ
689 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
690 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
691 gen_set_label(inv);
692 break;
693 case 9: /* ls: !C || Z */
66c374de
AJ
694 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
695 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
696 break;
697 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
698 tmp = tcg_temp_new_i32();
699 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 700 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 701 tcg_temp_free_i32(tmp);
d9ba4830
PB
702 break;
703 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
704 tmp = tcg_temp_new_i32();
705 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 706 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 707 tcg_temp_free_i32(tmp);
d9ba4830
PB
708 break;
709 case 12: /* gt: !Z && N == V */
710 inv = gen_new_label();
66c374de
AJ
711 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
712 tmp = tcg_temp_new_i32();
713 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 714 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 715 tcg_temp_free_i32(tmp);
d9ba4830
PB
716 gen_set_label(inv);
717 break;
718 case 13: /* le: Z || N != V */
66c374de
AJ
719 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
720 tmp = tcg_temp_new_i32();
721 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 722 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 723 tcg_temp_free_i32(tmp);
d9ba4830
PB
724 break;
725 default:
726 fprintf(stderr, "Bad condition code 0x%x\n", cc);
727 abort();
728 }
d9ba4830 729}
2c0262af 730
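/* Indexed by the data-processing opcode: nonzero entries are the logical
   ops (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN), whose flag-setting forms
   take C from the shifter carry-out rather than from an arithmetic result. */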
b1d8e52e 731static const uint8_t table_logic_cc[16] = {
2c0262af
FB
732 1, /* and */
733 1, /* xor */
734 0, /* sub */
735 0, /* rsb */
736 0, /* add */
737 0, /* adc */
738 0, /* sbc */
739 0, /* rsc */
740 1, /* andl */
741 1, /* xorl */
742 0, /* cmp */
743 0, /* cmn */
744 1, /* orr */
745 1, /* mov */
746 1, /* bic */
747 1, /* mvn */
748};
3b46e624 749
d9ba4830
PB
750/* Set PC and Thumb state from an immediate address. */
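/* Bit 0 of the target selects the Thumb state (BX semantics); the PC itself
   is always stored with bit 0 clear. */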
751static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 752{
b26eefb6 753 TCGv tmp;
99c475ab 754
b26eefb6 755 s->is_jmp = DISAS_UPDATE;
d9ba4830 756 if (s->thumb != (addr & 1)) {
7d1b0095 757 tmp = tcg_temp_new_i32();
d9ba4830 758 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 759 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 760 tcg_temp_free_i32(tmp);
d9ba4830 761 }
155c3eac 762 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
763}
764
765/* Set PC and Thumb state from var. var is marked as dead. */
766static inline void gen_bx(DisasContext *s, TCGv var)
767{
d9ba4830 768 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
769 tcg_gen_andi_i32(cpu_R[15], var, ~1);
770 tcg_gen_andi_i32(var, var, 1);
771 store_cpu_field(var, thumb);
d9ba4830
PB
772}
773
21aeb343
JR
774/* Variant of store_reg which uses branch&exchange logic when storing
775 to r15 in ARM architecture v7 and above. The source must be a temporary
776 and will be marked as dead. */
0ecb72a5 777static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
21aeb343
JR
778 int reg, TCGv var)
779{
780 if (reg == 15 && ENABLE_ARCH_7) {
781 gen_bx(s, var);
782 } else {
783 store_reg(s, reg, var);
784 }
785}
786
be5e7a76
DES
787/* Variant of store_reg which uses branch&exchange logic when storing
788 * to r15 in ARM architecture v5T and above. This is used for storing
789 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
790 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 791static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
be5e7a76
DES
792 int reg, TCGv var)
793{
794 if (reg == 15 && ENABLE_ARCH_5) {
795 gen_bx(s, var);
796 } else {
797 store_reg(s, reg, var);
798 }
799}
800
b0109805
PB
801static inline TCGv gen_ld8s(TCGv addr, int index)
802{
7d1b0095 803 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
804 tcg_gen_qemu_ld8s(tmp, addr, index);
805 return tmp;
806}
807static inline TCGv gen_ld8u(TCGv addr, int index)
808{
7d1b0095 809 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
810 tcg_gen_qemu_ld8u(tmp, addr, index);
811 return tmp;
812}
813static inline TCGv gen_ld16s(TCGv addr, int index)
814{
7d1b0095 815 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
816 tcg_gen_qemu_ld16s(tmp, addr, index);
817 return tmp;
818}
819static inline TCGv gen_ld16u(TCGv addr, int index)
820{
7d1b0095 821 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
822 tcg_gen_qemu_ld16u(tmp, addr, index);
823 return tmp;
824}
825static inline TCGv gen_ld32(TCGv addr, int index)
826{
7d1b0095 827 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
828 tcg_gen_qemu_ld32u(tmp, addr, index);
829 return tmp;
830}
84496233
JR
831static inline TCGv_i64 gen_ld64(TCGv addr, int index)
832{
833 TCGv_i64 tmp = tcg_temp_new_i64();
834 tcg_gen_qemu_ld64(tmp, addr, index);
835 return tmp;
836}
b0109805
PB
837static inline void gen_st8(TCGv val, TCGv addr, int index)
838{
839 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 840 tcg_temp_free_i32(val);
b0109805
PB
841}
842static inline void gen_st16(TCGv val, TCGv addr, int index)
843{
844 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 845 tcg_temp_free_i32(val);
b0109805
PB
846}
847static inline void gen_st32(TCGv val, TCGv addr, int index)
848{
849 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 850 tcg_temp_free_i32(val);
b0109805 851}
84496233
JR
852static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
853{
854 tcg_gen_qemu_st64(val, addr, index);
855 tcg_temp_free_i64(val);
856}
b5ff1b31 857
5e3f878a
PB
858static inline void gen_set_pc_im(uint32_t val)
859{
155c3eac 860 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
861}
862
b5ff1b31
FB
863/* Force a TB lookup after an instruction that changes the CPU state. */
864static inline void gen_lookup_tb(DisasContext *s)
865{
a6445c52 866 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
867 s->is_jmp = DISAS_UPDATE;
868}
869
b0109805
PB
870static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
871 TCGv var)
2c0262af 872{
1e8d4eec 873 int val, rm, shift, shiftop;
b26eefb6 874 TCGv offset;
2c0262af
FB
875
876 if (!(insn & (1 << 25))) {
877 /* immediate */
878 val = insn & 0xfff;
879 if (!(insn & (1 << 23)))
880 val = -val;
537730b9 881 if (val != 0)
b0109805 882 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
883 } else {
884 /* shift/register */
885 rm = (insn) & 0xf;
886 shift = (insn >> 7) & 0x1f;
1e8d4eec 887 shiftop = (insn >> 5) & 3;
b26eefb6 888 offset = load_reg(s, rm);
9a119ff6 889 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 890 if (!(insn & (1 << 23)))
b0109805 891 tcg_gen_sub_i32(var, var, offset);
2c0262af 892 else
b0109805 893 tcg_gen_add_i32(var, var, offset);
7d1b0095 894 tcg_temp_free_i32(offset);
2c0262af
FB
895 }
896}
897
191f9a93 898static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 899 int extra, TCGv var)
2c0262af
FB
900{
901 int val, rm;
b26eefb6 902 TCGv offset;
3b46e624 903
2c0262af
FB
904 if (insn & (1 << 22)) {
905 /* immediate */
906 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
907 if (!(insn & (1 << 23)))
908 val = -val;
18acad92 909 val += extra;
537730b9 910 if (val != 0)
b0109805 911 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
912 } else {
913 /* register */
191f9a93 914 if (extra)
b0109805 915 tcg_gen_addi_i32(var, var, extra);
2c0262af 916 rm = (insn) & 0xf;
b26eefb6 917 offset = load_reg(s, rm);
2c0262af 918 if (!(insn & (1 << 23)))
b0109805 919 tcg_gen_sub_i32(var, var, offset);
2c0262af 920 else
b0109805 921 tcg_gen_add_i32(var, var, offset);
7d1b0095 922 tcg_temp_free_i32(offset);
2c0262af
FB
923 }
924}
925
5aaebd13
PM
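/* Neon arithmetic uses the separate "standard FP status" (the fixed
   flush-to-zero/default-NaN behaviour), while VFP instructions use the
   FPSCR-controlled fp_status; 'neon' picks which float_status block the
   helpers will see. */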
926static TCGv_ptr get_fpstatus_ptr(int neon)
927{
928 TCGv_ptr statusptr = tcg_temp_new_ptr();
929 int offset;
930 if (neon) {
0ecb72a5 931 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 932 } else {
0ecb72a5 933 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
934 }
935 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
936 return statusptr;
937}
938
4373f3ce
PB
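/* The VFP generators below operate on the scratch registers cpu_F0/cpu_F1
   ('d' forms for double precision, 's' forms for single); 'dp' selects the
   precision and rounding/exception state comes from the fp_status pointer. */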
939#define VFP_OP2(name) \
940static inline void gen_vfp_##name(int dp) \
941{ \
ae1857ec
PM
942 TCGv_ptr fpst = get_fpstatus_ptr(0); \
943 if (dp) { \
944 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
945 } else { \
946 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
947 } \
948 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
949}
950
4373f3ce
PB
951VFP_OP2(add)
952VFP_OP2(sub)
953VFP_OP2(mul)
954VFP_OP2(div)
955
956#undef VFP_OP2
957
605a6aed
PM
958static inline void gen_vfp_F1_mul(int dp)
959{
960 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 961 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 962 if (dp) {
ae1857ec 963 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 964 } else {
ae1857ec 965 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 966 }
ae1857ec 967 tcg_temp_free_ptr(fpst);
605a6aed
PM
968}
969
970static inline void gen_vfp_F1_neg(int dp)
971{
972 /* Like gen_vfp_neg() but put result in F1 */
973 if (dp) {
974 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
975 } else {
976 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
977 }
978}
979
4373f3ce
PB
980static inline void gen_vfp_abs(int dp)
981{
982 if (dp)
983 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
984 else
985 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
986}
987
988static inline void gen_vfp_neg(int dp)
989{
990 if (dp)
991 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
992 else
993 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
994}
995
996static inline void gen_vfp_sqrt(int dp)
997{
998 if (dp)
999 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1000 else
1001 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1002}
1003
1004static inline void gen_vfp_cmp(int dp)
1005{
1006 if (dp)
1007 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1008 else
1009 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1010}
1011
1012static inline void gen_vfp_cmpe(int dp)
1013{
1014 if (dp)
1015 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1016 else
1017 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1018}
1019
1020static inline void gen_vfp_F1_ld0(int dp)
1021{
1022 if (dp)
5b340b51 1023 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1024 else
5b340b51 1025 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1026}
1027
5500b06c
PM
1028#define VFP_GEN_ITOF(name) \
1029static inline void gen_vfp_##name(int dp, int neon) \
1030{ \
5aaebd13 1031 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1032 if (dp) { \
1033 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1034 } else { \
1035 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1036 } \
b7fa9214 1037 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1038}
1039
5500b06c
PM
1040VFP_GEN_ITOF(uito)
1041VFP_GEN_ITOF(sito)
1042#undef VFP_GEN_ITOF
4373f3ce 1043
5500b06c
PM
1044#define VFP_GEN_FTOI(name) \
1045static inline void gen_vfp_##name(int dp, int neon) \
1046{ \
5aaebd13 1047 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1048 if (dp) { \
1049 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1050 } else { \
1051 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1052 } \
b7fa9214 1053 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1054}
1055
5500b06c
PM
1056VFP_GEN_FTOI(toui)
1057VFP_GEN_FTOI(touiz)
1058VFP_GEN_FTOI(tosi)
1059VFP_GEN_FTOI(tosiz)
1060#undef VFP_GEN_FTOI
4373f3ce
PB
1061
1062#define VFP_GEN_FIX(name) \
5500b06c 1063static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1064{ \
b75263d6 1065 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1066 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1067 if (dp) { \
1068 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1069 } else { \
1070 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1071 } \
b75263d6 1072 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1073 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1074}
4373f3ce
PB
1075VFP_GEN_FIX(tosh)
1076VFP_GEN_FIX(tosl)
1077VFP_GEN_FIX(touh)
1078VFP_GEN_FIX(toul)
1079VFP_GEN_FIX(shto)
1080VFP_GEN_FIX(slto)
1081VFP_GEN_FIX(uhto)
1082VFP_GEN_FIX(ulto)
1083#undef VFP_GEN_FIX
9ee6e8bb 1084
312eea9f 1085static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1086{
1087 if (dp)
312eea9f 1088 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1089 else
312eea9f 1090 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1091}
1092
312eea9f 1093static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1094{
1095 if (dp)
312eea9f 1096 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1097 else
312eea9f 1098 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1099}
1100
8e96005d
FB
1101static inline long
1102vfp_reg_offset (int dp, int reg)
1103{
1104 if (dp)
1105 return offsetof(CPUARMState, vfp.regs[reg]);
1106 else if (reg & 1) {
1107 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1108 + offsetof(CPU_DoubleU, l.upper);
1109 } else {
1110 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1111 + offsetof(CPU_DoubleU, l.lower);
1112 }
1113}
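/* Note: single-precision register n lives in one half of double register
   n/2; odd n is the upper 32 bits, even n the lower (CPU_DoubleU hides the
   host endianness). */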
9ee6e8bb
PB
1114
1115/* Return the offset of a 32-bit piece of a NEON register.
1116 zero is the least significant end of the register. */
1117static inline long
1118neon_reg_offset (int reg, int n)
1119{
1120 int sreg;
1121 sreg = reg * 2 + n;
1122 return vfp_reg_offset(0, sreg);
1123}
1124
8f8e3aa4
PB
1125static TCGv neon_load_reg(int reg, int pass)
1126{
7d1b0095 1127 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1128 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1129 return tmp;
1130}
1131
1132static void neon_store_reg(int reg, int pass, TCGv var)
1133{
1134 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1135 tcg_temp_free_i32(var);
8f8e3aa4
PB
1136}
1137
a7812ae4 1138static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1139{
1140 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1141}
1142
a7812ae4 1143static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1144{
1145 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1146}
1147
4373f3ce
PB
1148#define tcg_gen_ld_f32 tcg_gen_ld_i32
1149#define tcg_gen_ld_f64 tcg_gen_ld_i64
1150#define tcg_gen_st_f32 tcg_gen_st_i32
1151#define tcg_gen_st_f64 tcg_gen_st_i64
1152
b7bcbe95
FB
1153static inline void gen_mov_F0_vreg(int dp, int reg)
1154{
1155 if (dp)
4373f3ce 1156 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1157 else
4373f3ce 1158 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1159}
1160
1161static inline void gen_mov_F1_vreg(int dp, int reg)
1162{
1163 if (dp)
4373f3ce 1164 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1165 else
4373f3ce 1166 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1167}
1168
1169static inline void gen_mov_vreg_F0(int dp, int reg)
1170{
1171 if (dp)
4373f3ce 1172 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1173 else
4373f3ce 1174 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1175}
1176
18c9b560
AZ
1177#define ARM_CP_RW_BIT (1 << 20)
1178
a7812ae4 1179static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1180{
0ecb72a5 1181 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1182}
1183
a7812ae4 1184static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1185{
0ecb72a5 1186 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1187}
1188
da6b5335 1189static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1190{
7d1b0095 1191 TCGv var = tcg_temp_new_i32();
0ecb72a5 1192 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1193 return var;
e677137d
PB
1194}
1195
da6b5335 1196static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1197{
0ecb72a5 1198 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1199 tcg_temp_free_i32(var);
e677137d
PB
1200}
1201
1202static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1203{
1204 iwmmxt_store_reg(cpu_M0, rn);
1205}
1206
1207static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1208{
1209 iwmmxt_load_reg(cpu_M0, rn);
1210}
1211
1212static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1213{
1214 iwmmxt_load_reg(cpu_V1, rn);
1215 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1216}
1217
1218static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1219{
1220 iwmmxt_load_reg(cpu_V1, rn);
1221 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1222}
1223
1224static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1225{
1226 iwmmxt_load_reg(cpu_V1, rn);
1227 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1228}
1229
1230#define IWMMXT_OP(name) \
1231static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1232{ \
1233 iwmmxt_load_reg(cpu_V1, rn); \
1234 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1235}
1236
477955bd
PM
1237#define IWMMXT_OP_ENV(name) \
1238static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1239{ \
1240 iwmmxt_load_reg(cpu_V1, rn); \
1241 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1242}
1243
1244#define IWMMXT_OP_ENV_SIZE(name) \
1245IWMMXT_OP_ENV(name##b) \
1246IWMMXT_OP_ENV(name##w) \
1247IWMMXT_OP_ENV(name##l)
e677137d 1248
477955bd 1249#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1250static inline void gen_op_iwmmxt_##name##_M0(void) \
1251{ \
477955bd 1252 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1253}
1254
1255IWMMXT_OP(maddsq)
1256IWMMXT_OP(madduq)
1257IWMMXT_OP(sadb)
1258IWMMXT_OP(sadw)
1259IWMMXT_OP(mulslw)
1260IWMMXT_OP(mulshw)
1261IWMMXT_OP(mululw)
1262IWMMXT_OP(muluhw)
1263IWMMXT_OP(macsw)
1264IWMMXT_OP(macuw)
1265
477955bd
PM
1266IWMMXT_OP_ENV_SIZE(unpackl)
1267IWMMXT_OP_ENV_SIZE(unpackh)
1268
1269IWMMXT_OP_ENV1(unpacklub)
1270IWMMXT_OP_ENV1(unpackluw)
1271IWMMXT_OP_ENV1(unpacklul)
1272IWMMXT_OP_ENV1(unpackhub)
1273IWMMXT_OP_ENV1(unpackhuw)
1274IWMMXT_OP_ENV1(unpackhul)
1275IWMMXT_OP_ENV1(unpacklsb)
1276IWMMXT_OP_ENV1(unpacklsw)
1277IWMMXT_OP_ENV1(unpacklsl)
1278IWMMXT_OP_ENV1(unpackhsb)
1279IWMMXT_OP_ENV1(unpackhsw)
1280IWMMXT_OP_ENV1(unpackhsl)
1281
1282IWMMXT_OP_ENV_SIZE(cmpeq)
1283IWMMXT_OP_ENV_SIZE(cmpgtu)
1284IWMMXT_OP_ENV_SIZE(cmpgts)
1285
1286IWMMXT_OP_ENV_SIZE(mins)
1287IWMMXT_OP_ENV_SIZE(minu)
1288IWMMXT_OP_ENV_SIZE(maxs)
1289IWMMXT_OP_ENV_SIZE(maxu)
1290
1291IWMMXT_OP_ENV_SIZE(subn)
1292IWMMXT_OP_ENV_SIZE(addn)
1293IWMMXT_OP_ENV_SIZE(subu)
1294IWMMXT_OP_ENV_SIZE(addu)
1295IWMMXT_OP_ENV_SIZE(subs)
1296IWMMXT_OP_ENV_SIZE(adds)
1297
1298IWMMXT_OP_ENV(avgb0)
1299IWMMXT_OP_ENV(avgb1)
1300IWMMXT_OP_ENV(avgw0)
1301IWMMXT_OP_ENV(avgw1)
e677137d
PB
1302
1303IWMMXT_OP(msadb)
1304
477955bd
PM
1305IWMMXT_OP_ENV(packuw)
1306IWMMXT_OP_ENV(packul)
1307IWMMXT_OP_ENV(packuq)
1308IWMMXT_OP_ENV(packsw)
1309IWMMXT_OP_ENV(packsl)
1310IWMMXT_OP_ENV(packsq)
e677137d 1311
e677137d
PB
1312static void gen_op_iwmmxt_set_mup(void)
1313{
1314 TCGv tmp;
1315 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1316 tcg_gen_ori_i32(tmp, tmp, 2);
1317 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1318}
1319
1320static void gen_op_iwmmxt_set_cup(void)
1321{
1322 TCGv tmp;
1323 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1324 tcg_gen_ori_i32(tmp, tmp, 1);
1325 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1326}
1327
1328static void gen_op_iwmmxt_setpsr_nz(void)
1329{
7d1b0095 1330 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1331 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1332 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1333}
1334
1335static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1336{
1337 iwmmxt_load_reg(cpu_V1, rn);
86831435 1338 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1339 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1340}
1341
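/* Decode the iwMMXt load/store addressing mode into 'dest': the immediate
   offset is (insn & 0xff), scaled by 4 when bit 8 is set; bit 24 selects
   pre-indexed, bit 23 add/subtract, and bit 21 writeback (post-indexed when
   bit 24 is clear).  Returns nonzero for the encodings not handled here. */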
da6b5335 1342static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1343{
1344 int rd;
1345 uint32_t offset;
da6b5335 1346 TCGv tmp;
18c9b560
AZ
1347
1348 rd = (insn >> 16) & 0xf;
da6b5335 1349 tmp = load_reg(s, rd);
18c9b560
AZ
1350
1351 offset = (insn & 0xff) << ((insn >> 7) & 2);
1352 if (insn & (1 << 24)) {
1353 /* Pre indexed */
1354 if (insn & (1 << 23))
da6b5335 1355 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1356 else
da6b5335
FN
1357 tcg_gen_addi_i32(tmp, tmp, -offset);
1358 tcg_gen_mov_i32(dest, tmp);
18c9b560 1359 if (insn & (1 << 21))
da6b5335
FN
1360 store_reg(s, rd, tmp);
1361 else
7d1b0095 1362 tcg_temp_free_i32(tmp);
18c9b560
AZ
1363 } else if (insn & (1 << 21)) {
1364 /* Post indexed */
da6b5335 1365 tcg_gen_mov_i32(dest, tmp);
18c9b560 1366 if (insn & (1 << 23))
da6b5335 1367 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1368 else
da6b5335
FN
1369 tcg_gen_addi_i32(tmp, tmp, -offset);
1370 store_reg(s, rd, tmp);
18c9b560
AZ
1371 } else if (!(insn & (1 << 23)))
1372 return 1;
1373 return 0;
1374}
1375
da6b5335 1376static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1377{
1378 int rd = (insn >> 0) & 0xf;
da6b5335 1379 TCGv tmp;
18c9b560 1380
da6b5335
FN
1381 if (insn & (1 << 8)) {
1382 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1383 return 1;
da6b5335
FN
1384 } else {
1385 tmp = iwmmxt_load_creg(rd);
1386 }
1387 } else {
7d1b0095 1388 tmp = tcg_temp_new_i32();
da6b5335
FN
1389 iwmmxt_load_reg(cpu_V0, rd);
1390 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1391 }
1392 tcg_gen_andi_i32(tmp, tmp, mask);
1393 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1394 tcg_temp_free_i32(tmp);
18c9b560
AZ
1395 return 0;
1396}
1397
a1c7273b 1398/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1399 (i.e. an undefined instruction). */
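/* The first block below handles the coprocessor double-register transfers
   (TMCRR/TMRRC) and the WLDR/WSTR load/store forms; everything else must
   fall in the 0x0e000000 coprocessor data-processing space handled by the
   big switch that follows. */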
0ecb72a5 1400static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1401{
1402 int rd, wrd;
1403 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1404 TCGv addr;
1405 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1406
1407 if ((insn & 0x0e000e00) == 0x0c000000) {
1408 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1409 wrd = insn & 0xf;
1410 rdlo = (insn >> 12) & 0xf;
1411 rdhi = (insn >> 16) & 0xf;
1412 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1413 iwmmxt_load_reg(cpu_V0, wrd);
1414 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1415 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1416 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1417 } else { /* TMCRR */
da6b5335
FN
1418 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1419 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1420 gen_op_iwmmxt_set_mup();
1421 }
1422 return 0;
1423 }
1424
1425 wrd = (insn >> 12) & 0xf;
7d1b0095 1426 addr = tcg_temp_new_i32();
da6b5335 1427 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1428 tcg_temp_free_i32(addr);
18c9b560 1429 return 1;
da6b5335 1430 }
18c9b560
AZ
1431 if (insn & ARM_CP_RW_BIT) {
1432 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1433 tmp = tcg_temp_new_i32();
da6b5335
FN
1434 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1435 iwmmxt_store_creg(wrd, tmp);
18c9b560 1436 } else {
e677137d
PB
1437 i = 1;
1438 if (insn & (1 << 8)) {
1439 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1440 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1441 i = 0;
1442 } else { /* WLDRW wRd */
da6b5335 1443 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1444 }
1445 } else {
1446 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1447 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1448 } else { /* WLDRB */
da6b5335 1449 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1450 }
1451 }
1452 if (i) {
1453 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1454 tcg_temp_free_i32(tmp);
e677137d 1455 }
18c9b560
AZ
1456 gen_op_iwmmxt_movq_wRn_M0(wrd);
1457 }
1458 } else {
1459 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1460 tmp = iwmmxt_load_creg(wrd);
1461 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1462 } else {
1463 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1464 tmp = tcg_temp_new_i32();
e677137d
PB
1465 if (insn & (1 << 8)) {
1466 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1467 tcg_temp_free_i32(tmp);
da6b5335 1468 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1469 } else { /* WSTRW wRd */
1470 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1471 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1472 }
1473 } else {
1474 if (insn & (1 << 22)) { /* WSTRH */
1475 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1476 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1477 } else { /* WSTRB */
1478 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1479 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1480 }
1481 }
18c9b560
AZ
1482 }
1483 }
7d1b0095 1484 tcg_temp_free_i32(addr);
18c9b560
AZ
1485 return 0;
1486 }
1487
1488 if ((insn & 0x0f000000) != 0x0e000000)
1489 return 1;
1490
1491 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1492 case 0x000: /* WOR */
1493 wrd = (insn >> 12) & 0xf;
1494 rd0 = (insn >> 0) & 0xf;
1495 rd1 = (insn >> 16) & 0xf;
1496 gen_op_iwmmxt_movq_M0_wRn(rd0);
1497 gen_op_iwmmxt_orq_M0_wRn(rd1);
1498 gen_op_iwmmxt_setpsr_nz();
1499 gen_op_iwmmxt_movq_wRn_M0(wrd);
1500 gen_op_iwmmxt_set_mup();
1501 gen_op_iwmmxt_set_cup();
1502 break;
1503 case 0x011: /* TMCR */
1504 if (insn & 0xf)
1505 return 1;
1506 rd = (insn >> 12) & 0xf;
1507 wrd = (insn >> 16) & 0xf;
1508 switch (wrd) {
1509 case ARM_IWMMXT_wCID:
1510 case ARM_IWMMXT_wCASF:
1511 break;
1512 case ARM_IWMMXT_wCon:
1513 gen_op_iwmmxt_set_cup();
1514 /* Fall through. */
1515 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1516 tmp = iwmmxt_load_creg(wrd);
1517 tmp2 = load_reg(s, rd);
f669df27 1518 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1519 tcg_temp_free_i32(tmp2);
da6b5335 1520 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1521 break;
1522 case ARM_IWMMXT_wCGR0:
1523 case ARM_IWMMXT_wCGR1:
1524 case ARM_IWMMXT_wCGR2:
1525 case ARM_IWMMXT_wCGR3:
1526 gen_op_iwmmxt_set_cup();
da6b5335
FN
1527 tmp = load_reg(s, rd);
1528 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1529 break;
1530 default:
1531 return 1;
1532 }
1533 break;
1534 case 0x100: /* WXOR */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x111: /* TMRC */
1546 if (insn & 0xf)
1547 return 1;
1548 rd = (insn >> 12) & 0xf;
1549 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1550 tmp = iwmmxt_load_creg(wrd);
1551 store_reg(s, rd, tmp);
18c9b560
AZ
1552 break;
1553 case 0x300: /* WANDN */
1554 wrd = (insn >> 12) & 0xf;
1555 rd0 = (insn >> 0) & 0xf;
1556 rd1 = (insn >> 16) & 0xf;
1557 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1558 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1559 gen_op_iwmmxt_andq_M0_wRn(rd1);
1560 gen_op_iwmmxt_setpsr_nz();
1561 gen_op_iwmmxt_movq_wRn_M0(wrd);
1562 gen_op_iwmmxt_set_mup();
1563 gen_op_iwmmxt_set_cup();
1564 break;
1565 case 0x200: /* WAND */
1566 wrd = (insn >> 12) & 0xf;
1567 rd0 = (insn >> 0) & 0xf;
1568 rd1 = (insn >> 16) & 0xf;
1569 gen_op_iwmmxt_movq_M0_wRn(rd0);
1570 gen_op_iwmmxt_andq_M0_wRn(rd1);
1571 gen_op_iwmmxt_setpsr_nz();
1572 gen_op_iwmmxt_movq_wRn_M0(wrd);
1573 gen_op_iwmmxt_set_mup();
1574 gen_op_iwmmxt_set_cup();
1575 break;
1576 case 0x810: case 0xa10: /* WMADD */
1577 wrd = (insn >> 12) & 0xf;
1578 rd0 = (insn >> 0) & 0xf;
1579 rd1 = (insn >> 16) & 0xf;
1580 gen_op_iwmmxt_movq_M0_wRn(rd0);
1581 if (insn & (1 << 21))
1582 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1583 else
1584 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1585 gen_op_iwmmxt_movq_wRn_M0(wrd);
1586 gen_op_iwmmxt_set_mup();
1587 break;
1588 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1589 wrd = (insn >> 12) & 0xf;
1590 rd0 = (insn >> 16) & 0xf;
1591 rd1 = (insn >> 0) & 0xf;
1592 gen_op_iwmmxt_movq_M0_wRn(rd0);
1593 switch ((insn >> 22) & 3) {
1594 case 0:
1595 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1596 break;
1597 case 1:
1598 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1599 break;
1600 case 2:
1601 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1602 break;
1603 case 3:
1604 return 1;
1605 }
1606 gen_op_iwmmxt_movq_wRn_M0(wrd);
1607 gen_op_iwmmxt_set_mup();
1608 gen_op_iwmmxt_set_cup();
1609 break;
1610 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1611 wrd = (insn >> 12) & 0xf;
1612 rd0 = (insn >> 16) & 0xf;
1613 rd1 = (insn >> 0) & 0xf;
1614 gen_op_iwmmxt_movq_M0_wRn(rd0);
1615 switch ((insn >> 22) & 3) {
1616 case 0:
1617 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1618 break;
1619 case 1:
1620 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1621 break;
1622 case 2:
1623 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1624 break;
1625 case 3:
1626 return 1;
1627 }
1628 gen_op_iwmmxt_movq_wRn_M0(wrd);
1629 gen_op_iwmmxt_set_mup();
1630 gen_op_iwmmxt_set_cup();
1631 break;
1632 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1633 wrd = (insn >> 12) & 0xf;
1634 rd0 = (insn >> 16) & 0xf;
1635 rd1 = (insn >> 0) & 0xf;
1636 gen_op_iwmmxt_movq_M0_wRn(rd0);
1637 if (insn & (1 << 22))
1638 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1639 else
1640 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1641 if (!(insn & (1 << 20)))
1642 gen_op_iwmmxt_addl_M0_wRn(wrd);
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 break;
1646 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 16) & 0xf;
1649 rd1 = (insn >> 0) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1651 if (insn & (1 << 21)) {
1652 if (insn & (1 << 20))
1653 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1654 else
1655 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1656 } else {
1657 if (insn & (1 << 20))
1658 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1659 else
1660 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1661 }
18c9b560
AZ
1662 gen_op_iwmmxt_movq_wRn_M0(wrd);
1663 gen_op_iwmmxt_set_mup();
1664 break;
1665 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1666 wrd = (insn >> 12) & 0xf;
1667 rd0 = (insn >> 16) & 0xf;
1668 rd1 = (insn >> 0) & 0xf;
1669 gen_op_iwmmxt_movq_M0_wRn(rd0);
1670 if (insn & (1 << 21))
1671 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1672 else
1673 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1674 if (!(insn & (1 << 20))) {
e677137d
PB
1675 iwmmxt_load_reg(cpu_V1, wrd);
1676 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1677 }
1678 gen_op_iwmmxt_movq_wRn_M0(wrd);
1679 gen_op_iwmmxt_set_mup();
1680 break;
1681 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1682 wrd = (insn >> 12) & 0xf;
1683 rd0 = (insn >> 16) & 0xf;
1684 rd1 = (insn >> 0) & 0xf;
1685 gen_op_iwmmxt_movq_M0_wRn(rd0);
1686 switch ((insn >> 22) & 3) {
1687 case 0:
1688 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1689 break;
1690 case 1:
1691 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1692 break;
1693 case 2:
1694 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1695 break;
1696 case 3:
1697 return 1;
1698 }
1699 gen_op_iwmmxt_movq_wRn_M0(wrd);
1700 gen_op_iwmmxt_set_mup();
1701 gen_op_iwmmxt_set_cup();
1702 break;
1703 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1704 wrd = (insn >> 12) & 0xf;
1705 rd0 = (insn >> 16) & 0xf;
1706 rd1 = (insn >> 0) & 0xf;
1707 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1708 if (insn & (1 << 22)) {
1709 if (insn & (1 << 20))
1710 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1711 else
1712 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1713 } else {
1714 if (insn & (1 << 20))
1715 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1716 else
1717 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1718 }
18c9b560
AZ
1719 gen_op_iwmmxt_movq_wRn_M0(wrd);
1720 gen_op_iwmmxt_set_mup();
1721 gen_op_iwmmxt_set_cup();
1722 break;
1723 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1724 wrd = (insn >> 12) & 0xf;
1725 rd0 = (insn >> 16) & 0xf;
1726 rd1 = (insn >> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1728 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1729 tcg_gen_andi_i32(tmp, tmp, 7);
1730 iwmmxt_load_reg(cpu_V1, rd1);
1731 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1732 tcg_temp_free_i32(tmp);
18c9b560
AZ
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1737 if (((insn >> 6) & 3) == 3)
1738 return 1;
18c9b560
AZ
1739 rd = (insn >> 12) & 0xf;
1740 wrd = (insn >> 16) & 0xf;
da6b5335 1741 tmp = load_reg(s, rd);
18c9b560
AZ
1742 gen_op_iwmmxt_movq_M0_wRn(wrd);
1743 switch ((insn >> 6) & 3) {
1744 case 0:
da6b5335
FN
1745 tmp2 = tcg_const_i32(0xff);
1746 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1747 break;
1748 case 1:
da6b5335
FN
1749 tmp2 = tcg_const_i32(0xffff);
1750 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1751 break;
1752 case 2:
da6b5335
FN
1753 tmp2 = tcg_const_i32(0xffffffff);
1754 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1755 break;
da6b5335
FN
1756 default:
1757 TCGV_UNUSED(tmp2);
1758 TCGV_UNUSED(tmp3);
18c9b560 1759 }
da6b5335
FN
1760 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1761 tcg_temp_free(tmp3);
1762 tcg_temp_free(tmp2);
7d1b0095 1763 tcg_temp_free_i32(tmp);
18c9b560
AZ
1764 gen_op_iwmmxt_movq_wRn_M0(wrd);
1765 gen_op_iwmmxt_set_mup();
1766 break;
1767 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1768 rd = (insn >> 12) & 0xf;
1769 wrd = (insn >> 16) & 0xf;
da6b5335 1770 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1771 return 1;
1772 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1773 tmp = tcg_temp_new_i32();
18c9b560
AZ
1774 switch ((insn >> 22) & 3) {
1775 case 0:
da6b5335
FN
1776 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1777 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1778 if (insn & 8) {
1779 tcg_gen_ext8s_i32(tmp, tmp);
1780 } else {
1781 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1782 }
1783 break;
1784 case 1:
da6b5335
FN
1785 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1786 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1787 if (insn & 8) {
1788 tcg_gen_ext16s_i32(tmp, tmp);
1789 } else {
1790 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1791 }
1792 break;
1793 case 2:
da6b5335
FN
1794 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1795 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1796 break;
18c9b560 1797 }
da6b5335 1798 store_reg(s, rd, tmp);
18c9b560
AZ
1799 break;
1800 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1801 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1802 return 1;
da6b5335 1803 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1804 switch ((insn >> 22) & 3) {
1805 case 0:
da6b5335 1806 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1807 break;
1808 case 1:
da6b5335 1809 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1810 break;
1811 case 2:
da6b5335 1812 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1813 break;
18c9b560 1814 }
da6b5335
FN
1815 tcg_gen_shli_i32(tmp, tmp, 28);
1816 gen_set_nzcv(tmp);
7d1b0095 1817 tcg_temp_free_i32(tmp);
18c9b560
AZ
1818 break;
1819 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1820 if (((insn >> 6) & 3) == 3)
1821 return 1;
18c9b560
AZ
1822 rd = (insn >> 12) & 0xf;
1823 wrd = (insn >> 16) & 0xf;
da6b5335 1824 tmp = load_reg(s, rd);
18c9b560
AZ
1825 switch ((insn >> 6) & 3) {
1826 case 0:
da6b5335 1827 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1828 break;
1829 case 1:
da6b5335 1830 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1831 break;
1832 case 2:
da6b5335 1833 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1834 break;
18c9b560 1835 }
7d1b0095 1836 tcg_temp_free_i32(tmp);
18c9b560
AZ
1837 gen_op_iwmmxt_movq_wRn_M0(wrd);
1838 gen_op_iwmmxt_set_mup();
1839 break;
1840 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1841 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1842 return 1;
da6b5335 1843 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1844 tmp2 = tcg_temp_new_i32();
da6b5335 1845 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1846 switch ((insn >> 22) & 3) {
1847 case 0:
1848 for (i = 0; i < 7; i++) {
da6b5335
FN
1849 tcg_gen_shli_i32(tmp2, tmp2, 4);
1850 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1851 }
1852 break;
1853 case 1:
1854 for (i = 0; i < 3; i++) {
da6b5335
FN
1855 tcg_gen_shli_i32(tmp2, tmp2, 8);
1856 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1857 }
1858 break;
1859 case 2:
da6b5335
FN
1860 tcg_gen_shli_i32(tmp2, tmp2, 16);
1861 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1862 break;
18c9b560 1863 }
da6b5335 1864 gen_set_nzcv(tmp);
7d1b0095
PM
1865 tcg_temp_free_i32(tmp2);
1866 tcg_temp_free_i32(tmp);
18c9b560
AZ
1867 break;
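            /* Worked sketch of the TANDC folding above (illustrative comment,
             * not generated code): wCASF holds one 4-bit flag field per
             * element, so for bytes the loop behaves like
             *
             *     acc = casf;
             *     for (i = 0; i < 7; i++)
             *         acc &= casf << (4 * (i + 1));
             *
             * leaving the AND of all eight fields in bits [31:28] of acc,
             * which gen_set_nzcv() then copies into the ARM NZCV flags.  The
             * halfword and word cases fold 4 and 2 fields the same way.
             */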
1868 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1869 wrd = (insn >> 12) & 0xf;
1870 rd0 = (insn >> 16) & 0xf;
1871 gen_op_iwmmxt_movq_M0_wRn(rd0);
1872 switch ((insn >> 22) & 3) {
1873 case 0:
e677137d 1874 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1875 break;
1876 case 1:
e677137d 1877 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1878 break;
1879 case 2:
e677137d 1880 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1881 break;
1882 case 3:
1883 return 1;
1884 }
1885 gen_op_iwmmxt_movq_wRn_M0(wrd);
1886 gen_op_iwmmxt_set_mup();
1887 break;
1888 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1889 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1890 return 1;
da6b5335 1891 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1892 tmp2 = tcg_temp_new_i32();
da6b5335 1893 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1894 switch ((insn >> 22) & 3) {
1895 case 0:
1896 for (i = 0; i < 7; i++) {
da6b5335
FN
1897 tcg_gen_shli_i32(tmp2, tmp2, 4);
1898 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1899 }
1900 break;
1901 case 1:
1902 for (i = 0; i < 3; i++) {
da6b5335
FN
1903 tcg_gen_shli_i32(tmp2, tmp2, 8);
1904 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1905 }
1906 break;
1907 case 2:
da6b5335
FN
1908 tcg_gen_shli_i32(tmp2, tmp2, 16);
1909 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1910 break;
18c9b560 1911 }
da6b5335 1912 gen_set_nzcv(tmp);
7d1b0095
PM
1913 tcg_temp_free_i32(tmp2);
1914 tcg_temp_free_i32(tmp);
18c9b560
AZ
1915 break;
1916 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1917 rd = (insn >> 12) & 0xf;
1918 rd0 = (insn >> 16) & 0xf;
da6b5335 1919 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1920 return 1;
1921 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1922 tmp = tcg_temp_new_i32();
18c9b560
AZ
1923 switch ((insn >> 22) & 3) {
1924 case 0:
da6b5335 1925 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1926 break;
1927 case 1:
da6b5335 1928 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1929 break;
1930 case 2:
da6b5335 1931 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1932 break;
18c9b560 1933 }
da6b5335 1934 store_reg(s, rd, tmp);
18c9b560
AZ
1935 break;
1936 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1937 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1938 wrd = (insn >> 12) & 0xf;
1939 rd0 = (insn >> 16) & 0xf;
1940 rd1 = (insn >> 0) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1946 else
1947 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1952 else
1953 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1958 else
1959 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1960 break;
1961 case 3:
1962 return 1;
1963 }
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1969 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpacklsb_M0();
1977 else
1978 gen_op_iwmmxt_unpacklub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpacklsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackluw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpacklsl_M0();
1989 else
1990 gen_op_iwmmxt_unpacklul_M0();
1991 break;
1992 case 3:
1993 return 1;
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2000 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2001 wrd = (insn >> 12) & 0xf;
2002 rd0 = (insn >> 16) & 0xf;
2003 gen_op_iwmmxt_movq_M0_wRn(rd0);
2004 switch ((insn >> 22) & 3) {
2005 case 0:
2006 if (insn & (1 << 21))
2007 gen_op_iwmmxt_unpackhsb_M0();
2008 else
2009 gen_op_iwmmxt_unpackhub_M0();
2010 break;
2011 case 1:
2012 if (insn & (1 << 21))
2013 gen_op_iwmmxt_unpackhsw_M0();
2014 else
2015 gen_op_iwmmxt_unpackhuw_M0();
2016 break;
2017 case 2:
2018 if (insn & (1 << 21))
2019 gen_op_iwmmxt_unpackhsl_M0();
2020 else
2021 gen_op_iwmmxt_unpackhul_M0();
2022 break;
2023 case 3:
2024 return 1;
2025 }
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 gen_op_iwmmxt_set_cup();
2029 break;
2030 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2031 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2032 if (((insn >> 22) & 3) == 0)
2033 return 1;
18c9b560
AZ
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2037 tmp = tcg_temp_new_i32();
da6b5335 2038 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2039 tcg_temp_free_i32(tmp);
18c9b560 2040 return 1;
da6b5335 2041 }
18c9b560 2042 switch ((insn >> 22) & 3) {
18c9b560 2043 case 1:
477955bd 2044 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2045 break;
2046 case 2:
477955bd 2047 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2048 break;
2049 case 3:
477955bd 2050 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2051 break;
2052 }
7d1b0095 2053 tcg_temp_free_i32(tmp);
18c9b560
AZ
2054 gen_op_iwmmxt_movq_wRn_M0(wrd);
2055 gen_op_iwmmxt_set_mup();
2056 gen_op_iwmmxt_set_cup();
2057 break;
2058 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2059 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2060 if (((insn >> 22) & 3) == 0)
2061 return 1;
18c9b560
AZ
2062 wrd = (insn >> 12) & 0xf;
2063 rd0 = (insn >> 16) & 0xf;
2064 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2065 tmp = tcg_temp_new_i32();
da6b5335 2066 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2067 tcg_temp_free_i32(tmp);
18c9b560 2068 return 1;
da6b5335 2069 }
18c9b560 2070 switch ((insn >> 22) & 3) {
18c9b560 2071 case 1:
477955bd 2072 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2073 break;
2074 case 2:
477955bd 2075 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 case 3:
477955bd 2078 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2079 break;
2080 }
7d1b0095 2081 tcg_temp_free_i32(tmp);
18c9b560
AZ
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2087 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2088 if (((insn >> 22) & 3) == 0)
2089 return 1;
18c9b560
AZ
2090 wrd = (insn >> 12) & 0xf;
2091 rd0 = (insn >> 16) & 0xf;
2092 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2093 tmp = tcg_temp_new_i32();
da6b5335 2094 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2095 tcg_temp_free_i32(tmp);
18c9b560 2096 return 1;
da6b5335 2097 }
18c9b560 2098 switch ((insn >> 22) & 3) {
18c9b560 2099 case 1:
477955bd 2100 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2101 break;
2102 case 2:
477955bd 2103 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2104 break;
2105 case 3:
477955bd 2106 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2107 break;
2108 }
7d1b0095 2109 tcg_temp_free_i32(tmp);
18c9b560
AZ
2110 gen_op_iwmmxt_movq_wRn_M0(wrd);
2111 gen_op_iwmmxt_set_mup();
2112 gen_op_iwmmxt_set_cup();
2113 break;
2114 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2115 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2116 if (((insn >> 22) & 3) == 0)
2117 return 1;
18c9b560
AZ
2118 wrd = (insn >> 12) & 0xf;
2119 rd0 = (insn >> 16) & 0xf;
2120 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2121 tmp = tcg_temp_new_i32();
18c9b560 2122 switch ((insn >> 22) & 3) {
18c9b560 2123 case 1:
da6b5335 2124 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2125 tcg_temp_free_i32(tmp);
18c9b560 2126 return 1;
da6b5335 2127 }
477955bd 2128 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2129 break;
2130 case 2:
da6b5335 2131 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2132 tcg_temp_free_i32(tmp);
18c9b560 2133 return 1;
da6b5335 2134 }
477955bd 2135 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2136 break;
2137 case 3:
da6b5335 2138 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2139 tcg_temp_free_i32(tmp);
18c9b560 2140 return 1;
da6b5335 2141 }
477955bd 2142 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2143 break;
2144 }
7d1b0095 2145 tcg_temp_free_i32(tmp);
18c9b560
AZ
2146 gen_op_iwmmxt_movq_wRn_M0(wrd);
2147 gen_op_iwmmxt_set_mup();
2148 gen_op_iwmmxt_set_cup();
2149 break;
2150 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2151 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_minub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_minul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2182 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
2187 switch ((insn >> 22) & 3) {
2188 case 0:
2189 if (insn & (1 << 21))
2190 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2191 else
2192 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2193 break;
2194 case 1:
2195 if (insn & (1 << 21))
2196 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2197 else
2198 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2199 break;
2200 case 2:
2201 if (insn & (1 << 21))
2202 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2203 else
2204 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2205 break;
2206 case 3:
2207 return 1;
2208 }
2209 gen_op_iwmmxt_movq_wRn_M0(wrd);
2210 gen_op_iwmmxt_set_mup();
2211 break;
2212 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2213 case 0x402: case 0x502: case 0x602: case 0x702:
2214 wrd = (insn >> 12) & 0xf;
2215 rd0 = (insn >> 16) & 0xf;
2216 rd1 = (insn >> 0) & 0xf;
2217 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2218 tmp = tcg_const_i32((insn >> 20) & 3);
2219 iwmmxt_load_reg(cpu_V1, rd1);
2220 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2221 tcg_temp_free(tmp);
18c9b560
AZ
2222 gen_op_iwmmxt_movq_wRn_M0(wrd);
2223 gen_op_iwmmxt_set_mup();
2224 break;
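            /* Sketch of the WALIGNI operation above (an assumption about the
             * iwmmxt_align helper, whose body is not shown here): with
             * n = (insn >> 20) & 3, the result is bytes n..n+7 of the 16-byte
             * value formed by wRd0 (low half, loaded into cpu_M0) followed by
             * wRd1 (high half, in cpu_V1); for n > 0 that is
             *
             *     result = (wRd0 >> (8 * n)) | (wRd1 << (64 - 8 * n));
             *
             * WALIGNR earlier does the same with n read from wCGRn and masked
             * to 0..7.
             */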
2225 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2226 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2227 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2228 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2229 wrd = (insn >> 12) & 0xf;
2230 rd0 = (insn >> 16) & 0xf;
2231 rd1 = (insn >> 0) & 0xf;
2232 gen_op_iwmmxt_movq_M0_wRn(rd0);
2233 switch ((insn >> 20) & 0xf) {
2234 case 0x0:
2235 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2236 break;
2237 case 0x1:
2238 gen_op_iwmmxt_subub_M0_wRn(rd1);
2239 break;
2240 case 0x3:
2241 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2242 break;
2243 case 0x4:
2244 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2245 break;
2246 case 0x5:
2247 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2248 break;
2249 case 0x7:
2250 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2251 break;
2252 case 0x8:
2253 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2254 break;
2255 case 0x9:
2256 gen_op_iwmmxt_subul_M0_wRn(rd1);
2257 break;
2258 case 0xb:
2259 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2260 break;
2261 default:
2262 return 1;
2263 }
2264 gen_op_iwmmxt_movq_wRn_M0(wrd);
2265 gen_op_iwmmxt_set_mup();
2266 gen_op_iwmmxt_set_cup();
2267 break;
2268 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2269 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2270 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2271 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2275 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2276 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2277 tcg_temp_free(tmp);
18c9b560
AZ
2278 gen_op_iwmmxt_movq_wRn_M0(wrd);
2279 gen_op_iwmmxt_set_mup();
2280 gen_op_iwmmxt_set_cup();
2281 break;
2282 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2283 case 0x418: case 0x518: case 0x618: case 0x718:
2284 case 0x818: case 0x918: case 0xa18: case 0xb18:
2285 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2286 wrd = (insn >> 12) & 0xf;
2287 rd0 = (insn >> 16) & 0xf;
2288 rd1 = (insn >> 0) & 0xf;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
2290 switch ((insn >> 20) & 0xf) {
2291 case 0x0:
2292 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2293 break;
2294 case 0x1:
2295 gen_op_iwmmxt_addub_M0_wRn(rd1);
2296 break;
2297 case 0x3:
2298 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2299 break;
2300 case 0x4:
2301 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2302 break;
2303 case 0x5:
2304 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2305 break;
2306 case 0x7:
2307 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2308 break;
2309 case 0x8:
2310 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2311 break;
2312 case 0x9:
2313 gen_op_iwmmxt_addul_M0_wRn(rd1);
2314 break;
2315 case 0xb:
2316 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2317 break;
2318 default:
2319 return 1;
2320 }
2321 gen_op_iwmmxt_movq_wRn_M0(wrd);
2322 gen_op_iwmmxt_set_mup();
2323 gen_op_iwmmxt_set_cup();
2324 break;
2325 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2326 case 0x408: case 0x508: case 0x608: case 0x708:
2327 case 0x808: case 0x908: case 0xa08: case 0xb08:
2328 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2329 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2330 return 1;
18c9b560
AZ
2331 wrd = (insn >> 12) & 0xf;
2332 rd0 = (insn >> 16) & 0xf;
2333 rd1 = (insn >> 0) & 0xf;
2334 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2335 switch ((insn >> 22) & 3) {
18c9b560
AZ
2336 case 1:
2337 if (insn & (1 << 21))
2338 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2339 else
2340 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2341 break;
2342 case 2:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2345 else
2346 gen_op_iwmmxt_packul_M0_wRn(rd1);
2347 break;
2348 case 3:
2349 if (insn & (1 << 21))
2350 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2351 else
2352 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2353 break;
2354 }
2355 gen_op_iwmmxt_movq_wRn_M0(wrd);
2356 gen_op_iwmmxt_set_mup();
2357 gen_op_iwmmxt_set_cup();
2358 break;
2359 case 0x201: case 0x203: case 0x205: case 0x207:
2360 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2361 case 0x211: case 0x213: case 0x215: case 0x217:
2362 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2363 wrd = (insn >> 5) & 0xf;
2364 rd0 = (insn >> 12) & 0xf;
2365 rd1 = (insn >> 0) & 0xf;
2366 if (rd0 == 0xf || rd1 == 0xf)
2367 return 1;
2368 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2369 tmp = load_reg(s, rd0);
2370 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2371 switch ((insn >> 16) & 0xf) {
2372 case 0x0: /* TMIA */
da6b5335 2373 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2374 break;
2375 case 0x8: /* TMIAPH */
da6b5335 2376 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2377 break;
2378 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2379 if (insn & (1 << 16))
da6b5335 2380 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2381 if (insn & (1 << 17))
da6b5335
FN
2382 tcg_gen_shri_i32(tmp2, tmp2, 16);
2383 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2384 break;
2385 default:
7d1b0095
PM
2386 tcg_temp_free_i32(tmp2);
2387 tcg_temp_free_i32(tmp);
18c9b560
AZ
2388 return 1;
2389 }
7d1b0095
PM
2390 tcg_temp_free_i32(tmp2);
2391 tcg_temp_free_i32(tmp);
18c9b560
AZ
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 break;
2395 default:
2396 return 1;
2397 }
2398
2399 return 0;
2400}
2401
a1c7273b 2402/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2403 (i.e. an undefined instruction). */
0ecb72a5 2404static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2405{
2406 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2407 TCGv tmp, tmp2;
18c9b560
AZ
2408
2409 if ((insn & 0x0ff00f10) == 0x0e200010) {
2410 /* Multiply with Internal Accumulate Format */
2411 rd0 = (insn >> 12) & 0xf;
2412 rd1 = insn & 0xf;
2413 acc = (insn >> 5) & 7;
2414
2415 if (acc != 0)
2416 return 1;
2417
3a554c0f
FN
2418 tmp = load_reg(s, rd0);
2419 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2420 switch ((insn >> 16) & 0xf) {
2421 case 0x0: /* MIA */
3a554c0f 2422 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2423 break;
2424 case 0x8: /* MIAPH */
3a554c0f 2425 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2426 break;
2427 case 0xc: /* MIABB */
2428 case 0xd: /* MIABT */
2429 case 0xe: /* MIATB */
2430 case 0xf: /* MIATT */
18c9b560 2431 if (insn & (1 << 16))
3a554c0f 2432 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2433 if (insn & (1 << 17))
3a554c0f
FN
2434 tcg_gen_shri_i32(tmp2, tmp2, 16);
2435 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2436 break;
2437 default:
2438 return 1;
2439 }
7d1b0095
PM
2440 tcg_temp_free_i32(tmp2);
2441 tcg_temp_free_i32(tmp);
18c9b560
AZ
2442
2443 gen_op_iwmmxt_movq_wRn_M0(acc);
2444 return 0;
2445 }
2446
2447 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2448 /* Internal Accumulator Access Format */
2449 rdhi = (insn >> 16) & 0xf;
2450 rdlo = (insn >> 12) & 0xf;
2451 acc = insn & 7;
2452
2453 if (acc != 0)
2454 return 1;
2455
2456 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2457 iwmmxt_load_reg(cpu_V0, acc);
2458 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2459 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2460 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2461 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2462 } else { /* MAR */
3a554c0f
FN
2463 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2464 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2465 }
2466 return 0;
2467 }
2468
2469 return 1;
2470}
2471
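/* Host-side sketch of the MRA split above (illustrative only, not used by the
 * translator): the XScale accumulator is 40 bits wide, so RdLo receives bits
 * [31:0] and RdHi receives bits [39:32], which is what the
 * (1 << (40 - 32)) - 1 mask implements.  The function name is hypothetical.
 */
static inline void example_split_acc40(uint64_t acc, uint32_t *rdlo,
                                       uint32_t *rdhi)
{
    *rdlo = (uint32_t)acc;                 /* low 32 bits of the accumulator */
    *rdhi = (uint32_t)(acc >> 32) & 0xff;  /* bits [39:32] only */
}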
9ee6e8bb
PB
2472#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2473#define VFP_SREG(insn, bigbit, smallbit) \
2474 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2475#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2476 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2477 reg = (((insn) >> (bigbit)) & 0x0f) \
2478 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2479 } else { \
2480 if (insn & (1 << (smallbit))) \
2481 return 1; \
2482 reg = ((insn) >> (bigbit)) & 0x0f; \
2483 }} while (0)
2484
2485#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2486#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2487#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2488#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2489#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2490#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2491
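/* Illustrative sketch of the register numbering the VFP_SREG/VFP_DREG macros
 * above decode (hypothetical helper, not used by the translator): a
 * single-precision register number is the 4-bit field doubled plus the extra
 * bit, while a double-precision register on VFP3 uses the extra bit as bit 4;
 * without VFP3 the extra bit must be zero, as the macro's else branch checks.
 */
static inline int example_vfp_reg_number(int field4, int extra_bit, int is_double)
{
    if (is_double) {
        return field4 | (extra_bit << 4);   /* D0..D31 (VFP3) */
    }
    return (field4 << 1) | extra_bit;       /* S0..S31 */
}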
4373f3ce
PB
2492/* Move between integer and VFP cores. */
2493static TCGv gen_vfp_mrs(void)
2494{
7d1b0095 2495 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2496 tcg_gen_mov_i32(tmp, cpu_F0s);
2497 return tmp;
2498}
2499
2500static void gen_vfp_msr(TCGv tmp)
2501{
2502 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2503 tcg_temp_free_i32(tmp);
4373f3ce
PB
2504}
2505
ad69471c
PB
2506static void gen_neon_dup_u8(TCGv var, int shift)
2507{
7d1b0095 2508 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2509 if (shift)
2510 tcg_gen_shri_i32(var, var, shift);
86831435 2511 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2512 tcg_gen_shli_i32(tmp, var, 8);
2513 tcg_gen_or_i32(var, var, tmp);
2514 tcg_gen_shli_i32(tmp, var, 16);
2515 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2516 tcg_temp_free_i32(tmp);
ad69471c
PB
2517}
2518
2519static void gen_neon_dup_low16(TCGv var)
2520{
7d1b0095 2521 TCGv tmp = tcg_temp_new_i32();
86831435 2522 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2523 tcg_gen_shli_i32(tmp, var, 16);
2524 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2525 tcg_temp_free_i32(tmp);
ad69471c
PB
2526}
2527
2528static void gen_neon_dup_high16(TCGv var)
2529{
7d1b0095 2530 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2531 tcg_gen_andi_i32(var, var, 0xffff0000);
2532 tcg_gen_shri_i32(tmp, var, 16);
2533 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2534 tcg_temp_free_i32(tmp);
ad69471c
PB
2535}
2536
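/* Host-side sketch of what the gen_neon_dup_* helpers above generate
 * (illustrative only, hypothetical names): a byte or halfword lane is
 * replicated across a 32-bit value, e.g. 0xab -> 0xabababab and
 * 0x1234 -> 0x12341234.
 */
static inline uint32_t example_dup_u8(uint32_t val, int shift)
{
    return ((val >> shift) & 0xff) * 0x01010101u;
}

static inline uint32_t example_dup_low16(uint32_t val)
{
    return (val & 0xffff) * 0x00010001u;
}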
8e18cde3
PM
2537static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2538{
2539 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2540 TCGv tmp;
2541 switch (size) {
2542 case 0:
2543 tmp = gen_ld8u(addr, IS_USER(s));
2544 gen_neon_dup_u8(tmp, 0);
2545 break;
2546 case 1:
2547 tmp = gen_ld16u(addr, IS_USER(s));
2548 gen_neon_dup_low16(tmp);
2549 break;
2550 case 2:
2551 tmp = gen_ld32(addr, IS_USER(s));
2552 break;
2553 default: /* Avoid compiler warnings. */
2554 abort();
2555 }
2556 return tmp;
2557}
2558
a1c7273b 2559/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2560 (i.e. an undefined instruction). */
0ecb72a5 2561static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2562{
2563 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2564 int dp, veclen;
312eea9f 2565 TCGv addr;
4373f3ce 2566 TCGv tmp;
ad69471c 2567 TCGv tmp2;
b7bcbe95 2568
40f137e1
PB
2569 if (!arm_feature(env, ARM_FEATURE_VFP))
2570 return 1;
2571
5df8bac1 2572 if (!s->vfp_enabled) {
9ee6e8bb 2573 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2574 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2575 return 1;
2576 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2577 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2578 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2579 return 1;
2580 }
b7bcbe95
FB
2581 dp = ((insn & 0xf00) == 0xb00);
2582 switch ((insn >> 24) & 0xf) {
2583 case 0xe:
2584 if (insn & (1 << 4)) {
2585 /* single register transfer */
b7bcbe95
FB
2586 rd = (insn >> 12) & 0xf;
2587 if (dp) {
9ee6e8bb
PB
2588 int size;
2589 int pass;
2590
2591 VFP_DREG_N(rn, insn);
2592 if (insn & 0xf)
b7bcbe95 2593 return 1;
9ee6e8bb
PB
2594 if (insn & 0x00c00060
2595 && !arm_feature(env, ARM_FEATURE_NEON))
2596 return 1;
2597
2598 pass = (insn >> 21) & 1;
2599 if (insn & (1 << 22)) {
2600 size = 0;
2601 offset = ((insn >> 5) & 3) * 8;
2602 } else if (insn & (1 << 5)) {
2603 size = 1;
2604 offset = (insn & (1 << 6)) ? 16 : 0;
2605 } else {
2606 size = 2;
2607 offset = 0;
2608 }
18c9b560 2609 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2610 /* vfp->arm */
ad69471c 2611 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2612 switch (size) {
2613 case 0:
9ee6e8bb 2614 if (offset)
ad69471c 2615 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2616 if (insn & (1 << 23))
ad69471c 2617 gen_uxtb(tmp);
9ee6e8bb 2618 else
ad69471c 2619 gen_sxtb(tmp);
9ee6e8bb
PB
2620 break;
2621 case 1:
9ee6e8bb
PB
2622 if (insn & (1 << 23)) {
2623 if (offset) {
ad69471c 2624 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2625 } else {
ad69471c 2626 gen_uxth(tmp);
9ee6e8bb
PB
2627 }
2628 } else {
2629 if (offset) {
ad69471c 2630 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2631 } else {
ad69471c 2632 gen_sxth(tmp);
9ee6e8bb
PB
2633 }
2634 }
2635 break;
2636 case 2:
9ee6e8bb
PB
2637 break;
2638 }
ad69471c 2639 store_reg(s, rd, tmp);
b7bcbe95
FB
2640 } else {
2641 /* arm->vfp */
ad69471c 2642 tmp = load_reg(s, rd);
9ee6e8bb
PB
2643 if (insn & (1 << 23)) {
2644 /* VDUP */
2645 if (size == 0) {
ad69471c 2646 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2647 } else if (size == 1) {
ad69471c 2648 gen_neon_dup_low16(tmp);
9ee6e8bb 2649 }
cbbccffc 2650 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2651 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2652 tcg_gen_mov_i32(tmp2, tmp);
2653 neon_store_reg(rn, n, tmp2);
2654 }
2655 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2656 } else {
2657 /* VMOV */
2658 switch (size) {
2659 case 0:
ad69471c 2660 tmp2 = neon_load_reg(rn, pass);
d593c48e 2661 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2662 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2663 break;
2664 case 1:
ad69471c 2665 tmp2 = neon_load_reg(rn, pass);
d593c48e 2666 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2667 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2668 break;
2669 case 2:
9ee6e8bb
PB
2670 break;
2671 }
ad69471c 2672 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2673 }
b7bcbe95 2674 }
9ee6e8bb
PB
2675 } else { /* !dp */
2676 if ((insn & 0x6f) != 0x00)
2677 return 1;
2678 rn = VFP_SREG_N(insn);
18c9b560 2679 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2680 /* vfp->arm */
2681 if (insn & (1 << 21)) {
2682 /* system register */
40f137e1 2683 rn >>= 1;
9ee6e8bb 2684
b7bcbe95 2685 switch (rn) {
40f137e1 2686 case ARM_VFP_FPSID:
4373f3ce 2687 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2688 VFP3 restricts all id registers to privileged
2689 accesses. */
2690 if (IS_USER(s)
2691 && arm_feature(env, ARM_FEATURE_VFP3))
2692 return 1;
4373f3ce 2693 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2694 break;
40f137e1 2695 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2696 if (IS_USER(s))
2697 return 1;
4373f3ce 2698 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2699 break;
40f137e1
PB
2700 case ARM_VFP_FPINST:
2701 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2702 /* Not present in VFP3. */
2703 if (IS_USER(s)
2704 || arm_feature(env, ARM_FEATURE_VFP3))
2705 return 1;
4373f3ce 2706 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2707 break;
40f137e1 2708 case ARM_VFP_FPSCR:
601d70b9 2709 if (rd == 15) {
4373f3ce
PB
2710 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2711 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2712 } else {
7d1b0095 2713 tmp = tcg_temp_new_i32();
4373f3ce
PB
2714 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2715 }
b7bcbe95 2716 break;
9ee6e8bb
PB
2717 case ARM_VFP_MVFR0:
2718 case ARM_VFP_MVFR1:
2719 if (IS_USER(s)
06ed5d66 2720 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2721 return 1;
4373f3ce 2722 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2723 break;
b7bcbe95
FB
2724 default:
2725 return 1;
2726 }
2727 } else {
2728 gen_mov_F0_vreg(0, rn);
4373f3ce 2729 tmp = gen_vfp_mrs();
b7bcbe95
FB
2730 }
2731 if (rd == 15) {
b5ff1b31 2732 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2733 gen_set_nzcv(tmp);
7d1b0095 2734 tcg_temp_free_i32(tmp);
4373f3ce
PB
2735 } else {
2736 store_reg(s, rd, tmp);
2737 }
b7bcbe95
FB
2738 } else {
2739 /* arm->vfp */
4373f3ce 2740 tmp = load_reg(s, rd);
b7bcbe95 2741 if (insn & (1 << 21)) {
40f137e1 2742 rn >>= 1;
b7bcbe95
FB
2743 /* system register */
2744 switch (rn) {
40f137e1 2745 case ARM_VFP_FPSID:
9ee6e8bb
PB
2746 case ARM_VFP_MVFR0:
2747 case ARM_VFP_MVFR1:
b7bcbe95
FB
2748 /* Writes are ignored. */
2749 break;
40f137e1 2750 case ARM_VFP_FPSCR:
4373f3ce 2751 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2752 tcg_temp_free_i32(tmp);
b5ff1b31 2753 gen_lookup_tb(s);
b7bcbe95 2754 break;
40f137e1 2755 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2756 if (IS_USER(s))
2757 return 1;
71b3c3de
JR
2758 /* TODO: VFP subarchitecture support.
2759 * For now, keep the EN bit only */
2760 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2761 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2762 gen_lookup_tb(s);
2763 break;
2764 case ARM_VFP_FPINST:
2765 case ARM_VFP_FPINST2:
4373f3ce 2766 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2767 break;
b7bcbe95
FB
2768 default:
2769 return 1;
2770 }
2771 } else {
4373f3ce 2772 gen_vfp_msr(tmp);
b7bcbe95
FB
2773 gen_mov_vreg_F0(0, rn);
2774 }
2775 }
2776 }
2777 } else {
2778 /* data processing */
2779 /* The opcode is in bits 23, 21, 20 and 6. */
2780 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2781 if (dp) {
2782 if (op == 15) {
2783 /* rn is opcode */
2784 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2785 } else {
2786 /* rn is register number */
9ee6e8bb 2787 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2788 }
2789
04595bf6 2790 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2791 /* Integer or single precision destination. */
9ee6e8bb 2792 rd = VFP_SREG_D(insn);
b7bcbe95 2793 } else {
9ee6e8bb 2794 VFP_DREG_D(rd, insn);
b7bcbe95 2795 }
04595bf6
PM
2796 if (op == 15 &&
2797 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2798 /* VCVT from int is always from S reg regardless of dp bit.
2799 * VCVT with immediate frac_bits has same format as SREG_M
2800 */
2801 rm = VFP_SREG_M(insn);
b7bcbe95 2802 } else {
9ee6e8bb 2803 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2804 }
2805 } else {
9ee6e8bb 2806 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2807 if (op == 15 && rn == 15) {
2808 /* Double precision destination. */
9ee6e8bb
PB
2809 VFP_DREG_D(rd, insn);
2810 } else {
2811 rd = VFP_SREG_D(insn);
2812 }
04595bf6
PM
2813 /* NB that we implicitly rely on the encoding for the frac_bits
2814 * in VCVT of fixed to float being the same as that of an SREG_M
2815 */
9ee6e8bb 2816 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2817 }
2818
69d1fc22 2819 veclen = s->vec_len;
b7bcbe95
FB
2820 if (op == 15 && rn > 3)
2821 veclen = 0;
2822
2823 /* Shut up compiler warnings. */
2824 delta_m = 0;
2825 delta_d = 0;
2826 bank_mask = 0;
3b46e624 2827
b7bcbe95
FB
2828 if (veclen > 0) {
2829 if (dp)
2830 bank_mask = 0xc;
2831 else
2832 bank_mask = 0x18;
2833
2834 /* Figure out what type of vector operation this is. */
2835 if ((rd & bank_mask) == 0) {
2836 /* scalar */
2837 veclen = 0;
2838 } else {
2839 if (dp)
69d1fc22 2840 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2841 else
69d1fc22 2842 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2843
2844 if ((rm & bank_mask) == 0) {
2845 /* mixed scalar/vector */
2846 delta_m = 0;
2847 } else {
2848 /* vector */
2849 delta_m = delta_d;
2850 }
2851 }
2852 }
2853
2854 /* Load the initial operands. */
2855 if (op == 15) {
2856 switch (rn) {
2857 case 16:
2858 case 17:
2859 /* Integer source */
2860 gen_mov_F0_vreg(0, rm);
2861 break;
2862 case 8:
2863 case 9:
2864 /* Compare */
2865 gen_mov_F0_vreg(dp, rd);
2866 gen_mov_F1_vreg(dp, rm);
2867 break;
2868 case 10:
2869 case 11:
2870 /* Compare with zero */
2871 gen_mov_F0_vreg(dp, rd);
2872 gen_vfp_F1_ld0(dp);
2873 break;
9ee6e8bb
PB
2874 case 20:
2875 case 21:
2876 case 22:
2877 case 23:
644ad806
PB
2878 case 28:
2879 case 29:
2880 case 30:
2881 case 31:
9ee6e8bb
PB
2882 /* Source and destination the same. */
2883 gen_mov_F0_vreg(dp, rd);
2884 break;
6e0c0ed1
PM
2885 case 4:
2886 case 5:
2887 case 6:
2888 case 7:
2889 /* VCVTB, VCVTT: only present with the halfprec extension,
2890 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2891 */
2892 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2893 return 1;
2894 }
2895 /* Otherwise fall through */
b7bcbe95
FB
2896 default:
2897 /* One source operand. */
2898 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2899 break;
b7bcbe95
FB
2900 }
2901 } else {
2902 /* Two source operands. */
2903 gen_mov_F0_vreg(dp, rn);
2904 gen_mov_F1_vreg(dp, rm);
2905 }
2906
2907 for (;;) {
2908 /* Perform the calculation. */
2909 switch (op) {
605a6aed
PM
2910 case 0: /* VMLA: fd + (fn * fm) */
2911 /* Note that order of inputs to the add matters for NaNs */
2912 gen_vfp_F1_mul(dp);
2913 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2914 gen_vfp_add(dp);
2915 break;
605a6aed 2916 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2917 gen_vfp_mul(dp);
605a6aed
PM
2918 gen_vfp_F1_neg(dp);
2919 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2920 gen_vfp_add(dp);
2921 break;
605a6aed
PM
2922 case 2: /* VNMLS: -fd + (fn * fm) */
2923 /* Note that it isn't valid to replace (-A + B) with (B - A)
2924 * or similar plausible looking simplifications
2925 * because this will give wrong results for NaNs.
2926 */
2927 gen_vfp_F1_mul(dp);
2928 gen_mov_F0_vreg(dp, rd);
2929 gen_vfp_neg(dp);
2930 gen_vfp_add(dp);
b7bcbe95 2931 break;
605a6aed 2932 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2933 gen_vfp_mul(dp);
605a6aed
PM
2934 gen_vfp_F1_neg(dp);
2935 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2936 gen_vfp_neg(dp);
605a6aed 2937 gen_vfp_add(dp);
b7bcbe95
FB
2938 break;
2939 case 4: /* mul: fn * fm */
2940 gen_vfp_mul(dp);
2941 break;
2942 case 5: /* nmul: -(fn * fm) */
2943 gen_vfp_mul(dp);
2944 gen_vfp_neg(dp);
2945 break;
2946 case 6: /* add: fn + fm */
2947 gen_vfp_add(dp);
2948 break;
2949 case 7: /* sub: fn - fm */
2950 gen_vfp_sub(dp);
2951 break;
2952 case 8: /* div: fn / fm */
2953 gen_vfp_div(dp);
2954 break;
da97f52c
PM
2955 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2956 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2957 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2958 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2959 /* These are fused multiply-add, and must be done as one
2960 * floating point operation with no rounding between the
2961 * multiplication and addition steps.
2962 * NB that doing the negations here as separate steps is
2963 * correct: an input NaN should come out with its sign bit
2964 * flipped if it is a negated input.
2965 */
2966 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2967 return 1;
2968 }
2969 if (dp) {
2970 TCGv_ptr fpst;
2971 TCGv_i64 frd;
2972 if (op & 1) {
2973 /* VFNMS, VFMS */
2974 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2975 }
2976 frd = tcg_temp_new_i64();
2977 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2978 if (op & 2) {
2979 /* VFNMA, VFNMS */
2980 gen_helper_vfp_negd(frd, frd);
2981 }
2982 fpst = get_fpstatus_ptr(0);
2983 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2984 cpu_F1d, frd, fpst);
2985 tcg_temp_free_ptr(fpst);
2986 tcg_temp_free_i64(frd);
2987 } else {
2988 TCGv_ptr fpst;
2989 TCGv_i32 frd;
2990 if (op & 1) {
2991 /* VFNMS, VFMS */
2992 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
2993 }
2994 frd = tcg_temp_new_i32();
2995 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
2996 if (op & 2) {
2997 gen_helper_vfp_negs(frd, frd);
2998 }
2999 fpst = get_fpstatus_ptr(0);
3000 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3001 cpu_F1s, frd, fpst);
3002 tcg_temp_free_ptr(fpst);
3003 tcg_temp_free_i32(frd);
3004 }
3005 break;
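                /* Summary of the four fused multiply-add encodings handled
                 * above, using the C convention fma(a, b, c) = a * b + c:
                 *
                 *   op 12  VFMA   fd = fma( fn, fm,  fd)
                 *   op 13  VFMS   fd = fma(-fn, fm,  fd)
                 *   op 10  VFNMA  fd = fma( fn, fm, -fd)
                 *   op 11  VFNMS  fd = fma(-fn, fm, -fd)
                 *
                 * i.e. op & 1 negates the multiplicand in cpu_F0 and op & 2
                 * negates the addend loaded from rd, with a single rounding
                 * step inside the muladd helper.
                 */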
9ee6e8bb
PB
3006 case 14: /* fconst */
3007 if (!arm_feature(env, ARM_FEATURE_VFP3))
3008 return 1;
3009
3010 n = (insn << 12) & 0x80000000;
3011 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3012 if (dp) {
3013 if (i & 0x40)
3014 i |= 0x3f80;
3015 else
3016 i |= 0x4000;
3017 n |= i << 16;
4373f3ce 3018 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3019 } else {
3020 if (i & 0x40)
3021 i |= 0x780;
3022 else
3023 i |= 0x800;
3024 n |= i << 19;
5b340b51 3025 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3026 }
9ee6e8bb 3027 break;
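            /* Worked example of the VFP3 immediate expansion above (single
             * precision path), writing the 8-bit immediate as abcdefgh:
             * sign = a, exponent = NOT(b) b b b b b c d, fraction = efgh
             * followed by zeros.
             *
             *   imm8 = 0x70: i = 0x70, bit 6 set   -> i |= 0x780 -> 0x7f0,
             *                n = 0x7f0 << 19       -> 0x3f800000 = 1.0f
             *   imm8 = 0x00: i = 0x00, bit 6 clear -> i |= 0x800 -> 0x800,
             *                n = 0x800 << 19       -> 0x40000000 = 2.0f
             *
             * The double-precision path builds the same value with an 11-bit
             * exponent in the top half of a 64-bit word.
             */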
b7bcbe95
FB
3028 case 15: /* extension space */
3029 switch (rn) {
3030 case 0: /* cpy */
3031 /* no-op */
3032 break;
3033 case 1: /* abs */
3034 gen_vfp_abs(dp);
3035 break;
3036 case 2: /* neg */
3037 gen_vfp_neg(dp);
3038 break;
3039 case 3: /* sqrt */
3040 gen_vfp_sqrt(dp);
3041 break;
60011498 3042 case 4: /* vcvtb.f32.f16 */
60011498
PB
3043 tmp = gen_vfp_mrs();
3044 tcg_gen_ext16u_i32(tmp, tmp);
3045 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3046 tcg_temp_free_i32(tmp);
60011498
PB
3047 break;
3048 case 5: /* vcvtt.f32.f16 */
60011498
PB
3049 tmp = gen_vfp_mrs();
3050 tcg_gen_shri_i32(tmp, tmp, 16);
3051 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3052 tcg_temp_free_i32(tmp);
60011498
PB
3053 break;
3054 case 6: /* vcvtb.f16.f32 */
7d1b0095 3055 tmp = tcg_temp_new_i32();
60011498
PB
3056 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3057 gen_mov_F0_vreg(0, rd);
3058 tmp2 = gen_vfp_mrs();
3059 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3060 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3061 tcg_temp_free_i32(tmp2);
60011498
PB
3062 gen_vfp_msr(tmp);
3063 break;
3064 case 7: /* vcvtt.f16.f32 */
7d1b0095 3065 tmp = tcg_temp_new_i32();
60011498
PB
3066 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3067 tcg_gen_shli_i32(tmp, tmp, 16);
3068 gen_mov_F0_vreg(0, rd);
3069 tmp2 = gen_vfp_mrs();
3070 tcg_gen_ext16u_i32(tmp2, tmp2);
3071 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3072 tcg_temp_free_i32(tmp2);
60011498
PB
3073 gen_vfp_msr(tmp);
3074 break;
b7bcbe95
FB
3075 case 8: /* cmp */
3076 gen_vfp_cmp(dp);
3077 break;
3078 case 9: /* cmpe */
3079 gen_vfp_cmpe(dp);
3080 break;
3081 case 10: /* cmpz */
3082 gen_vfp_cmp(dp);
3083 break;
3084 case 11: /* cmpez */
3085 gen_vfp_F1_ld0(dp);
3086 gen_vfp_cmpe(dp);
3087 break;
3088 case 15: /* single<->double conversion */
3089 if (dp)
4373f3ce 3090 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3091 else
4373f3ce 3092 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3093 break;
3094 case 16: /* fuito */
5500b06c 3095 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3096 break;
3097 case 17: /* fsito */
5500b06c 3098 gen_vfp_sito(dp, 0);
b7bcbe95 3099 break;
9ee6e8bb
PB
3100 case 20: /* fshto */
3101 if (!arm_feature(env, ARM_FEATURE_VFP3))
3102 return 1;
5500b06c 3103 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3104 break;
3105 case 21: /* fslto */
3106 if (!arm_feature(env, ARM_FEATURE_VFP3))
3107 return 1;
5500b06c 3108 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3109 break;
3110 case 22: /* fuhto */
3111 if (!arm_feature(env, ARM_FEATURE_VFP3))
3112 return 1;
5500b06c 3113 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3114 break;
3115 case 23: /* fulto */
3116 if (!arm_feature(env, ARM_FEATURE_VFP3))
3117 return 1;
5500b06c 3118 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3119 break;
b7bcbe95 3120 case 24: /* ftoui */
5500b06c 3121 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3122 break;
3123 case 25: /* ftouiz */
5500b06c 3124 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3125 break;
3126 case 26: /* ftosi */
5500b06c 3127 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3128 break;
3129 case 27: /* ftosiz */
5500b06c 3130 gen_vfp_tosiz(dp, 0);
b7bcbe95 3131 break;
9ee6e8bb
PB
3132 case 28: /* ftosh */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
5500b06c 3135 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3136 break;
3137 case 29: /* ftosl */
3138 if (!arm_feature(env, ARM_FEATURE_VFP3))
3139 return 1;
5500b06c 3140 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3141 break;
3142 case 30: /* ftouh */
3143 if (!arm_feature(env, ARM_FEATURE_VFP3))
3144 return 1;
5500b06c 3145 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3146 break;
3147 case 31: /* ftoul */
3148 if (!arm_feature(env, ARM_FEATURE_VFP3))
3149 return 1;
5500b06c 3150 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3151 break;
b7bcbe95 3152 default: /* undefined */
b7bcbe95
FB
3153 return 1;
3154 }
3155 break;
3156 default: /* undefined */
b7bcbe95
FB
3157 return 1;
3158 }
3159
3160 /* Write back the result. */
3161 if (op == 15 && (rn >= 8 && rn <= 11))
3162 ; /* Comparison, do nothing. */
04595bf6
PM
3163 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3164 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3165 gen_mov_vreg_F0(0, rd);
3166 else if (op == 15 && rn == 15)
3167 /* conversion */
3168 gen_mov_vreg_F0(!dp, rd);
3169 else
3170 gen_mov_vreg_F0(dp, rd);
3171
3172 /* break out of the loop if we have finished */
3173 if (veclen == 0)
3174 break;
3175
3176 if (op == 15 && delta_m == 0) {
3177 /* single source one-many */
3178 while (veclen--) {
3179 rd = ((rd + delta_d) & (bank_mask - 1))
3180 | (rd & bank_mask);
3181 gen_mov_vreg_F0(dp, rd);
3182 }
3183 break;
3184 }
3185 /* Setup the next operands. */
3186 veclen--;
3187 rd = ((rd + delta_d) & (bank_mask - 1))
3188 | (rd & bank_mask);
3189
3190 if (op == 15) {
3191 /* One source operand. */
3192 rm = ((rm + delta_m) & (bank_mask - 1))
3193 | (rm & bank_mask);
3194 gen_mov_F0_vreg(dp, rm);
3195 } else {
3196 /* Two source operands. */
3197 rn = ((rn + delta_d) & (bank_mask - 1))
3198 | (rn & bank_mask);
3199 gen_mov_F0_vreg(dp, rn);
3200 if (delta_m) {
3201 rm = ((rm + delta_m) & (bank_mask - 1))
3202 | (rm & bank_mask);
3203 gen_mov_F1_vreg(dp, rm);
3204 }
3205 }
3206 }
3207 }
3208 break;
3209 case 0xc:
3210 case 0xd:
8387da81 3211 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3212 /* two-register transfer */
3213 rn = (insn >> 16) & 0xf;
3214 rd = (insn >> 12) & 0xf;
3215 if (dp) {
9ee6e8bb
PB
3216 VFP_DREG_M(rm, insn);
3217 } else {
3218 rm = VFP_SREG_M(insn);
3219 }
b7bcbe95 3220
18c9b560 3221 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3222 /* vfp->arm */
3223 if (dp) {
4373f3ce
PB
3224 gen_mov_F0_vreg(0, rm * 2);
3225 tmp = gen_vfp_mrs();
3226 store_reg(s, rd, tmp);
3227 gen_mov_F0_vreg(0, rm * 2 + 1);
3228 tmp = gen_vfp_mrs();
3229 store_reg(s, rn, tmp);
b7bcbe95
FB
3230 } else {
3231 gen_mov_F0_vreg(0, rm);
4373f3ce 3232 tmp = gen_vfp_mrs();
8387da81 3233 store_reg(s, rd, tmp);
b7bcbe95 3234 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3235 tmp = gen_vfp_mrs();
8387da81 3236 store_reg(s, rn, tmp);
b7bcbe95
FB
3237 }
3238 } else {
3239 /* arm->vfp */
3240 if (dp) {
4373f3ce
PB
3241 tmp = load_reg(s, rd);
3242 gen_vfp_msr(tmp);
3243 gen_mov_vreg_F0(0, rm * 2);
3244 tmp = load_reg(s, rn);
3245 gen_vfp_msr(tmp);
3246 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3247 } else {
8387da81 3248 tmp = load_reg(s, rd);
4373f3ce 3249 gen_vfp_msr(tmp);
b7bcbe95 3250 gen_mov_vreg_F0(0, rm);
8387da81 3251 tmp = load_reg(s, rn);
4373f3ce 3252 gen_vfp_msr(tmp);
b7bcbe95
FB
3253 gen_mov_vreg_F0(0, rm + 1);
3254 }
3255 }
3256 } else {
3257 /* Load/store */
3258 rn = (insn >> 16) & 0xf;
3259 if (dp)
9ee6e8bb 3260 VFP_DREG_D(rd, insn);
b7bcbe95 3261 else
9ee6e8bb 3262 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3263 if ((insn & 0x01200000) == 0x01000000) {
3264 /* Single load/store */
3265 offset = (insn & 0xff) << 2;
3266 if ((insn & (1 << 23)) == 0)
3267 offset = -offset;
934814f1
PM
3268 if (s->thumb && rn == 15) {
3269 /* This is actually UNPREDICTABLE */
3270 addr = tcg_temp_new_i32();
3271 tcg_gen_movi_i32(addr, s->pc & ~2);
3272 } else {
3273 addr = load_reg(s, rn);
3274 }
312eea9f 3275 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3276 if (insn & (1 << 20)) {
312eea9f 3277 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3278 gen_mov_vreg_F0(dp, rd);
3279 } else {
3280 gen_mov_F0_vreg(dp, rd);
312eea9f 3281 gen_vfp_st(s, dp, addr);
b7bcbe95 3282 }
7d1b0095 3283 tcg_temp_free_i32(addr);
b7bcbe95
FB
3284 } else {
3285 /* load/store multiple */
934814f1 3286 int w = insn & (1 << 21);
b7bcbe95
FB
3287 if (dp)
3288 n = (insn >> 1) & 0x7f;
3289 else
3290 n = insn & 0xff;
3291
934814f1
PM
3292 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3293 /* P == U , W == 1 => UNDEF */
3294 return 1;
3295 }
3296 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3297 /* UNPREDICTABLE cases for bad immediates: we choose to
3298 * UNDEF to avoid generating huge numbers of TCG ops
3299 */
3300 return 1;
3301 }
3302 if (rn == 15 && w) {
3303 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3304 return 1;
3305 }
3306
3307 if (s->thumb && rn == 15) {
3308 /* This is actually UNPREDICTABLE */
3309 addr = tcg_temp_new_i32();
3310 tcg_gen_movi_i32(addr, s->pc & ~2);
3311 } else {
3312 addr = load_reg(s, rn);
3313 }
b7bcbe95 3314 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3315 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
18c9b560 3322 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3323 /* load */
312eea9f 3324 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3329 gen_vfp_st(s, dp, addr);
b7bcbe95 3330 }
312eea9f 3331 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3332 }
934814f1 3333 if (w) {
b7bcbe95
FB
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
312eea9f
FN
3343 tcg_gen_addi_i32(addr, addr, offset);
3344 store_reg(s, rn, addr);
3345 } else {
7d1b0095 3346 tcg_temp_free_i32(addr);
b7bcbe95
FB
3347 }
3348 }
3349 }
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3354 }
3355 return 0;
3356}
3357
6e256c93 3358static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3359{
6e256c93
FB
3360 TranslationBlock *tb;
3361
3362 tb = s->tb;
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3364 tcg_gen_goto_tb(n);
8984bd2e 3365 gen_set_pc_im(dest);
4b4a72e5 3366 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3367 } else {
8984bd2e 3368 gen_set_pc_im(dest);
57fec1fe 3369 tcg_gen_exit_tb(0);
6e256c93 3370 }
c53be334
FB
3371}
3372
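/* Illustrative sketch of the test gen_goto_tb() applies above (hypothetical
 * helper, not used by the translator): a direct tcg_gen_goto_tb() chain is
 * only emitted when the destination lies on the same guest page as the
 * current TB; otherwise the PC is set and we exit back to the main loop.
 * The page size here is an assumption for the example; the real code uses
 * TARGET_PAGE_MASK.
 */
static inline int example_same_page(uint32_t tb_pc, uint32_t dest)
{
    const uint32_t page_mask = ~(uint32_t)0xfff;   /* assume 4K guest pages */
    return (tb_pc & page_mask) == (dest & page_mask);
}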
8aaca4c0
FB
3373static inline void gen_jmp (DisasContext *s, uint32_t dest)
3374{
551bd27f 3375 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3376 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3377 if (s->thumb)
d9ba4830
PB
3378 dest |= 1;
3379 gen_bx_im(s, dest);
8aaca4c0 3380 } else {
6e256c93 3381 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3382 s->is_jmp = DISAS_TB_JUMP;
3383 }
3384}
3385
d9ba4830 3386static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3387{
ee097184 3388 if (x)
d9ba4830 3389 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3390 else
d9ba4830 3391 gen_sxth(t0);
ee097184 3392 if (y)
d9ba4830 3393 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3394 else
d9ba4830
PB
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3397}
3398
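/* Host-side sketch of the 16x16 multiply gen_mulxy() generates above
 * (illustrative only, hypothetical name): x and y select the top or bottom
 * half of each operand, the halves are sign-extended and multiplied, as in
 * SMULBB/SMULBT/SMULTB/SMULTT.  The >> 16 mirrors the arithmetic sari op.
 */
static inline int32_t example_mulxy(int32_t t0, int32_t t1, int x, int y)
{
    int32_t a = x ? (t0 >> 16) : (int16_t)t0;
    int32_t b = y ? (t1 >> 16) : (int16_t)t1;
    return a * b;
}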
3399/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3400static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3401 uint32_t mask;
3402
3403 mask = 0;
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
9ee6e8bb 3412
2ae23e75 3413 /* Mask out undefined bits. */
9ee6e8bb 3414 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3415 if (!arm_feature(env, ARM_FEATURE_V4T))
3416 mask &= ~CPSR_T;
3417 if (!arm_feature(env, ARM_FEATURE_V5))
3418 mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3419 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3420 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3421 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3422 mask &= ~CPSR_IT;
9ee6e8bb 3423 /* Mask out execution state bits. */
2ae23e75 3424 if (!spsr)
e160c51c 3425 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3426 /* Mask out privileged bits. */
3427 if (IS_USER(s))
9ee6e8bb 3428 mask &= CPSR_USER;
b5ff1b31
FB
3429 return mask;
3430}
3431
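/* Worked sketch of the field mask built by msr_mask() above (illustrative
 * only, hypothetical name): each of the four flag bits selects one byte of
 * the PSR, giving the usual c/x/s/f fields, before the feature- and
 * mode-dependent bits are cleared.  For example "MSR CPSR_fc, r0" has
 * flags = 0b1001 -> 0xff0000ff.
 */
static inline uint32_t example_msr_field_mask(int flags)
{
    uint32_t mask = 0;
    if (flags & 1) mask |= 0x000000ff;   /* c: control field   */
    if (flags & 2) mask |= 0x0000ff00;   /* x: extension field */
    if (flags & 4) mask |= 0x00ff0000;   /* s: status field    */
    if (flags & 8) mask |= 0xff000000;   /* f: flags field     */
    return mask;
}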
2fbac54b
FN
3432/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3433static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3434{
d9ba4830 3435 TCGv tmp;
b5ff1b31
FB
3436 if (spsr) {
3437 /* ??? This is also undefined in system mode. */
3438 if (IS_USER(s))
3439 return 1;
d9ba4830
PB
3440
3441 tmp = load_cpu_field(spsr);
3442 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3443 tcg_gen_andi_i32(t0, t0, mask);
3444 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3445 store_cpu_field(tmp, spsr);
b5ff1b31 3446 } else {
2fbac54b 3447 gen_set_cpsr(t0, mask);
b5ff1b31 3448 }
7d1b0095 3449 tcg_temp_free_i32(t0);
b5ff1b31
FB
3450 gen_lookup_tb(s);
3451 return 0;
3452}
3453
2fbac54b
FN
3454/* Returns nonzero if access to the PSR is not permitted. */
3455static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3456{
3457 TCGv tmp;
7d1b0095 3458 tmp = tcg_temp_new_i32();
2fbac54b
FN
3459 tcg_gen_movi_i32(tmp, val);
3460 return gen_set_psr(s, mask, spsr, tmp);
3461}
3462
e9bb4aa9
JR
3463/* Generate an old-style exception return. Marks pc as dead. */
3464static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3465{
d9ba4830 3466 TCGv tmp;
e9bb4aa9 3467 store_reg(s, 15, pc);
d9ba4830
PB
3468 tmp = load_cpu_field(spsr);
3469 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3470 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3471 s->is_jmp = DISAS_UPDATE;
3472}
3473
b0109805
PB
3474/* Generate a v6 exception return. Marks both values as dead. */
3475static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3476{
b0109805 3477 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3478 tcg_temp_free_i32(cpsr);
b0109805 3479 store_reg(s, 15, pc);
9ee6e8bb
PB
3480 s->is_jmp = DISAS_UPDATE;
3481}
3b46e624 3482
9ee6e8bb
PB
3483static inline void
3484gen_set_condexec (DisasContext *s)
3485{
3486 if (s->condexec_mask) {
8f01245e 3487 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3488 TCGv tmp = tcg_temp_new_i32();
8f01245e 3489 tcg_gen_movi_i32(tmp, val);
d9ba4830 3490 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3491 }
3492}
3b46e624 3493
bc4a0de0
PM
3494static void gen_exception_insn(DisasContext *s, int offset, int excp)
3495{
3496 gen_set_condexec(s);
3497 gen_set_pc_im(s->pc - offset);
3498 gen_exception(excp);
3499 s->is_jmp = DISAS_JUMP;
3500}
3501
9ee6e8bb
PB
3502static void gen_nop_hint(DisasContext *s, int val)
3503{
3504 switch (val) {
3505 case 3: /* wfi */
8984bd2e 3506 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3507 s->is_jmp = DISAS_WFI;
3508 break;
3509 case 2: /* wfe */
3510 case 4: /* sev */
3511 /* TODO: Implement SEV and WFE. May help SMP performance. */
3512 default: /* nop */
3513 break;
3514 }
3515}
99c475ab 3516
ad69471c 3517#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3518
62698be3 3519static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3520{
3521 switch (size) {
dd8fbd78
FN
3522 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3523 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3524 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3525 default: abort();
9ee6e8bb 3526 }
9ee6e8bb
PB
3527}
3528
dd8fbd78 3529static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3530{
3531 switch (size) {
dd8fbd78
FN
3532 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3533 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3534 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3535 default: return;
3536 }
3537}
3538
3539/* 32-bit pairwise ops end up the same as the elementwise versions. */
3540#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3541#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3542#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3543#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3544
ad69471c
PB
3545#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3546 switch ((size << 1) | u) { \
3547 case 0: \
dd8fbd78 3548 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3549 break; \
3550 case 1: \
dd8fbd78 3551 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3552 break; \
3553 case 2: \
dd8fbd78 3554 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3555 break; \
3556 case 3: \
dd8fbd78 3557 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3558 break; \
3559 case 4: \
dd8fbd78 3560 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3561 break; \
3562 case 5: \
dd8fbd78 3563 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3564 break; \
3565 default: return 1; \
3566 }} while (0)
9ee6e8bb
PB
3567
3568#define GEN_NEON_INTEGER_OP(name) do { \
3569 switch ((size << 1) | u) { \
ad69471c 3570 case 0: \
dd8fbd78 3571 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3572 break; \
3573 case 1: \
dd8fbd78 3574 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3575 break; \
3576 case 2: \
dd8fbd78 3577 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3578 break; \
3579 case 3: \
dd8fbd78 3580 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3581 break; \
3582 case 4: \
dd8fbd78 3583 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3584 break; \
3585 case 5: \
dd8fbd78 3586 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3587 break; \
9ee6e8bb
PB
3588 default: return 1; \
3589 }} while (0)
3590
dd8fbd78 3591static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3592{
7d1b0095 3593 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3594 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3595 return tmp;
9ee6e8bb
PB
3596}
3597
dd8fbd78 3598static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3599{
dd8fbd78 3600 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3601 tcg_temp_free_i32(var);
9ee6e8bb
PB
3602}
3603
dd8fbd78 3604static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3605{
dd8fbd78 3606 TCGv tmp;
9ee6e8bb 3607 if (size == 1) {
0fad6efc
PM
3608 tmp = neon_load_reg(reg & 7, reg >> 4);
3609 if (reg & 8) {
dd8fbd78 3610 gen_neon_dup_high16(tmp);
0fad6efc
PM
3611 } else {
3612 gen_neon_dup_low16(tmp);
dd8fbd78 3613 }
0fad6efc
PM
3614 } else {
3615 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3616 }
dd8fbd78 3617 return tmp;
9ee6e8bb
PB
3618}
3619
02acedf9 3620static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3621{
02acedf9 3622 TCGv tmp, tmp2;
600b828c 3623 if (!q && size == 2) {
02acedf9
PM
3624 return 1;
3625 }
3626 tmp = tcg_const_i32(rd);
3627 tmp2 = tcg_const_i32(rm);
3628 if (q) {
3629 switch (size) {
3630 case 0:
02da0b2d 3631 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3632 break;
3633 case 1:
02da0b2d 3634 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3635 break;
3636 case 2:
02da0b2d 3637 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3638 break;
3639 default:
3640 abort();
3641 }
3642 } else {
3643 switch (size) {
3644 case 0:
02da0b2d 3645 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3646 break;
3647 case 1:
02da0b2d 3648 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3649 break;
3650 default:
3651 abort();
3652 }
3653 }
3654 tcg_temp_free_i32(tmp);
3655 tcg_temp_free_i32(tmp2);
3656 return 0;
19457615
FN
3657}
3658
d68a6f3a 3659static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3660{
3661 TCGv tmp, tmp2;
600b828c 3662 if (!q && size == 2) {
d68a6f3a
PM
3663 return 1;
3664 }
3665 tmp = tcg_const_i32(rd);
3666 tmp2 = tcg_const_i32(rm);
3667 if (q) {
3668 switch (size) {
3669 case 0:
02da0b2d 3670 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3671 break;
3672 case 1:
02da0b2d 3673 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3674 break;
3675 case 2:
02da0b2d 3676 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3677 break;
3678 default:
3679 abort();
3680 }
3681 } else {
3682 switch (size) {
3683 case 0:
02da0b2d 3684 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3685 break;
3686 case 1:
02da0b2d 3687 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3688 break;
3689 default:
3690 abort();
3691 }
3692 }
3693 tcg_temp_free_i32(tmp);
3694 tcg_temp_free_i32(tmp2);
3695 return 0;
19457615
FN
3696}
3697
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

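/* Worked example for gen_neon_trn_u16() above (illustrative): writing each
 * 32-bit input as {high:low} halfwords, t0 = {a1:a0} and t1 = {b1:b0} end
 * up as t0 = {a0:b0} and t1 = {a1:b1}, i.e. one output gathers lane 0 of
 * both inputs and the other gathers lane 1.  gen_neon_trn_u8() does the
 * same per byte pair, which is the per-word step the VTRN handling needs.
 */
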
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};

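/* Reading of neon_ls_element_type[] above, based on how disas_neon_ls_insn()
 * uses it: the row is indexed by the 'op' field of a VLDn/VSTn "multiple
 * elements" encoding; 'nregs' is how many D registers are transferred,
 * 'interleave' sets the memory stride between successive elements of one
 * register ((1 << size) * interleave bytes), and 'spacing' is the register
 * number step between the registers of the structure.  The first row
 * {4, 4, 1}, for example, is a fully interleaved four-register transfer
 * using adjacent D registers.
 */
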
3758/* Translate a NEON load/store element instruction. Return nonzero if the
3759 instruction is invalid. */
0ecb72a5 3760static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3761{
3762 int rd, rn, rm;
3763 int op;
3764 int nregs;
3765 int interleave;
84496233 3766 int spacing;
9ee6e8bb
PB
3767 int stride;
3768 int size;
3769 int reg;
3770 int pass;
3771 int load;
3772 int shift;
9ee6e8bb 3773 int n;
1b2b1e54 3774 TCGv addr;
b0109805 3775 TCGv tmp;
8f8e3aa4 3776 TCGv tmp2;
84496233 3777 TCGv_i64 tmp64;
9ee6e8bb 3778
5df8bac1 3779 if (!s->vfp_enabled)
9ee6e8bb
PB
3780 return 1;
3781 VFP_DREG_D(rd, insn);
3782 rn = (insn >> 16) & 0xf;
3783 rm = insn & 0xf;
3784 load = (insn & (1 << 21)) != 0;
3785 if ((insn & (1 << 23)) == 0) {
3786 /* Load store all elements. */
3787 op = (insn >> 8) & 0xf;
3788 size = (insn >> 6) & 3;
84496233 3789 if (op > 10)
9ee6e8bb 3790 return 1;
f2dd89d0
PM
3791 /* Catch UNDEF cases for bad values of align field */
3792 switch (op & 0xc) {
3793 case 4:
3794 if (((insn >> 5) & 1) == 1) {
3795 return 1;
3796 }
3797 break;
3798 case 8:
3799 if (((insn >> 4) & 3) == 3) {
3800 return 1;
3801 }
3802 break;
3803 default:
3804 break;
3805 }
9ee6e8bb
PB
3806 nregs = neon_ls_element_type[op].nregs;
3807 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3808 spacing = neon_ls_element_type[op].spacing;
3809 if (size == 3 && (interleave | spacing) != 1)
3810 return 1;
e318a60b 3811 addr = tcg_temp_new_i32();
dcc65026 3812 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3813 stride = (1 << size) * interleave;
3814 for (reg = 0; reg < nregs; reg++) {
3815 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3816 load_reg_var(s, addr, rn);
3817 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3818 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3819 load_reg_var(s, addr, rn);
3820 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3821 }
84496233
JR
3822 if (size == 3) {
3823 if (load) {
3824 tmp64 = gen_ld64(addr, IS_USER(s));
3825 neon_store_reg64(tmp64, rd);
3826 tcg_temp_free_i64(tmp64);
3827 } else {
3828 tmp64 = tcg_temp_new_i64();
3829 neon_load_reg64(tmp64, rd);
3830 gen_st64(tmp64, addr, IS_USER(s));
3831 }
3832 tcg_gen_addi_i32(addr, addr, stride);
3833 } else {
3834 for (pass = 0; pass < 2; pass++) {
3835 if (size == 2) {
3836 if (load) {
3837 tmp = gen_ld32(addr, IS_USER(s));
3838 neon_store_reg(rd, pass, tmp);
3839 } else {
3840 tmp = neon_load_reg(rd, pass);
3841 gen_st32(tmp, addr, IS_USER(s));
3842 }
1b2b1e54 3843 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3844 } else if (size == 1) {
3845 if (load) {
3846 tmp = gen_ld16u(addr, IS_USER(s));
3847 tcg_gen_addi_i32(addr, addr, stride);
3848 tmp2 = gen_ld16u(addr, IS_USER(s));
3849 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3850 tcg_gen_shli_i32(tmp2, tmp2, 16);
3851 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3852 tcg_temp_free_i32(tmp2);
84496233
JR
3853 neon_store_reg(rd, pass, tmp);
3854 } else {
3855 tmp = neon_load_reg(rd, pass);
7d1b0095 3856 tmp2 = tcg_temp_new_i32();
84496233
JR
3857 tcg_gen_shri_i32(tmp2, tmp, 16);
3858 gen_st16(tmp, addr, IS_USER(s));
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3861 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3862 }
84496233
JR
3863 } else /* size == 0 */ {
3864 if (load) {
3865 TCGV_UNUSED(tmp2);
3866 for (n = 0; n < 4; n++) {
3867 tmp = gen_ld8u(addr, IS_USER(s));
3868 tcg_gen_addi_i32(addr, addr, stride);
3869 if (n == 0) {
3870 tmp2 = tmp;
3871 } else {
41ba8341
PB
3872 tcg_gen_shli_i32(tmp, tmp, n * 8);
3873 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3874 tcg_temp_free_i32(tmp);
84496233 3875 }
9ee6e8bb 3876 }
84496233
JR
3877 neon_store_reg(rd, pass, tmp2);
3878 } else {
3879 tmp2 = neon_load_reg(rd, pass);
3880 for (n = 0; n < 4; n++) {
7d1b0095 3881 tmp = tcg_temp_new_i32();
84496233
JR
3882 if (n == 0) {
3883 tcg_gen_mov_i32(tmp, tmp2);
3884 } else {
3885 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3886 }
3887 gen_st8(tmp, addr, IS_USER(s));
3888 tcg_gen_addi_i32(addr, addr, stride);
3889 }
7d1b0095 3890 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3891 }
3892 }
3893 }
3894 }
84496233 3895 rd += spacing;
9ee6e8bb 3896 }
e318a60b 3897 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3898 stride = nregs * 8;
3899 } else {
3900 size = (insn >> 10) & 3;
3901 if (size == 3) {
3902 /* Load single element to all lanes. */
8e18cde3
PM
3903 int a = (insn >> 4) & 1;
3904 if (!load) {
9ee6e8bb 3905 return 1;
8e18cde3 3906 }
9ee6e8bb
PB
3907 size = (insn >> 6) & 3;
3908 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3909
3910 if (size == 3) {
3911 if (nregs != 4 || a == 0) {
9ee6e8bb 3912 return 1;
99c475ab 3913 }
8e18cde3
PM
3914 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3915 size = 2;
3916 }
3917 if (nregs == 1 && a == 1 && size == 0) {
3918 return 1;
3919 }
3920 if (nregs == 3 && a == 1) {
3921 return 1;
3922 }
e318a60b 3923 addr = tcg_temp_new_i32();
8e18cde3
PM
3924 load_reg_var(s, addr, rn);
3925 if (nregs == 1) {
3926 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3927 tmp = gen_load_and_replicate(s, addr, size);
3928 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3929 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3930 if (insn & (1 << 5)) {
3931 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3932 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3933 }
3934 tcg_temp_free_i32(tmp);
3935 } else {
3936 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3937 stride = (insn & (1 << 5)) ? 2 : 1;
3938 for (reg = 0; reg < nregs; reg++) {
3939 tmp = gen_load_and_replicate(s, addr, size);
3940 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3941 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3942 tcg_temp_free_i32(tmp);
3943 tcg_gen_addi_i32(addr, addr, 1 << size);
3944 rd += stride;
3945 }
9ee6e8bb 3946 }
e318a60b 3947 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3948 stride = (1 << size) * nregs;
3949 } else {
3950 /* Single element. */
93262b16 3951 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
3952 pass = (insn >> 7) & 1;
3953 switch (size) {
3954 case 0:
3955 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3956 stride = 1;
3957 break;
3958 case 1:
3959 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3960 stride = (insn & (1 << 5)) ? 2 : 1;
3961 break;
3962 case 2:
3963 shift = 0;
9ee6e8bb
PB
3964 stride = (insn & (1 << 6)) ? 2 : 1;
3965 break;
3966 default:
3967 abort();
3968 }
3969 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
3970 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3971 switch (nregs) {
3972 case 1:
3973 if (((idx & (1 << size)) != 0) ||
3974 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3975 return 1;
3976 }
3977 break;
3978 case 3:
3979 if ((idx & 1) != 0) {
3980 return 1;
3981 }
3982 /* fall through */
3983 case 2:
3984 if (size == 2 && (idx & 2) != 0) {
3985 return 1;
3986 }
3987 break;
3988 case 4:
3989 if ((size == 2) && ((idx & 3) == 3)) {
3990 return 1;
3991 }
3992 break;
3993 default:
3994 abort();
3995 }
3996 if ((rd + stride * (nregs - 1)) > 31) {
3997 /* Attempts to write off the end of the register file
3998 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3999 * the neon_load_reg() would write off the end of the array.
4000 */
4001 return 1;
4002 }
e318a60b 4003 addr = tcg_temp_new_i32();
dcc65026 4004 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4005 for (reg = 0; reg < nregs; reg++) {
4006 if (load) {
9ee6e8bb
PB
4007 switch (size) {
4008 case 0:
1b2b1e54 4009 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4010 break;
4011 case 1:
1b2b1e54 4012 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4013 break;
4014 case 2:
1b2b1e54 4015 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4016 break;
a50f5b91
PB
4017 default: /* Avoid compiler warnings. */
4018 abort();
9ee6e8bb
PB
4019 }
4020 if (size != 2) {
8f8e3aa4 4021 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
4022 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4023 shift, size ? 16 : 8);
7d1b0095 4024 tcg_temp_free_i32(tmp2);
9ee6e8bb 4025 }
8f8e3aa4 4026 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4027 } else { /* Store */
8f8e3aa4
PB
4028 tmp = neon_load_reg(rd, pass);
4029 if (shift)
4030 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4031 switch (size) {
4032 case 0:
1b2b1e54 4033 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4034 break;
4035 case 1:
1b2b1e54 4036 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4037 break;
4038 case 2:
1b2b1e54 4039 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4040 break;
99c475ab 4041 }
99c475ab 4042 }
9ee6e8bb 4043 rd += stride;
1b2b1e54 4044 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4045 }
e318a60b 4046 tcg_temp_free_i32(addr);
9ee6e8bb 4047 stride = nregs * (1 << size);
99c475ab 4048 }
9ee6e8bb
PB
4049 }
4050 if (rm != 15) {
b26eefb6
PB
4051 TCGv base;
4052
4053 base = load_reg(s, rn);
9ee6e8bb 4054 if (rm == 13) {
b26eefb6 4055 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4056 } else {
b26eefb6
PB
4057 TCGv index;
4058 index = load_reg(s, rm);
4059 tcg_gen_add_i32(base, base, index);
7d1b0095 4060 tcg_temp_free_i32(index);
9ee6e8bb 4061 }
b26eefb6 4062 store_reg(s, rn, base);
9ee6e8bb
PB
4063 }
4064 return 0;
4065}

/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}

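/* Equivalently, gen_neon_bsl() computes dest = (t & c) | (f & ~c): each
 * result bit comes from t where the corresponding bit of c is set and from
 * f where it is clear.  The VBSL/VBIT/VBIF cases below all reuse this one
 * helper and merely permute which operand acts as the select mask.
 */
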
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

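/* Note on gen_neon_mull() above: the switch keys on ((size << 1) | u), so
 * for example size==1/u==0 picks the signed 16x16->32 widening helper,
 * while the 32-bit cases (4 and 5) fall back to the generic 32x32->64
 * multiplies gen_muls_i64_i32()/gen_mulu_i64_i32().  Only the 8- and
 * 16-bit helpers leave their inputs live, hence the extra frees for
 * size < 2 noted in the comment.
 */
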
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};

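/* Worked example: disas_neon_data_insn() below UNDEFs an encoding when
 * (neon_3r_sizes[op] & (1 << size)) is zero, so an entry of 0x7 allows
 * element sizes 0..2 (8/16/32-bit) but not 64-bit, 0xf also allows the
 * 64-bit form, and 0x5 ("size bit 1 encodes op") accepts only size values
 * 0 and 2, i.e. the two float sub-encodings.
 */
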
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};

9ee6e8bb
PB
4434/* Translate a NEON data processing instruction. Return nonzero if the
4435 instruction is invalid.
ad69471c
PB
4436 We process data in a mixture of 32-bit and 64-bit chunks.
4437 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4438
0ecb72a5 4439static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4440{
4441 int op;
4442 int q;
4443 int rd, rn, rm;
4444 int size;
4445 int shift;
4446 int pass;
4447 int count;
4448 int pairwise;
4449 int u;
ca9a32e4 4450 uint32_t imm, mask;
b75263d6 4451 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4452 TCGv_i64 tmp64;
9ee6e8bb 4453
5df8bac1 4454 if (!s->vfp_enabled)
9ee6e8bb
PB
4455 return 1;
4456 q = (insn & (1 << 6)) != 0;
4457 u = (insn >> 24) & 1;
4458 VFP_DREG_D(rd, insn);
4459 VFP_DREG_N(rn, insn);
4460 VFP_DREG_M(rm, insn);
4461 size = (insn >> 20) & 3;
4462 if ((insn & (1 << 23)) == 0) {
4463 /* Three register same length. */
4464 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4465 /* Catch invalid op and bad size combinations: UNDEF */
4466 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4467 return 1;
4468 }
25f84f79
PM
4469 /* All insns of this form UNDEF for either this condition or the
4470 * superset of cases "Q==1"; we catch the latter later.
4471 */
4472 if (q && ((rd | rn | rm) & 1)) {
4473 return 1;
4474 }
62698be3
PM
4475 if (size == 3 && op != NEON_3R_LOGIC) {
4476 /* 64-bit element instructions. */
9ee6e8bb 4477 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4478 neon_load_reg64(cpu_V0, rn + pass);
4479 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4480 switch (op) {
62698be3 4481 case NEON_3R_VQADD:
9ee6e8bb 4482 if (u) {
02da0b2d
PM
4483 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4484 cpu_V0, cpu_V1);
2c0262af 4485 } else {
02da0b2d
PM
4486 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4487 cpu_V0, cpu_V1);
2c0262af 4488 }
9ee6e8bb 4489 break;
62698be3 4490 case NEON_3R_VQSUB:
9ee6e8bb 4491 if (u) {
02da0b2d
PM
4492 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4493 cpu_V0, cpu_V1);
ad69471c 4494 } else {
02da0b2d
PM
4495 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4496 cpu_V0, cpu_V1);
ad69471c
PB
4497 }
4498 break;
62698be3 4499 case NEON_3R_VSHL:
ad69471c
PB
4500 if (u) {
4501 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4502 } else {
4503 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4504 }
4505 break;
62698be3 4506 case NEON_3R_VQSHL:
ad69471c 4507 if (u) {
02da0b2d
PM
4508 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4509 cpu_V1, cpu_V0);
ad69471c 4510 } else {
02da0b2d
PM
4511 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4512 cpu_V1, cpu_V0);
ad69471c
PB
4513 }
4514 break;
62698be3 4515 case NEON_3R_VRSHL:
ad69471c
PB
4516 if (u) {
4517 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4518 } else {
ad69471c
PB
4519 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4520 }
4521 break;
62698be3 4522 case NEON_3R_VQRSHL:
ad69471c 4523 if (u) {
02da0b2d
PM
4524 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4525 cpu_V1, cpu_V0);
ad69471c 4526 } else {
02da0b2d
PM
4527 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4528 cpu_V1, cpu_V0);
1e8d4eec 4529 }
9ee6e8bb 4530 break;
62698be3 4531 case NEON_3R_VADD_VSUB:
9ee6e8bb 4532 if (u) {
ad69471c 4533 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4534 } else {
ad69471c 4535 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4536 }
4537 break;
4538 default:
4539 abort();
2c0262af 4540 }
ad69471c 4541 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4542 }
9ee6e8bb 4543 return 0;
2c0262af 4544 }
25f84f79 4545 pairwise = 0;
9ee6e8bb 4546 switch (op) {
62698be3
PM
4547 case NEON_3R_VSHL:
4548 case NEON_3R_VQSHL:
4549 case NEON_3R_VRSHL:
4550 case NEON_3R_VQRSHL:
9ee6e8bb 4551 {
ad69471c
PB
4552 int rtmp;
4553 /* Shift instruction operands are reversed. */
4554 rtmp = rn;
9ee6e8bb 4555 rn = rm;
ad69471c 4556 rm = rtmp;
9ee6e8bb 4557 }
2c0262af 4558 break;
25f84f79
PM
4559 case NEON_3R_VPADD:
4560 if (u) {
4561 return 1;
4562 }
4563 /* Fall through */
62698be3
PM
4564 case NEON_3R_VPMAX:
4565 case NEON_3R_VPMIN:
9ee6e8bb 4566 pairwise = 1;
2c0262af 4567 break;
25f84f79
PM
4568 case NEON_3R_FLOAT_ARITH:
4569 pairwise = (u && size < 2); /* if VPADD (float) */
4570 break;
4571 case NEON_3R_FLOAT_MINMAX:
4572 pairwise = u; /* if VPMIN/VPMAX (float) */
4573 break;
4574 case NEON_3R_FLOAT_CMP:
4575 if (!u && size) {
4576 /* no encoding for U=0 C=1x */
4577 return 1;
4578 }
4579 break;
4580 case NEON_3R_FLOAT_ACMP:
4581 if (!u) {
4582 return 1;
4583 }
4584 break;
4585 case NEON_3R_VRECPS_VRSQRTS:
4586 if (u) {
4587 return 1;
4588 }
2c0262af 4589 break;
25f84f79
PM
4590 case NEON_3R_VMUL:
4591 if (u && (size != 0)) {
4592 /* UNDEF on invalid size for polynomial subcase */
4593 return 1;
4594 }
2c0262af 4595 break;
da97f52c
PM
4596 case NEON_3R_VFM:
4597 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4598 return 1;
4599 }
4600 break;
9ee6e8bb 4601 default:
2c0262af 4602 break;
9ee6e8bb 4603 }
dd8fbd78 4604
25f84f79
PM
4605 if (pairwise && q) {
4606 /* All the pairwise insns UNDEF if Q is set */
4607 return 1;
4608 }
4609
9ee6e8bb
PB
4610 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4611
4612 if (pairwise) {
4613 /* Pairwise. */
a5a14945
JR
4614 if (pass < 1) {
4615 tmp = neon_load_reg(rn, 0);
4616 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4617 } else {
a5a14945
JR
4618 tmp = neon_load_reg(rm, 0);
4619 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4620 }
4621 } else {
4622 /* Elementwise. */
dd8fbd78
FN
4623 tmp = neon_load_reg(rn, pass);
4624 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4625 }
4626 switch (op) {
62698be3 4627 case NEON_3R_VHADD:
9ee6e8bb
PB
4628 GEN_NEON_INTEGER_OP(hadd);
4629 break;
62698be3 4630 case NEON_3R_VQADD:
02da0b2d 4631 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4632 break;
62698be3 4633 case NEON_3R_VRHADD:
9ee6e8bb 4634 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4635 break;
62698be3 4636 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4637 switch ((u << 2) | size) {
4638 case 0: /* VAND */
dd8fbd78 4639 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4640 break;
4641 case 1: /* BIC */
f669df27 4642 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4643 break;
4644 case 2: /* VORR */
dd8fbd78 4645 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4646 break;
4647 case 3: /* VORN */
f669df27 4648 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4649 break;
4650 case 4: /* VEOR */
dd8fbd78 4651 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4652 break;
4653 case 5: /* VBSL */
dd8fbd78
FN
4654 tmp3 = neon_load_reg(rd, pass);
4655 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4656 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4657 break;
4658 case 6: /* VBIT */
dd8fbd78
FN
4659 tmp3 = neon_load_reg(rd, pass);
4660 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4661 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4662 break;
4663 case 7: /* VBIF */
dd8fbd78
FN
4664 tmp3 = neon_load_reg(rd, pass);
4665 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4666 tcg_temp_free_i32(tmp3);
9ee6e8bb 4667 break;
2c0262af
FB
4668 }
4669 break;
62698be3 4670 case NEON_3R_VHSUB:
9ee6e8bb
PB
4671 GEN_NEON_INTEGER_OP(hsub);
4672 break;
62698be3 4673 case NEON_3R_VQSUB:
02da0b2d 4674 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4675 break;
62698be3 4676 case NEON_3R_VCGT:
9ee6e8bb
PB
4677 GEN_NEON_INTEGER_OP(cgt);
4678 break;
62698be3 4679 case NEON_3R_VCGE:
9ee6e8bb
PB
4680 GEN_NEON_INTEGER_OP(cge);
4681 break;
62698be3 4682 case NEON_3R_VSHL:
ad69471c 4683 GEN_NEON_INTEGER_OP(shl);
2c0262af 4684 break;
62698be3 4685 case NEON_3R_VQSHL:
02da0b2d 4686 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4687 break;
62698be3 4688 case NEON_3R_VRSHL:
ad69471c 4689 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4690 break;
62698be3 4691 case NEON_3R_VQRSHL:
02da0b2d 4692 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4693 break;
62698be3 4694 case NEON_3R_VMAX:
9ee6e8bb
PB
4695 GEN_NEON_INTEGER_OP(max);
4696 break;
62698be3 4697 case NEON_3R_VMIN:
9ee6e8bb
PB
4698 GEN_NEON_INTEGER_OP(min);
4699 break;
62698be3 4700 case NEON_3R_VABD:
9ee6e8bb
PB
4701 GEN_NEON_INTEGER_OP(abd);
4702 break;
62698be3 4703 case NEON_3R_VABA:
9ee6e8bb 4704 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4705 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4706 tmp2 = neon_load_reg(rd, pass);
4707 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4708 break;
62698be3 4709 case NEON_3R_VADD_VSUB:
9ee6e8bb 4710 if (!u) { /* VADD */
62698be3 4711 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4712 } else { /* VSUB */
4713 switch (size) {
dd8fbd78
FN
4714 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4715 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4716 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4717 default: abort();
9ee6e8bb
PB
4718 }
4719 }
4720 break;
62698be3 4721 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4722 if (!u) { /* VTST */
4723 switch (size) {
dd8fbd78
FN
4724 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4725 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4726 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4727 default: abort();
9ee6e8bb
PB
4728 }
4729 } else { /* VCEQ */
4730 switch (size) {
dd8fbd78
FN
4731 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4732 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4733 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4734 default: abort();
9ee6e8bb
PB
4735 }
4736 }
4737 break;
62698be3 4738 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4739 switch (size) {
dd8fbd78
FN
4740 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4741 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4742 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4743 default: abort();
9ee6e8bb 4744 }
7d1b0095 4745 tcg_temp_free_i32(tmp2);
dd8fbd78 4746 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4747 if (u) { /* VMLS */
dd8fbd78 4748 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4749 } else { /* VMLA */
dd8fbd78 4750 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4751 }
4752 break;
62698be3 4753 case NEON_3R_VMUL:
9ee6e8bb 4754 if (u) { /* polynomial */
dd8fbd78 4755 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4756 } else { /* Integer */
4757 switch (size) {
dd8fbd78
FN
4758 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4759 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4760 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4761 default: abort();
9ee6e8bb
PB
4762 }
4763 }
4764 break;
62698be3 4765 case NEON_3R_VPMAX:
9ee6e8bb
PB
4766 GEN_NEON_INTEGER_OP(pmax);
4767 break;
62698be3 4768 case NEON_3R_VPMIN:
9ee6e8bb
PB
4769 GEN_NEON_INTEGER_OP(pmin);
4770 break;
62698be3 4771 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4772 if (!u) { /* VQDMULH */
4773 switch (size) {
02da0b2d
PM
4774 case 1:
4775 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4776 break;
4777 case 2:
4778 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4779 break;
62698be3 4780 default: abort();
9ee6e8bb 4781 }
62698be3 4782 } else { /* VQRDMULH */
9ee6e8bb 4783 switch (size) {
02da0b2d
PM
4784 case 1:
4785 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4786 break;
4787 case 2:
4788 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4789 break;
62698be3 4790 default: abort();
9ee6e8bb
PB
4791 }
4792 }
4793 break;
62698be3 4794 case NEON_3R_VPADD:
9ee6e8bb 4795 switch (size) {
dd8fbd78
FN
4796 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4797 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4798 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4799 default: abort();
9ee6e8bb
PB
4800 }
4801 break;
62698be3 4802 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4803 {
4804 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4805 switch ((u << 2) | size) {
4806 case 0: /* VADD */
aa47cfdd
PM
4807 case 4: /* VPADD */
4808 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4809 break;
4810 case 2: /* VSUB */
aa47cfdd 4811 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4812 break;
4813 case 6: /* VABD */
aa47cfdd 4814 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4815 break;
4816 default:
62698be3 4817 abort();
9ee6e8bb 4818 }
aa47cfdd 4819 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4820 break;
aa47cfdd 4821 }
62698be3 4822 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4823 {
4824 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4825 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4826 if (!u) {
7d1b0095 4827 tcg_temp_free_i32(tmp2);
dd8fbd78 4828 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4829 if (size == 0) {
aa47cfdd 4830 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4831 } else {
aa47cfdd 4832 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4833 }
4834 }
aa47cfdd 4835 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4836 break;
aa47cfdd 4837 }
62698be3 4838 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4839 {
4840 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4841 if (!u) {
aa47cfdd 4842 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4843 } else {
aa47cfdd
PM
4844 if (size == 0) {
4845 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4846 } else {
4847 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4848 }
b5ff1b31 4849 }
aa47cfdd 4850 tcg_temp_free_ptr(fpstatus);
2c0262af 4851 break;
aa47cfdd 4852 }
62698be3 4853 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4854 {
4855 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4856 if (size == 0) {
4857 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4858 } else {
4859 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4860 }
4861 tcg_temp_free_ptr(fpstatus);
2c0262af 4862 break;
aa47cfdd 4863 }
62698be3 4864 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4865 {
4866 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4867 if (size == 0) {
4868 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4869 } else {
4870 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4871 }
4872 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4873 break;
aa47cfdd 4874 }
62698be3 4875 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4876 if (size == 0)
dd8fbd78 4877 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4878 else
dd8fbd78 4879 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4880 break;
da97f52c
PM
4881 case NEON_3R_VFM:
4882 {
4883 /* VFMA, VFMS: fused multiply-add */
4884 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4885 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4886 if (size) {
4887 /* VFMS */
4888 gen_helper_vfp_negs(tmp, tmp);
4889 }
4890 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4891 tcg_temp_free_i32(tmp3);
4892 tcg_temp_free_ptr(fpstatus);
4893 break;
4894 }
9ee6e8bb
PB
4895 default:
4896 abort();
2c0262af 4897 }
7d1b0095 4898 tcg_temp_free_i32(tmp2);
dd8fbd78 4899
9ee6e8bb
PB
4900 /* Save the result. For elementwise operations we can put it
4901 straight into the destination register. For pairwise operations
4902 we have to be careful to avoid clobbering the source operands. */
4903 if (pairwise && rd == rm) {
dd8fbd78 4904 neon_store_scratch(pass, tmp);
9ee6e8bb 4905 } else {
dd8fbd78 4906 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4907 }
4908
4909 } /* for pass */
4910 if (pairwise && rd == rm) {
4911 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4912 tmp = neon_load_scratch(pass);
4913 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4914 }
4915 }
ad69471c 4916 /* End of 3 register same size operations. */
9ee6e8bb
PB
4917 } else if (insn & (1 << 4)) {
4918 if ((insn & 0x00380080) != 0) {
4919 /* Two registers and shift. */
4920 op = (insn >> 8) & 0xf;
4921 if (insn & (1 << 7)) {
cc13115b
PM
4922 /* 64-bit shift. */
4923 if (op > 7) {
4924 return 1;
4925 }
9ee6e8bb
PB
4926 size = 3;
4927 } else {
4928 size = 2;
4929 while ((insn & (1 << (size + 19))) == 0)
4930 size--;
4931 }
4932 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4933 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4934 by immediate using the variable shift operations. */
4935 if (op < 8) {
4936 /* Shift by immediate:
4937 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4938 if (q && ((rd | rm) & 1)) {
4939 return 1;
4940 }
4941 if (!u && (op == 4 || op == 6)) {
4942 return 1;
4943 }
9ee6e8bb
PB
4944 /* Right shifts are encoded as N - shift, where N is the
4945 element size in bits. */
4946 if (op <= 4)
4947 shift = shift - (1 << (size + 3));
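                /* Illustrative example: for size==0 (8-bit elements) an
                 * encoded shift amount of 1 becomes 1 - 8 = -7 here; the
                 * variable-shift helpers used below treat a negative count
                 * as a right shift, which gives the VSHR-by-7 behaviour.
                 */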
9ee6e8bb
PB
4948 if (size == 3) {
4949 count = q + 1;
4950 } else {
4951 count = q ? 4: 2;
4952 }
4953 switch (size) {
4954 case 0:
4955 imm = (uint8_t) shift;
4956 imm |= imm << 8;
4957 imm |= imm << 16;
4958 break;
4959 case 1:
4960 imm = (uint16_t) shift;
4961 imm |= imm << 16;
4962 break;
4963 case 2:
4964 case 3:
4965 imm = shift;
4966 break;
4967 default:
4968 abort();
4969 }
4970
4971 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4972 if (size == 3) {
4973 neon_load_reg64(cpu_V0, rm + pass);
4974 tcg_gen_movi_i64(cpu_V1, imm);
4975 switch (op) {
4976 case 0: /* VSHR */
4977 case 1: /* VSRA */
4978 if (u)
4979 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4980 else
ad69471c 4981 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4982 break;
ad69471c
PB
4983 case 2: /* VRSHR */
4984 case 3: /* VRSRA */
4985 if (u)
4986 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4987 else
ad69471c 4988 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4989 break;
ad69471c 4990 case 4: /* VSRI */
ad69471c
PB
4991 case 5: /* VSHL, VSLI */
4992 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4993 break;
0322b26e 4994 case 6: /* VQSHLU */
02da0b2d
PM
4995 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4996 cpu_V0, cpu_V1);
ad69471c 4997 break;
0322b26e
PM
4998 case 7: /* VQSHL */
4999 if (u) {
02da0b2d 5000 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5001 cpu_V0, cpu_V1);
5002 } else {
02da0b2d 5003 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5004 cpu_V0, cpu_V1);
5005 }
9ee6e8bb 5006 break;
9ee6e8bb 5007 }
ad69471c
PB
5008 if (op == 1 || op == 3) {
5009 /* Accumulate. */
5371cb81 5010 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5011 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5012 } else if (op == 4 || (op == 5 && u)) {
5013 /* Insert */
923e6509
CL
5014 neon_load_reg64(cpu_V1, rd + pass);
5015 uint64_t mask;
5016 if (shift < -63 || shift > 63) {
5017 mask = 0;
5018 } else {
5019 if (op == 4) {
5020 mask = 0xffffffffffffffffull >> -shift;
5021 } else {
5022 mask = 0xffffffffffffffffull << shift;
5023 }
5024 }
5025 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5026 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5027 }
5028 neon_store_reg64(cpu_V0, rd + pass);
5029 } else { /* size < 3 */
5030 /* Operands in T0 and T1. */
dd8fbd78 5031 tmp = neon_load_reg(rm, pass);
7d1b0095 5032 tmp2 = tcg_temp_new_i32();
dd8fbd78 5033 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5034 switch (op) {
5035 case 0: /* VSHR */
5036 case 1: /* VSRA */
5037 GEN_NEON_INTEGER_OP(shl);
5038 break;
5039 case 2: /* VRSHR */
5040 case 3: /* VRSRA */
5041 GEN_NEON_INTEGER_OP(rshl);
5042 break;
5043 case 4: /* VSRI */
ad69471c
PB
5044 case 5: /* VSHL, VSLI */
5045 switch (size) {
dd8fbd78
FN
5046 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5047 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5048 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5049 default: abort();
ad69471c
PB
5050 }
5051 break;
0322b26e 5052 case 6: /* VQSHLU */
ad69471c 5053 switch (size) {
0322b26e 5054 case 0:
02da0b2d
PM
5055 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5056 tmp, tmp2);
0322b26e
PM
5057 break;
5058 case 1:
02da0b2d
PM
5059 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5060 tmp, tmp2);
0322b26e
PM
5061 break;
5062 case 2:
02da0b2d
PM
5063 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5064 tmp, tmp2);
0322b26e
PM
5065 break;
5066 default:
cc13115b 5067 abort();
ad69471c
PB
5068 }
5069 break;
0322b26e 5070 case 7: /* VQSHL */
02da0b2d 5071 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5072 break;
ad69471c 5073 }
7d1b0095 5074 tcg_temp_free_i32(tmp2);
ad69471c
PB
5075
5076 if (op == 1 || op == 3) {
5077 /* Accumulate. */
dd8fbd78 5078 tmp2 = neon_load_reg(rd, pass);
5371cb81 5079 gen_neon_add(size, tmp, tmp2);
7d1b0095 5080 tcg_temp_free_i32(tmp2);
ad69471c
PB
5081 } else if (op == 4 || (op == 5 && u)) {
5082 /* Insert */
5083 switch (size) {
5084 case 0:
5085 if (op == 4)
ca9a32e4 5086 mask = 0xff >> -shift;
ad69471c 5087 else
ca9a32e4
JR
5088 mask = (uint8_t)(0xff << shift);
5089 mask |= mask << 8;
5090 mask |= mask << 16;
ad69471c
PB
5091 break;
5092 case 1:
5093 if (op == 4)
ca9a32e4 5094 mask = 0xffff >> -shift;
ad69471c 5095 else
ca9a32e4
JR
5096 mask = (uint16_t)(0xffff << shift);
5097 mask |= mask << 16;
ad69471c
PB
5098 break;
5099 case 2:
ca9a32e4
JR
5100 if (shift < -31 || shift > 31) {
5101 mask = 0;
5102 } else {
5103 if (op == 4)
5104 mask = 0xffffffffu >> -shift;
5105 else
5106 mask = 0xffffffffu << shift;
5107 }
ad69471c
PB
5108 break;
5109 default:
5110 abort();
5111 }
dd8fbd78 5112 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5113 tcg_gen_andi_i32(tmp, tmp, mask);
5114 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5115 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5116 tcg_temp_free_i32(tmp2);
ad69471c 5117 }
dd8fbd78 5118 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5119 }
5120 } /* for pass */
5121 } else if (op < 10) {
ad69471c 5122 /* Shift by immediate and narrow:
9ee6e8bb 5123 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5124 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5125 if (rm & 1) {
5126 return 1;
5127 }
9ee6e8bb
PB
5128 shift = shift - (1 << (size + 3));
5129 size++;
92cdfaeb 5130 if (size == 3) {
a7812ae4 5131 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5132 neon_load_reg64(cpu_V0, rm);
5133 neon_load_reg64(cpu_V1, rm + 1);
5134 for (pass = 0; pass < 2; pass++) {
5135 TCGv_i64 in;
5136 if (pass == 0) {
5137 in = cpu_V0;
5138 } else {
5139 in = cpu_V1;
5140 }
ad69471c 5141 if (q) {
0b36f4cd 5142 if (input_unsigned) {
92cdfaeb 5143 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5144 } else {
92cdfaeb 5145 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5146 }
ad69471c 5147 } else {
0b36f4cd 5148 if (input_unsigned) {
92cdfaeb 5149 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5150 } else {
92cdfaeb 5151 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5152 }
ad69471c 5153 }
7d1b0095 5154 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5155 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5156 neon_store_reg(rd, pass, tmp);
5157 } /* for pass */
5158 tcg_temp_free_i64(tmp64);
5159 } else {
5160 if (size == 1) {
5161 imm = (uint16_t)shift;
5162 imm |= imm << 16;
2c0262af 5163 } else {
92cdfaeb
PM
5164 /* size == 2 */
5165 imm = (uint32_t)shift;
5166 }
5167 tmp2 = tcg_const_i32(imm);
5168 tmp4 = neon_load_reg(rm + 1, 0);
5169 tmp5 = neon_load_reg(rm + 1, 1);
5170 for (pass = 0; pass < 2; pass++) {
5171 if (pass == 0) {
5172 tmp = neon_load_reg(rm, 0);
5173 } else {
5174 tmp = tmp4;
5175 }
0b36f4cd
CL
5176 gen_neon_shift_narrow(size, tmp, tmp2, q,
5177 input_unsigned);
92cdfaeb
PM
5178 if (pass == 0) {
5179 tmp3 = neon_load_reg(rm, 1);
5180 } else {
5181 tmp3 = tmp5;
5182 }
0b36f4cd
CL
5183 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5184 input_unsigned);
36aa55dc 5185 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5186 tcg_temp_free_i32(tmp);
5187 tcg_temp_free_i32(tmp3);
5188 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5189 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5190 neon_store_reg(rd, pass, tmp);
5191 } /* for pass */
c6067f04 5192 tcg_temp_free_i32(tmp2);
b75263d6 5193 }
9ee6e8bb 5194 } else if (op == 10) {
cc13115b
PM
5195 /* VSHLL, VMOVL */
5196 if (q || (rd & 1)) {
9ee6e8bb 5197 return 1;
cc13115b 5198 }
ad69471c
PB
5199 tmp = neon_load_reg(rm, 0);
5200 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5201 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5202 if (pass == 1)
5203 tmp = tmp2;
5204
5205 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5206
9ee6e8bb
PB
5207 if (shift != 0) {
5208 /* The shift is less than the width of the source
ad69471c
PB
5209 type, so we can just shift the whole register. */
5210 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5211 /* Widen the result of shift: we need to clear
5212 * the potential overflow bits resulting from
5213 * left bits of the narrow input appearing as
5214 * right bits of left the neighbour narrow
5215 * input. */
ad69471c
PB
5216 if (size < 2 || !u) {
5217 uint64_t imm64;
5218 if (size == 0) {
5219 imm = (0xffu >> (8 - shift));
5220 imm |= imm << 16;
acdf01ef 5221 } else if (size == 1) {
ad69471c 5222 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5223 } else {
5224 /* size == 2 */
5225 imm = 0xffffffff >> (32 - shift);
5226 }
5227 if (size < 2) {
5228 imm64 = imm | (((uint64_t)imm) << 32);
5229 } else {
5230 imm64 = imm;
9ee6e8bb 5231 }
acdf01ef 5232 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5233 }
5234 }
ad69471c 5235 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5236 }
f73534a5 5237 } else if (op >= 14) {
9ee6e8bb 5238 /* VCVT fixed-point. */
cc13115b
PM
5239 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5240 return 1;
5241 }
f73534a5
PM
5242 /* We have already masked out the must-be-1 top bit of imm6,
5243 * hence this 32-shift where the ARM ARM has 64-imm6.
5244 */
5245 shift = 32 - shift;
9ee6e8bb 5246 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5247 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5248 if (!(op & 1)) {
9ee6e8bb 5249 if (u)
5500b06c 5250 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5251 else
5500b06c 5252 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5253 } else {
5254 if (u)
5500b06c 5255 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5256 else
5500b06c 5257 gen_vfp_tosl(0, shift, 1);
2c0262af 5258 }
4373f3ce 5259 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5260 }
5261 } else {
9ee6e8bb
PB
5262 return 1;
5263 }
5264 } else { /* (insn & 0x00380080) == 0 */
5265 int invert;
7d80fee5
PM
5266 if (q && (rd & 1)) {
5267 return 1;
5268 }
9ee6e8bb
PB
5269
5270 op = (insn >> 8) & 0xf;
5271 /* One register and immediate. */
5272 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5273 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5274 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5275 * We choose to not special-case this and will behave as if a
5276 * valid constant encoding of 0 had been given.
5277 */
9ee6e8bb
PB
5278 switch (op) {
5279 case 0: case 1:
5280 /* no-op */
5281 break;
5282 case 2: case 3:
5283 imm <<= 8;
5284 break;
5285 case 4: case 5:
5286 imm <<= 16;
5287 break;
5288 case 6: case 7:
5289 imm <<= 24;
5290 break;
5291 case 8: case 9:
5292 imm |= imm << 16;
5293 break;
5294 case 10: case 11:
5295 imm = (imm << 8) | (imm << 24);
5296 break;
5297 case 12:
8e31209e 5298 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5299 break;
5300 case 13:
5301 imm = (imm << 16) | 0xffff;
5302 break;
5303 case 14:
5304 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5305 if (invert)
5306 imm = ~imm;
5307 break;
5308 case 15:
7d80fee5
PM
5309 if (invert) {
5310 return 1;
5311 }
9ee6e8bb
PB
5312 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5313 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5314 break;
5315 }
5316 if (invert)
5317 imm = ~imm;
5318
9ee6e8bb
PB
5319 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5320 if (op & 1 && op < 12) {
ad69471c 5321 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5322 if (invert) {
5323 /* The immediate value has already been inverted, so
5324 BIC becomes AND. */
ad69471c 5325 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5326 } else {
ad69471c 5327 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5328 }
9ee6e8bb 5329 } else {
ad69471c 5330 /* VMOV, VMVN. */
7d1b0095 5331 tmp = tcg_temp_new_i32();
9ee6e8bb 5332 if (op == 14 && invert) {
a5a14945 5333 int n;
ad69471c
PB
5334 uint32_t val;
5335 val = 0;
9ee6e8bb
PB
5336 for (n = 0; n < 4; n++) {
5337 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5338 val |= 0xff << (n * 8);
9ee6e8bb 5339 }
ad69471c
PB
5340 tcg_gen_movi_i32(tmp, val);
5341 } else {
5342 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5343 }
9ee6e8bb 5344 }
ad69471c 5345 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5346 }
5347 }
e4b3861d 5348 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5349 if (size != 3) {
5350 op = (insn >> 8) & 0xf;
5351 if ((insn & (1 << 6)) == 0) {
5352 /* Three registers of different lengths. */
5353 int src1_wide;
5354 int src2_wide;
5355 int prewiden;
695272dc
PM
5356 /* undefreq: bit 0 : UNDEF if size != 0
5357 * bit 1 : UNDEF if size == 0
5358 * bit 2 : UNDEF if U == 1
5359 * Note that [1:0] set implies 'always UNDEF'
5360 */
5361 int undefreq;
5362 /* prewiden, src1_wide, src2_wide, undefreq */
5363 static const int neon_3reg_wide[16][4] = {
5364 {1, 0, 0, 0}, /* VADDL */
5365 {1, 1, 0, 0}, /* VADDW */
5366 {1, 0, 0, 0}, /* VSUBL */
5367 {1, 1, 0, 0}, /* VSUBW */
5368 {0, 1, 1, 0}, /* VADDHN */
5369 {0, 0, 0, 0}, /* VABAL */
5370 {0, 1, 1, 0}, /* VSUBHN */
5371 {0, 0, 0, 0}, /* VABDL */
5372 {0, 0, 0, 0}, /* VMLAL */
5373 {0, 0, 0, 6}, /* VQDMLAL */
5374 {0, 0, 0, 0}, /* VMLSL */
5375 {0, 0, 0, 6}, /* VQDMLSL */
5376 {0, 0, 0, 0}, /* Integer VMULL */
5377 {0, 0, 0, 2}, /* VQDMULL */
5378 {0, 0, 0, 5}, /* Polynomial VMULL */
5379 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5380 };
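                    /* Worked example of the undefreq bits above: the VQDMULL
                     * row {0, 0, 0, 2} sets only bit 1, so that form UNDEFs
                     * when size == 0, while the polynomial VMULL row
                     * {0, 0, 0, 5} sets bits 0 and 2 and is therefore only
                     * valid with size == 0 and U == 0.
                     */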
5381
5382 prewiden = neon_3reg_wide[op][0];
5383 src1_wide = neon_3reg_wide[op][1];
5384 src2_wide = neon_3reg_wide[op][2];
695272dc 5385 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5386
695272dc
PM
5387 if (((undefreq & 1) && (size != 0)) ||
5388 ((undefreq & 2) && (size == 0)) ||
5389 ((undefreq & 4) && u)) {
5390 return 1;
5391 }
5392 if ((src1_wide && (rn & 1)) ||
5393 (src2_wide && (rm & 1)) ||
5394 (!src2_wide && (rd & 1))) {
ad69471c 5395 return 1;
695272dc 5396 }
ad69471c 5397
9ee6e8bb
PB
5398 /* Avoid overlapping operands. Wide source operands are
5399 always aligned so will never overlap with wide
5400 destinations in problematic ways. */
8f8e3aa4 5401 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5402 tmp = neon_load_reg(rm, 1);
5403 neon_store_scratch(2, tmp);
8f8e3aa4 5404 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5405 tmp = neon_load_reg(rn, 1);
5406 neon_store_scratch(2, tmp);
9ee6e8bb 5407 }
a50f5b91 5408 TCGV_UNUSED(tmp3);
9ee6e8bb 5409 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5410 if (src1_wide) {
5411 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5412 TCGV_UNUSED(tmp);
9ee6e8bb 5413 } else {
ad69471c 5414 if (pass == 1 && rd == rn) {
dd8fbd78 5415 tmp = neon_load_scratch(2);
9ee6e8bb 5416 } else {
ad69471c
PB
5417 tmp = neon_load_reg(rn, pass);
5418 }
5419 if (prewiden) {
5420 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5421 }
5422 }
ad69471c
PB
5423 if (src2_wide) {
5424 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5425 TCGV_UNUSED(tmp2);
9ee6e8bb 5426 } else {
ad69471c 5427 if (pass == 1 && rd == rm) {
dd8fbd78 5428 tmp2 = neon_load_scratch(2);
9ee6e8bb 5429 } else {
ad69471c
PB
5430 tmp2 = neon_load_reg(rm, pass);
5431 }
5432 if (prewiden) {
5433 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5434 }
9ee6e8bb
PB
5435 }
5436 switch (op) {
5437 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5438 gen_neon_addl(size);
9ee6e8bb 5439 break;
79b0e534 5440 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5441 gen_neon_subl(size);
9ee6e8bb
PB
5442 break;
5443 case 5: case 7: /* VABAL, VABDL */
5444 switch ((size << 1) | u) {
ad69471c
PB
5445 case 0:
5446 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5447 break;
5448 case 1:
5449 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5450 break;
5451 case 2:
5452 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5453 break;
5454 case 3:
5455 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5456 break;
5457 case 4:
5458 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5459 break;
5460 case 5:
5461 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5462 break;
9ee6e8bb
PB
5463 default: abort();
5464 }
7d1b0095
PM
5465 tcg_temp_free_i32(tmp2);
5466 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5467 break;
5468 case 8: case 9: case 10: case 11: case 12: case 13:
5469 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5470 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5471 break;
5472 case 14: /* Polynomial VMULL */
e5ca24cb 5473 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5474 tcg_temp_free_i32(tmp2);
5475 tcg_temp_free_i32(tmp);
e5ca24cb 5476 break;
695272dc
PM
5477 default: /* 15 is RESERVED: caught earlier */
5478 abort();
9ee6e8bb 5479 }
ebcd88ce
PM
5480 if (op == 13) {
5481 /* VQDMULL */
5482 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5483 neon_store_reg64(cpu_V0, rd + pass);
5484 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5485 /* Accumulate. */
ebcd88ce 5486 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5487 switch (op) {
4dc064e6
PM
5488 case 10: /* VMLSL */
5489 gen_neon_negl(cpu_V0, size);
5490 /* Fall through */
5491 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5492 gen_neon_addl(size);
9ee6e8bb
PB
5493 break;
5494 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5495 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5496 if (op == 11) {
5497 gen_neon_negl(cpu_V0, size);
5498 }
ad69471c
PB
5499 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5500 break;
9ee6e8bb
PB
5501 default:
5502 abort();
5503 }
ad69471c 5504 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5505 } else if (op == 4 || op == 6) {
5506 /* Narrowing operation. */
7d1b0095 5507 tmp = tcg_temp_new_i32();
79b0e534 5508 if (!u) {
9ee6e8bb 5509 switch (size) {
ad69471c
PB
5510 case 0:
5511 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5512 break;
5513 case 1:
5514 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5515 break;
5516 case 2:
5517 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5518 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5519 break;
9ee6e8bb
PB
5520 default: abort();
5521 }
5522 } else {
5523 switch (size) {
ad69471c
PB
5524 case 0:
5525 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5526 break;
5527 case 1:
5528 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5529 break;
5530 case 2:
5531 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5532 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5533 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5534 break;
9ee6e8bb
PB
5535 default: abort();
5536 }
5537 }
ad69471c
PB
5538 if (pass == 0) {
5539 tmp3 = tmp;
5540 } else {
5541 neon_store_reg(rd, 0, tmp3);
5542 neon_store_reg(rd, 1, tmp);
5543 }
9ee6e8bb
PB
5544 } else {
5545 /* Write back the result. */
ad69471c 5546 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5547 }
5548 }
5549 } else {
3e3326df
PM
5550 /* Two registers and a scalar. NB that for ops of this form
5551 * the ARM ARM labels bit 24 as Q, but it is in our variable
5552 * 'u', not 'q'.
5553 */
5554 if (size == 0) {
5555 return 1;
5556 }
9ee6e8bb 5557 switch (op) {
9ee6e8bb 5558 case 1: /* Float VMLA scalar */
9ee6e8bb 5559 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5560 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5561 if (size == 1) {
5562 return 1;
5563 }
5564 /* fall through */
5565 case 0: /* Integer VMLA scalar */
5566 case 4: /* Integer VMLS scalar */
5567 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5568 case 12: /* VQDMULH scalar */
5569 case 13: /* VQRDMULH scalar */
3e3326df
PM
5570 if (u && ((rd | rn) & 1)) {
5571 return 1;
5572 }
dd8fbd78
FN
5573 tmp = neon_get_scalar(size, rm);
5574 neon_store_scratch(0, tmp);
9ee6e8bb 5575 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5576 tmp = neon_load_scratch(0);
5577 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5578 if (op == 12) {
5579 if (size == 1) {
02da0b2d 5580 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5581 } else {
02da0b2d 5582 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5583 }
5584 } else if (op == 13) {
5585 if (size == 1) {
02da0b2d 5586 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5587 } else {
02da0b2d 5588 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5589 }
5590 } else if (op & 1) {
aa47cfdd
PM
5591 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5592 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5593 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5594 } else {
5595 switch (size) {
dd8fbd78
FN
5596 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5597 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5598 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5599 default: abort();
9ee6e8bb
PB
5600 }
5601 }
7d1b0095 5602 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5603 if (op < 8) {
5604 /* Accumulate. */
dd8fbd78 5605 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5606 switch (op) {
5607 case 0:
dd8fbd78 5608 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5609 break;
5610 case 1:
aa47cfdd
PM
5611 {
5612 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5613 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5614 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5615 break;
aa47cfdd 5616 }
9ee6e8bb 5617 case 4:
dd8fbd78 5618 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5619 break;
5620 case 5:
aa47cfdd
PM
5621 {
5622 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5623 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5624 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5625 break;
aa47cfdd 5626 }
9ee6e8bb
PB
5627 default:
5628 abort();
5629 }
7d1b0095 5630 tcg_temp_free_i32(tmp2);
9ee6e8bb 5631 }
dd8fbd78 5632 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5633 }
5634 break;
9ee6e8bb 5635 case 3: /* VQDMLAL scalar */
9ee6e8bb 5636 case 7: /* VQDMLSL scalar */
9ee6e8bb 5637 case 11: /* VQDMULL scalar */
3e3326df 5638 if (u == 1) {
ad69471c 5639 return 1;
3e3326df
PM
5640 }
5641 /* fall through */
 5642                case 2: /* VMLAL scalar */
5643 case 6: /* VMLSL scalar */
5644 case 10: /* VMULL scalar */
5645 if (rd & 1) {
5646 return 1;
5647 }
dd8fbd78 5648 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5649 /* We need a copy of tmp2 because gen_neon_mull
5650 * deletes it during pass 0. */
7d1b0095 5651 tmp4 = tcg_temp_new_i32();
c6067f04 5652 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5653 tmp3 = neon_load_reg(rn, 1);
ad69471c 5654
9ee6e8bb 5655 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5656 if (pass == 0) {
5657 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5658 } else {
dd8fbd78 5659 tmp = tmp3;
c6067f04 5660 tmp2 = tmp4;
9ee6e8bb 5661 }
ad69471c 5662 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5663 if (op != 11) {
5664 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5665 }
9ee6e8bb 5666 switch (op) {
4dc064e6
PM
5667 case 6:
5668 gen_neon_negl(cpu_V0, size);
5669 /* Fall through */
5670 case 2:
ad69471c 5671 gen_neon_addl(size);
9ee6e8bb
PB
5672 break;
5673 case 3: case 7:
ad69471c 5674 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5675 if (op == 7) {
5676 gen_neon_negl(cpu_V0, size);
5677 }
ad69471c 5678 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5679 break;
5680 case 10:
5681 /* no-op */
5682 break;
5683 case 11:
ad69471c 5684 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5685 break;
5686 default:
5687 abort();
5688 }
ad69471c 5689 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5690 }
dd8fbd78 5691
dd8fbd78 5692
9ee6e8bb
PB
5693 break;
5694 default: /* 14 and 15 are RESERVED */
5695 return 1;
5696 }
5697 }
5698 } else { /* size == 3 */
5699 if (!u) {
5700 /* Extract. */
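                /* VEXT: the result is the run of bytes starting at index imm
                 * within the concatenation Vm:Vn (Vn supplying the low bytes);
                 * it is assembled below with 64-bit shifts and ORs.
                 */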
9ee6e8bb 5701 imm = (insn >> 8) & 0xf;
ad69471c
PB
5702
5703 if (imm > 7 && !q)
5704 return 1;
5705
52579ea1
PM
5706 if (q && ((rd | rn | rm) & 1)) {
5707 return 1;
5708 }
5709
ad69471c
PB
5710 if (imm == 0) {
5711 neon_load_reg64(cpu_V0, rn);
5712 if (q) {
5713 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5714 }
ad69471c
PB
5715 } else if (imm == 8) {
5716 neon_load_reg64(cpu_V0, rn + 1);
5717 if (q) {
5718 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5719 }
ad69471c 5720 } else if (q) {
a7812ae4 5721 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5722 if (imm < 8) {
5723 neon_load_reg64(cpu_V0, rn);
a7812ae4 5724 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5725 } else {
5726 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5727 neon_load_reg64(tmp64, rm);
ad69471c
PB
5728 }
5729 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5730 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5731 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5732 if (imm < 8) {
5733 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5734 } else {
ad69471c
PB
5735 neon_load_reg64(cpu_V1, rm + 1);
5736 imm -= 8;
9ee6e8bb 5737 }
ad69471c 5738 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5739 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5740 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5741 tcg_temp_free_i64(tmp64);
ad69471c 5742 } else {
a7812ae4 5743 /* BUGFIX */
ad69471c 5744 neon_load_reg64(cpu_V0, rn);
a7812ae4 5745 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5746 neon_load_reg64(cpu_V1, rm);
a7812ae4 5747 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5748 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5749 }
5750 neon_store_reg64(cpu_V0, rd);
5751 if (q) {
5752 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5753 }
5754 } else if ((insn & (1 << 11)) == 0) {
5755 /* Two register misc. */
5756 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5757 size = (insn >> 18) & 3;
600b828c
PM
5758 /* UNDEF for unknown op values and bad op-size combinations */
5759 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5760 return 1;
5761 }
fc2a9b37
PM
5762 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5763 q && ((rm | rd) & 1)) {
5764 return 1;
5765 }
9ee6e8bb 5766 switch (op) {
600b828c 5767 case NEON_2RM_VREV64:
9ee6e8bb 5768 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5769 tmp = neon_load_reg(rm, pass * 2);
5770 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5771 switch (size) {
dd8fbd78
FN
5772 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5773 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5774 case 2: /* no-op */ break;
5775 default: abort();
5776 }
dd8fbd78 5777 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5778 if (size == 2) {
dd8fbd78 5779 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5780 } else {
9ee6e8bb 5781 switch (size) {
dd8fbd78
FN
5782 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5783 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5784 default: abort();
5785 }
dd8fbd78 5786 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5787 }
5788 }
5789 break;
600b828c
PM
5790 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5791 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5792 for (pass = 0; pass < q + 1; pass++) {
5793 tmp = neon_load_reg(rm, pass * 2);
5794 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5795 tmp = neon_load_reg(rm, pass * 2 + 1);
5796 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5797 switch (size) {
5798 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5799 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5800 case 2: tcg_gen_add_i64(CPU_V001); break;
5801 default: abort();
5802 }
600b828c 5803 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5804 /* Accumulate. */
ad69471c
PB
5805 neon_load_reg64(cpu_V1, rd + pass);
5806 gen_neon_addl(size);
9ee6e8bb 5807 }
ad69471c 5808 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5809 }
5810 break;
600b828c 5811 case NEON_2RM_VTRN:
9ee6e8bb 5812 if (size == 2) {
a5a14945 5813 int n;
9ee6e8bb 5814 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5815 tmp = neon_load_reg(rm, n);
5816 tmp2 = neon_load_reg(rd, n + 1);
5817 neon_store_reg(rm, n, tmp2);
5818 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5819 }
5820 } else {
5821 goto elementwise;
5822 }
5823 break;
600b828c 5824 case NEON_2RM_VUZP:
02acedf9 5825 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5826 return 1;
9ee6e8bb
PB
5827 }
5828 break;
600b828c 5829 case NEON_2RM_VZIP:
d68a6f3a 5830 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5831 return 1;
9ee6e8bb
PB
5832 }
5833 break;
600b828c
PM
5834 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5835 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5836 if (rm & 1) {
5837 return 1;
5838 }
a50f5b91 5839 TCGV_UNUSED(tmp2);
9ee6e8bb 5840 for (pass = 0; pass < 2; pass++) {
ad69471c 5841 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5842 tmp = tcg_temp_new_i32();
600b828c
PM
5843 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5844 tmp, cpu_V0);
ad69471c
PB
5845 if (pass == 0) {
5846 tmp2 = tmp;
5847 } else {
5848 neon_store_reg(rd, 0, tmp2);
5849 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5850 }
9ee6e8bb
PB
5851 }
5852 break;
600b828c 5853 case NEON_2RM_VSHLL:
fc2a9b37 5854 if (q || (rd & 1)) {
9ee6e8bb 5855 return 1;
600b828c 5856 }
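                /* This two-reg-misc form of VSHLL always shifts by the
                 * element width, i.e. by 8 << size.
                 */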
ad69471c
PB
5857 tmp = neon_load_reg(rm, 0);
5858 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5859 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5860 if (pass == 1)
5861 tmp = tmp2;
5862 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5863 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5864 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5865 }
5866 break;
600b828c 5867 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5868 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5869 q || (rm & 1)) {
5870 return 1;
5871 }
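                /* Narrow the four single-precision elements of Qm into four
                 * half-precision values packed into Dd.
                 */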
7d1b0095
PM
5872 tmp = tcg_temp_new_i32();
5873 tmp2 = tcg_temp_new_i32();
60011498 5874 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5875 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5876 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5877 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5878 tcg_gen_shli_i32(tmp2, tmp2, 16);
5879 tcg_gen_or_i32(tmp2, tmp2, tmp);
5880 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5881 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5882 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5883 neon_store_reg(rd, 0, tmp2);
7d1b0095 5884 tmp2 = tcg_temp_new_i32();
2d981da7 5885 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5886 tcg_gen_shli_i32(tmp2, tmp2, 16);
5887 tcg_gen_or_i32(tmp2, tmp2, tmp);
5888 neon_store_reg(rd, 1, tmp2);
7d1b0095 5889 tcg_temp_free_i32(tmp);
60011498 5890 break;
600b828c 5891 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5892 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5893 q || (rd & 1)) {
5894 return 1;
5895 }
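                /* Widen the four half-precision values in Dm into four
                 * single-precision elements of Qd.
                 */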
7d1b0095 5896 tmp3 = tcg_temp_new_i32();
60011498
PB
5897 tmp = neon_load_reg(rm, 0);
5898 tmp2 = neon_load_reg(rm, 1);
5899 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5900 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5901 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5902 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5903 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5904 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5905 tcg_temp_free_i32(tmp);
60011498 5906 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5907 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5908 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5909 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5910 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5911 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5912 tcg_temp_free_i32(tmp2);
5913 tcg_temp_free_i32(tmp3);
60011498 5914 break;
9ee6e8bb
PB
5915 default:
5916 elementwise:
5917 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5918 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5919 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5920 neon_reg_offset(rm, pass));
dd8fbd78 5921 TCGV_UNUSED(tmp);
9ee6e8bb 5922 } else {
dd8fbd78 5923 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5924 }
5925 switch (op) {
600b828c 5926 case NEON_2RM_VREV32:
9ee6e8bb 5927 switch (size) {
dd8fbd78
FN
5928 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5929 case 1: gen_swap_half(tmp); break;
600b828c 5930 default: abort();
9ee6e8bb
PB
5931 }
5932 break;
600b828c 5933 case NEON_2RM_VREV16:
dd8fbd78 5934 gen_rev16(tmp);
9ee6e8bb 5935 break;
600b828c 5936 case NEON_2RM_VCLS:
9ee6e8bb 5937 switch (size) {
dd8fbd78
FN
5938 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5939 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5940 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5941 default: abort();
9ee6e8bb
PB
5942 }
5943 break;
600b828c 5944 case NEON_2RM_VCLZ:
9ee6e8bb 5945 switch (size) {
dd8fbd78
FN
5946 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5947 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5948 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5949 default: abort();
9ee6e8bb
PB
5950 }
5951 break;
600b828c 5952 case NEON_2RM_VCNT:
dd8fbd78 5953 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5954 break;
600b828c 5955 case NEON_2RM_VMVN:
dd8fbd78 5956 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5957 break;
600b828c 5958 case NEON_2RM_VQABS:
9ee6e8bb 5959 switch (size) {
02da0b2d
PM
5960 case 0:
5961 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5962 break;
5963 case 1:
5964 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5965 break;
5966 case 2:
5967 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5968 break;
600b828c 5969 default: abort();
9ee6e8bb
PB
5970 }
5971 break;
600b828c 5972 case NEON_2RM_VQNEG:
9ee6e8bb 5973 switch (size) {
02da0b2d
PM
5974 case 0:
5975 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5976 break;
5977 case 1:
5978 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5979 break;
5980 case 2:
5981 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5982 break;
600b828c 5983 default: abort();
9ee6e8bb
PB
5984 }
5985 break;
600b828c 5986 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5987 tmp2 = tcg_const_i32(0);
9ee6e8bb 5988 switch(size) {
dd8fbd78
FN
5989 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5990 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5991 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5992 default: abort();
9ee6e8bb 5993 }
dd8fbd78 5994 tcg_temp_free(tmp2);
600b828c 5995 if (op == NEON_2RM_VCLE0) {
dd8fbd78 5996 tcg_gen_not_i32(tmp, tmp);
600b828c 5997 }
9ee6e8bb 5998 break;
600b828c 5999 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6000 tmp2 = tcg_const_i32(0);
9ee6e8bb 6001 switch(size) {
dd8fbd78
FN
6002 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6003 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6004 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6005 default: abort();
9ee6e8bb 6006 }
dd8fbd78 6007 tcg_temp_free(tmp2);
600b828c 6008 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6009 tcg_gen_not_i32(tmp, tmp);
600b828c 6010 }
9ee6e8bb 6011 break;
600b828c 6012 case NEON_2RM_VCEQ0:
dd8fbd78 6013 tmp2 = tcg_const_i32(0);
9ee6e8bb 6014 switch(size) {
dd8fbd78
FN
6015 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6016 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6017 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6018 default: abort();
9ee6e8bb 6019 }
dd8fbd78 6020 tcg_temp_free(tmp2);
9ee6e8bb 6021 break;
600b828c 6022 case NEON_2RM_VABS:
9ee6e8bb 6023 switch(size) {
dd8fbd78
FN
6024 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6025 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6026 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6027 default: abort();
9ee6e8bb
PB
6028 }
6029 break;
600b828c 6030 case NEON_2RM_VNEG:
dd8fbd78
FN
6031 tmp2 = tcg_const_i32(0);
6032 gen_neon_rsb(size, tmp, tmp2);
6033 tcg_temp_free(tmp2);
9ee6e8bb 6034 break;
600b828c 6035 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6036 {
6037 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6038 tmp2 = tcg_const_i32(0);
aa47cfdd 6039 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6040 tcg_temp_free(tmp2);
aa47cfdd 6041 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6042 break;
aa47cfdd 6043 }
600b828c 6044 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6045 {
6046 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6047 tmp2 = tcg_const_i32(0);
aa47cfdd 6048 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6049 tcg_temp_free(tmp2);
aa47cfdd 6050 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6051 break;
aa47cfdd 6052 }
600b828c 6053 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6054 {
6055 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6056 tmp2 = tcg_const_i32(0);
aa47cfdd 6057 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6058 tcg_temp_free(tmp2);
aa47cfdd 6059 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6060 break;
aa47cfdd 6061 }
600b828c 6062 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6063 {
6064 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6065 tmp2 = tcg_const_i32(0);
aa47cfdd 6066 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6067 tcg_temp_free(tmp2);
aa47cfdd 6068 tcg_temp_free_ptr(fpstatus);
0e326109 6069 break;
aa47cfdd 6070 }
600b828c 6071 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6072 {
6073 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6074 tmp2 = tcg_const_i32(0);
aa47cfdd 6075 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6076 tcg_temp_free(tmp2);
aa47cfdd 6077 tcg_temp_free_ptr(fpstatus);
0e326109 6078 break;
aa47cfdd 6079 }
600b828c 6080 case NEON_2RM_VABS_F:
4373f3ce 6081 gen_vfp_abs(0);
9ee6e8bb 6082 break;
600b828c 6083 case NEON_2RM_VNEG_F:
4373f3ce 6084 gen_vfp_neg(0);
9ee6e8bb 6085 break;
600b828c 6086 case NEON_2RM_VSWP:
dd8fbd78
FN
6087 tmp2 = neon_load_reg(rd, pass);
6088 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6089 break;
600b828c 6090 case NEON_2RM_VTRN:
dd8fbd78 6091 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6092 switch (size) {
dd8fbd78
FN
6093 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6094 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6095 default: abort();
9ee6e8bb 6096 }
dd8fbd78 6097 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6098 break;
600b828c 6099 case NEON_2RM_VRECPE:
dd8fbd78 6100 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6101 break;
600b828c 6102 case NEON_2RM_VRSQRTE:
dd8fbd78 6103 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6104 break;
600b828c 6105 case NEON_2RM_VRECPE_F:
4373f3ce 6106 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6107 break;
600b828c 6108 case NEON_2RM_VRSQRTE_F:
4373f3ce 6109 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6110 break;
600b828c 6111 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6112 gen_vfp_sito(0, 1);
9ee6e8bb 6113 break;
600b828c 6114 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6115 gen_vfp_uito(0, 1);
9ee6e8bb 6116 break;
600b828c 6117 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6118 gen_vfp_tosiz(0, 1);
9ee6e8bb 6119 break;
600b828c 6120 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6121 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6122 break;
6123 default:
600b828c
PM
6124 /* Reserved op values were caught by the
6125 * neon_2rm_sizes[] check earlier.
6126 */
6127 abort();
9ee6e8bb 6128 }
600b828c 6129 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6130 tcg_gen_st_f32(cpu_F0s, cpu_env,
6131 neon_reg_offset(rd, pass));
9ee6e8bb 6132 } else {
dd8fbd78 6133 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6134 }
6135 }
6136 break;
6137 }
6138 } else if ((insn & (1 << 10)) == 0) {
6139 /* VTBL, VTBX. */
56907d77
PM
6140 int n = ((insn >> 8) & 3) + 1;
6141 if ((rn + n) > 32) {
6142 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6143 * helper function running off the end of the register file.
6144 */
6145 return 1;
6146 }
6147 n <<= 3;
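            /* n is now the table length in bytes (8 bytes per list register). */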
9ee6e8bb 6148 if (insn & (1 << 6)) {
8f8e3aa4 6149 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6150 } else {
7d1b0095 6151 tmp = tcg_temp_new_i32();
8f8e3aa4 6152 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6153 }
8f8e3aa4 6154 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6155 tmp4 = tcg_const_i32(rn);
6156 tmp5 = tcg_const_i32(n);
9ef39277 6157 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6158 tcg_temp_free_i32(tmp);
9ee6e8bb 6159 if (insn & (1 << 6)) {
8f8e3aa4 6160 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6161 } else {
7d1b0095 6162 tmp = tcg_temp_new_i32();
8f8e3aa4 6163 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6164 }
8f8e3aa4 6165 tmp3 = neon_load_reg(rm, 1);
9ef39277 6166 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6167 tcg_temp_free_i32(tmp5);
6168 tcg_temp_free_i32(tmp4);
8f8e3aa4 6169 neon_store_reg(rd, 0, tmp2);
3018f259 6170 neon_store_reg(rd, 1, tmp3);
7d1b0095 6171 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6172 } else if ((insn & 0x380) == 0) {
6173 /* VDUP */
133da6aa
JR
6174 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6175 return 1;
6176 }
9ee6e8bb 6177 if (insn & (1 << 19)) {
dd8fbd78 6178 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6179 } else {
dd8fbd78 6180 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6181 }
6182 if (insn & (1 << 16)) {
dd8fbd78 6183 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6184 } else if (insn & (1 << 17)) {
6185 if ((insn >> 18) & 1)
dd8fbd78 6186 gen_neon_dup_high16(tmp);
9ee6e8bb 6187 else
dd8fbd78 6188 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6189 }
6190 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6191 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6192 tcg_gen_mov_i32(tmp2, tmp);
6193 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6194 }
7d1b0095 6195 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6196 } else {
6197 return 1;
6198 }
6199 }
6200 }
6201 return 0;
6202}
6203
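/* Decode a coprocessor instruction: cp0/cp1 (iWMMXt or XScale DSP), cp10/cp11
 * (VFP), or a generic coprocessor register access (mcr/mrc/mcrr/mrrc).
 * Returns nonzero if the instruction cannot be handled here, in which case
 * the caller treats it as UNDEF.
 */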
0ecb72a5 6204static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6205{
4b6a83fb
PM
6206 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6207 const ARMCPRegInfo *ri;
6208 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6209
6210 cpnum = (insn >> 8) & 0xf;
6211 if (arm_feature(env, ARM_FEATURE_XSCALE)
6212 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6213 return 1;
6214
4b6a83fb 6215 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6216 switch (cpnum) {
6217 case 0:
6218 case 1:
6219 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6220 return disas_iwmmxt_insn(env, s, insn);
6221 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6222 return disas_dsp_insn(env, s, insn);
6223 }
6224 return 1;
6225 case 10:
6226 case 11:
6227 return disas_vfp_insn (env, s, insn);
4b6a83fb
PM
6228 default:
6229 break;
6230 }
6231
6232 /* Otherwise treat as a generic register access */
6233 is64 = (insn & (1 << 25)) == 0;
6234 if (!is64 && ((insn & (1 << 4)) == 0)) {
6235 /* cdp */
6236 return 1;
6237 }
6238
6239 crm = insn & 0xf;
6240 if (is64) {
6241 crn = 0;
6242 opc1 = (insn >> 4) & 0xf;
6243 opc2 = 0;
6244 rt2 = (insn >> 16) & 0xf;
6245 } else {
6246 crn = (insn >> 16) & 0xf;
6247 opc1 = (insn >> 21) & 7;
6248 opc2 = (insn >> 5) & 7;
6249 rt2 = 0;
6250 }
6251 isread = (insn >> 20) & 1;
6252 rt = (insn >> 12) & 0xf;
6253
6254 ri = get_arm_cp_reginfo(cpu,
6255 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6256 if (ri) {
6257 /* Check access permissions */
6258 if (!cp_access_ok(env, ri, isread)) {
6259 return 1;
6260 }
6261
6262 /* Handle special cases first */
6263 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6264 case ARM_CP_NOP:
6265 return 0;
6266 case ARM_CP_WFI:
6267 if (isread) {
6268 return 1;
6269 }
6270 gen_set_pc_im(s->pc);
6271 s->is_jmp = DISAS_WFI;
2bee5105 6272 return 0;
4b6a83fb
PM
6273 default:
6274 break;
6275 }
6276
6277 if (isread) {
6278 /* Read */
6279 if (is64) {
6280 TCGv_i64 tmp64;
6281 TCGv_i32 tmp;
6282 if (ri->type & ARM_CP_CONST) {
6283 tmp64 = tcg_const_i64(ri->resetvalue);
6284 } else if (ri->readfn) {
6285 TCGv_ptr tmpptr;
6286 gen_set_pc_im(s->pc);
6287 tmp64 = tcg_temp_new_i64();
6288 tmpptr = tcg_const_ptr(ri);
6289 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6290 tcg_temp_free_ptr(tmpptr);
6291 } else {
6292 tmp64 = tcg_temp_new_i64();
6293 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6294 }
6295 tmp = tcg_temp_new_i32();
6296 tcg_gen_trunc_i64_i32(tmp, tmp64);
6297 store_reg(s, rt, tmp);
6298 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6299 tmp = tcg_temp_new_i32();
4b6a83fb 6300 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6301 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6302 store_reg(s, rt2, tmp);
6303 } else {
6304 TCGv tmp;
6305 if (ri->type & ARM_CP_CONST) {
6306 tmp = tcg_const_i32(ri->resetvalue);
6307 } else if (ri->readfn) {
6308 TCGv_ptr tmpptr;
6309 gen_set_pc_im(s->pc);
6310 tmp = tcg_temp_new_i32();
6311 tmpptr = tcg_const_ptr(ri);
6312 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6313 tcg_temp_free_ptr(tmpptr);
6314 } else {
6315 tmp = load_cpu_offset(ri->fieldoffset);
6316 }
6317 if (rt == 15) {
 6318                        /* A destination register of r15 for a 32-bit load sets
 6319                         * the condition codes from the high 4 bits of the value.
 6320                         */
6321 gen_set_nzcv(tmp);
6322 tcg_temp_free_i32(tmp);
6323 } else {
6324 store_reg(s, rt, tmp);
6325 }
6326 }
6327 } else {
6328 /* Write */
6329 if (ri->type & ARM_CP_CONST) {
6330 /* If not forbidden by access permissions, treat as WI */
6331 return 0;
6332 }
6333
6334 if (is64) {
6335 TCGv tmplo, tmphi;
6336 TCGv_i64 tmp64 = tcg_temp_new_i64();
6337 tmplo = load_reg(s, rt);
6338 tmphi = load_reg(s, rt2);
6339 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6340 tcg_temp_free_i32(tmplo);
6341 tcg_temp_free_i32(tmphi);
6342 if (ri->writefn) {
6343 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6344 gen_set_pc_im(s->pc);
6345 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6346 tcg_temp_free_ptr(tmpptr);
6347 } else {
6348 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6349 }
6350 tcg_temp_free_i64(tmp64);
6351 } else {
6352 if (ri->writefn) {
6353 TCGv tmp;
6354 TCGv_ptr tmpptr;
6355 gen_set_pc_im(s->pc);
6356 tmp = load_reg(s, rt);
6357 tmpptr = tcg_const_ptr(ri);
6358 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6359 tcg_temp_free_ptr(tmpptr);
6360 tcg_temp_free_i32(tmp);
6361 } else {
6362 TCGv tmp = load_reg(s, rt);
6363 store_cpu_offset(tmp, ri->fieldoffset);
6364 }
6365 }
6366 /* We default to ending the TB on a coprocessor register write,
6367 * but allow this to be suppressed by the register definition
6368 * (usually only necessary to work around guest bugs).
6369 */
6370 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6371 gen_lookup_tb(s);
6372 }
6373 }
6374 return 0;
6375 }
6376
4a9a539f 6377 return 1;
9ee6e8bb
PB
6378}
6379
5e3f878a
PB
6380
6381/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6382static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6383{
6384 TCGv tmp;
7d1b0095 6385 tmp = tcg_temp_new_i32();
5e3f878a
PB
6386 tcg_gen_trunc_i64_i32(tmp, val);
6387 store_reg(s, rlow, tmp);
7d1b0095 6388 tmp = tcg_temp_new_i32();
5e3f878a
PB
6389 tcg_gen_shri_i64(val, val, 32);
6390 tcg_gen_trunc_i64_i32(tmp, val);
6391 store_reg(s, rhigh, tmp);
6392}
6393
6394/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6395static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6396{
a7812ae4 6397 TCGv_i64 tmp;
5e3f878a
PB
6398 TCGv tmp2;
6399
36aa55dc 6400 /* Load value and extend to 64 bits. */
a7812ae4 6401 tmp = tcg_temp_new_i64();
5e3f878a
PB
6402 tmp2 = load_reg(s, rlow);
6403 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6404 tcg_temp_free_i32(tmp2);
5e3f878a 6405 tcg_gen_add_i64(val, val, tmp);
b75263d6 6406 tcg_temp_free_i64(tmp);
5e3f878a
PB
6407}
6408
6409/* load and add a 64-bit value from a register pair. */
a7812ae4 6410static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6411{
a7812ae4 6412 TCGv_i64 tmp;
36aa55dc
PB
6413 TCGv tmpl;
6414 TCGv tmph;
5e3f878a
PB
6415
6416 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6417 tmpl = load_reg(s, rlow);
6418 tmph = load_reg(s, rhigh);
a7812ae4 6419 tmp = tcg_temp_new_i64();
36aa55dc 6420 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6421 tcg_temp_free_i32(tmpl);
6422 tcg_temp_free_i32(tmph);
5e3f878a 6423 tcg_gen_add_i64(val, val, tmp);
b75263d6 6424 tcg_temp_free_i64(tmp);
5e3f878a
PB
6425}
6426
6427/* Set N and Z flags from a 64-bit value. */
a7812ae4 6428static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6429{
7d1b0095 6430 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6431 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6432 gen_logic_CC(tmp);
7d1b0095 6433 tcg_temp_free_i32(tmp);
5e3f878a
PB
6434}
6435
426f5abc
PB
6436/* Load/Store exclusive instructions are implemented by remembering
6437 the value/address loaded, and seeing if these are the same
b90372ad 6438 when the store is performed. This should be sufficient to implement
426f5abc
PB
6439 the architecturally mandated semantics, and avoids having to monitor
6440 regular stores.
6441
6442 In system emulation mode only one CPU will be running at once, so
6443 this sequence is effectively atomic. In user emulation mode we
6444 throw an exception and handle the atomic operation elsewhere. */
6445static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6446 TCGv addr, int size)
6447{
6448 TCGv tmp;
6449
6450 switch (size) {
6451 case 0:
6452 tmp = gen_ld8u(addr, IS_USER(s));
6453 break;
6454 case 1:
6455 tmp = gen_ld16u(addr, IS_USER(s));
6456 break;
6457 case 2:
6458 case 3:
6459 tmp = gen_ld32(addr, IS_USER(s));
6460 break;
6461 default:
6462 abort();
6463 }
6464 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6465 store_reg(s, rt, tmp);
6466 if (size == 3) {
7d1b0095 6467 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6468 tcg_gen_addi_i32(tmp2, addr, 4);
6469 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6470 tcg_temp_free_i32(tmp2);
426f5abc
PB
6471 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6472 store_reg(s, rt2, tmp);
6473 }
6474 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6475}
6476
6477static void gen_clrex(DisasContext *s)
6478{
6479 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6480}
6481
6482#ifdef CONFIG_USER_ONLY
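/* In user-mode emulation we do not perform the store here: the operands are
 * recorded and EXCP_STREX is raised so the operation can be completed
 * atomically outside generated code.
 */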
6483static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6484 TCGv addr, int size)
6485{
6486 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6487 tcg_gen_movi_i32(cpu_exclusive_info,
6488 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6489 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6490}
6491#else
6492static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6493 TCGv addr, int size)
6494{
6495 TCGv tmp;
6496 int done_label;
6497 int fail_label;
6498
6499 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6500 [addr] = {Rt};
6501 {Rd} = 0;
6502 } else {
6503 {Rd} = 1;
6504 } */
6505 fail_label = gen_new_label();
6506 done_label = gen_new_label();
6507 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6508 switch (size) {
6509 case 0:
6510 tmp = gen_ld8u(addr, IS_USER(s));
6511 break;
6512 case 1:
6513 tmp = gen_ld16u(addr, IS_USER(s));
6514 break;
6515 case 2:
6516 case 3:
6517 tmp = gen_ld32(addr, IS_USER(s));
6518 break;
6519 default:
6520 abort();
6521 }
6522 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6523 tcg_temp_free_i32(tmp);
426f5abc 6524 if (size == 3) {
7d1b0095 6525 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6526 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6527 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6528 tcg_temp_free_i32(tmp2);
426f5abc 6529 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6530 tcg_temp_free_i32(tmp);
426f5abc
PB
6531 }
6532 tmp = load_reg(s, rt);
6533 switch (size) {
6534 case 0:
6535 gen_st8(tmp, addr, IS_USER(s));
6536 break;
6537 case 1:
6538 gen_st16(tmp, addr, IS_USER(s));
6539 break;
6540 case 2:
6541 case 3:
6542 gen_st32(tmp, addr, IS_USER(s));
6543 break;
6544 default:
6545 abort();
6546 }
6547 if (size == 3) {
6548 tcg_gen_addi_i32(addr, addr, 4);
6549 tmp = load_reg(s, rt2);
6550 gen_st32(tmp, addr, IS_USER(s));
6551 }
6552 tcg_gen_movi_i32(cpu_R[rd], 0);
6553 tcg_gen_br(done_label);
6554 gen_set_label(fail_label);
6555 tcg_gen_movi_i32(cpu_R[rd], 1);
6556 gen_set_label(done_label);
6557 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6558}
6559#endif
6560
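/* Translate a single 32-bit ARM instruction: fetch it at s->pc, advance the
 * PC and emit the corresponding TCG ops.
 */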
0ecb72a5 6561static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6562{
6563 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6564 TCGv tmp;
3670669c 6565 TCGv tmp2;
6ddbc6e4 6566 TCGv tmp3;
b0109805 6567 TCGv addr;
a7812ae4 6568 TCGv_i64 tmp64;
9ee6e8bb 6569
d31dd73e 6570 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6571 s->pc += 4;
6572
6573 /* M variants do not implement ARM mode. */
6574 if (IS_M(env))
6575 goto illegal_op;
6576 cond = insn >> 28;
6577 if (cond == 0xf){
be5e7a76
DES
6578 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6579 * choose to UNDEF. In ARMv5 and above the space is used
6580 * for miscellaneous unconditional instructions.
6581 */
6582 ARCH(5);
6583
9ee6e8bb
PB
6584 /* Unconditional instructions. */
6585 if (((insn >> 25) & 7) == 1) {
6586 /* NEON Data processing. */
6587 if (!arm_feature(env, ARM_FEATURE_NEON))
6588 goto illegal_op;
6589
6590 if (disas_neon_data_insn(env, s, insn))
6591 goto illegal_op;
6592 return;
6593 }
6594 if ((insn & 0x0f100000) == 0x04000000) {
6595 /* NEON load/store. */
6596 if (!arm_feature(env, ARM_FEATURE_NEON))
6597 goto illegal_op;
6598
6599 if (disas_neon_ls_insn(env, s, insn))
6600 goto illegal_op;
6601 return;
6602 }
3d185e5d
PM
6603 if (((insn & 0x0f30f000) == 0x0510f000) ||
6604 ((insn & 0x0f30f010) == 0x0710f000)) {
6605 if ((insn & (1 << 22)) == 0) {
6606 /* PLDW; v7MP */
6607 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6608 goto illegal_op;
6609 }
6610 }
6611 /* Otherwise PLD; v5TE+ */
be5e7a76 6612 ARCH(5TE);
3d185e5d
PM
6613 return;
6614 }
6615 if (((insn & 0x0f70f000) == 0x0450f000) ||
6616 ((insn & 0x0f70f010) == 0x0650f000)) {
6617 ARCH(7);
6618 return; /* PLI; V7 */
6619 }
6620 if (((insn & 0x0f700000) == 0x04100000) ||
6621 ((insn & 0x0f700010) == 0x06100000)) {
6622 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6623 goto illegal_op;
6624 }
6625 return; /* v7MP: Unallocated memory hint: must NOP */
6626 }
6627
6628 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6629 ARCH(6);
6630 /* setend */
10962fd5
PM
6631 if (((insn >> 9) & 1) != s->bswap_code) {
6632 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6633 goto illegal_op;
6634 }
6635 return;
6636 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6637 switch ((insn >> 4) & 0xf) {
6638 case 1: /* clrex */
6639 ARCH(6K);
426f5abc 6640 gen_clrex(s);
9ee6e8bb
PB
6641 return;
6642 case 4: /* dsb */
6643 case 5: /* dmb */
6644 case 6: /* isb */
6645 ARCH(7);
6646 /* We don't emulate caches so these are a no-op. */
6647 return;
6648 default:
6649 goto illegal_op;
6650 }
6651 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6652 /* srs */
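            /* SRS: store LR and SPSR to the stack of the mode selected by
             * the low five bits of the instruction (op1).
             */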
c67b6b71 6653 int32_t offset;
9ee6e8bb
PB
6654 if (IS_USER(s))
6655 goto illegal_op;
6656 ARCH(6);
6657 op1 = (insn & 0x1f);
7d1b0095 6658 addr = tcg_temp_new_i32();
39ea3d4e
PM
6659 tmp = tcg_const_i32(op1);
6660 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6661 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6662 i = (insn >> 23) & 3;
6663 switch (i) {
6664 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6665 case 1: offset = 0; break; /* IA */
6666 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6667 case 3: offset = 4; break; /* IB */
6668 default: abort();
6669 }
6670 if (offset)
b0109805
PB
6671 tcg_gen_addi_i32(addr, addr, offset);
6672 tmp = load_reg(s, 14);
6673 gen_st32(tmp, addr, 0);
c67b6b71 6674 tmp = load_cpu_field(spsr);
b0109805
PB
6675 tcg_gen_addi_i32(addr, addr, 4);
6676 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6677 if (insn & (1 << 21)) {
6678 /* Base writeback. */
6679 switch (i) {
6680 case 0: offset = -8; break;
c67b6b71
FN
6681 case 1: offset = 4; break;
6682 case 2: offset = -4; break;
9ee6e8bb
PB
6683 case 3: offset = 0; break;
6684 default: abort();
6685 }
6686 if (offset)
c67b6b71 6687 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6688 tmp = tcg_const_i32(op1);
6689 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6690 tcg_temp_free_i32(tmp);
7d1b0095 6691 tcg_temp_free_i32(addr);
b0109805 6692 } else {
7d1b0095 6693 tcg_temp_free_i32(addr);
9ee6e8bb 6694 }
a990f58f 6695 return;
ea825eee 6696 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6697 /* rfe */
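            /* RFE: reload the PC and CPSR from two consecutive words at the
             * address derived from Rn and the addressing mode.
             */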
c67b6b71 6698 int32_t offset;
9ee6e8bb
PB
6699 if (IS_USER(s))
6700 goto illegal_op;
6701 ARCH(6);
6702 rn = (insn >> 16) & 0xf;
b0109805 6703 addr = load_reg(s, rn);
9ee6e8bb
PB
6704 i = (insn >> 23) & 3;
6705 switch (i) {
b0109805 6706 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6707 case 1: offset = 0; break; /* IA */
6708 case 2: offset = -8; break; /* DB */
b0109805 6709 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6710 default: abort();
6711 }
6712 if (offset)
b0109805
PB
6713 tcg_gen_addi_i32(addr, addr, offset);
6714 /* Load PC into tmp and CPSR into tmp2. */
6715 tmp = gen_ld32(addr, 0);
6716 tcg_gen_addi_i32(addr, addr, 4);
6717 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6718 if (insn & (1 << 21)) {
6719 /* Base writeback. */
6720 switch (i) {
b0109805 6721 case 0: offset = -8; break;
c67b6b71
FN
6722 case 1: offset = 4; break;
6723 case 2: offset = -4; break;
b0109805 6724 case 3: offset = 0; break;
9ee6e8bb
PB
6725 default: abort();
6726 }
6727 if (offset)
b0109805
PB
6728 tcg_gen_addi_i32(addr, addr, offset);
6729 store_reg(s, rn, addr);
6730 } else {
7d1b0095 6731 tcg_temp_free_i32(addr);
9ee6e8bb 6732 }
b0109805 6733 gen_rfe(s, tmp, tmp2);
c67b6b71 6734 return;
9ee6e8bb
PB
6735 } else if ((insn & 0x0e000000) == 0x0a000000) {
6736 /* branch link and change to thumb (blx <offset>) */
6737 int32_t offset;
6738
6739 val = (uint32_t)s->pc;
7d1b0095 6740 tmp = tcg_temp_new_i32();
d9ba4830
PB
6741 tcg_gen_movi_i32(tmp, val);
6742 store_reg(s, 14, tmp);
9ee6e8bb
PB
6743 /* Sign-extend the 24-bit offset */
6744 offset = (((int32_t)insn) << 8) >> 8;
6745 /* offset * 4 + bit24 * 2 + (thumb bit) */
6746 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6747 /* pipeline offset */
6748 val += 4;
be5e7a76 6749 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6750 gen_bx_im(s, val);
9ee6e8bb
PB
6751 return;
6752 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6753 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6754 /* iWMMXt register transfer. */
6755 if (env->cp15.c15_cpar & (1 << 1))
6756 if (!disas_iwmmxt_insn(env, s, insn))
6757 return;
6758 }
6759 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6760 /* Coprocessor double register transfer. */
be5e7a76 6761 ARCH(5TE);
9ee6e8bb
PB
6762 } else if ((insn & 0x0f000010) == 0x0e000010) {
6763 /* Additional coprocessor register transfer. */
7997d92f 6764 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6765 uint32_t mask;
6766 uint32_t val;
6767 /* cps (privileged) */
6768 if (IS_USER(s))
6769 return;
6770 mask = val = 0;
6771 if (insn & (1 << 19)) {
6772 if (insn & (1 << 8))
6773 mask |= CPSR_A;
6774 if (insn & (1 << 7))
6775 mask |= CPSR_I;
6776 if (insn & (1 << 6))
6777 mask |= CPSR_F;
6778 if (insn & (1 << 18))
6779 val |= mask;
6780 }
7997d92f 6781 if (insn & (1 << 17)) {
9ee6e8bb
PB
6782 mask |= CPSR_M;
6783 val |= (insn & 0x1f);
6784 }
6785 if (mask) {
2fbac54b 6786 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6787 }
6788 return;
6789 }
6790 goto illegal_op;
6791 }
6792 if (cond != 0xe) {
6793 /* if not always execute, we generate a conditional jump to
6794 next instruction */
6795 s->condlabel = gen_new_label();
d9ba4830 6796 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6797 s->condjmp = 1;
6798 }
6799 if ((insn & 0x0f900000) == 0x03000000) {
6800 if ((insn & (1 << 21)) == 0) {
6801 ARCH(6T2);
6802 rd = (insn >> 12) & 0xf;
6803 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6804 if ((insn & (1 << 22)) == 0) {
6805 /* MOVW */
7d1b0095 6806 tmp = tcg_temp_new_i32();
5e3f878a 6807 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6808 } else {
6809 /* MOVT */
5e3f878a 6810 tmp = load_reg(s, rd);
86831435 6811 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6812 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6813 }
5e3f878a 6814 store_reg(s, rd, tmp);
9ee6e8bb
PB
6815 } else {
6816 if (((insn >> 12) & 0xf) != 0xf)
6817 goto illegal_op;
6818 if (((insn >> 16) & 0xf) == 0) {
6819 gen_nop_hint(s, insn & 0xff);
6820 } else {
6821 /* CPSR = immediate */
6822 val = insn & 0xff;
6823 shift = ((insn >> 8) & 0xf) * 2;
6824 if (shift)
6825 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6826 i = ((insn & (1 << 22)) != 0);
2fbac54b 6827 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6828 goto illegal_op;
6829 }
6830 }
6831 } else if ((insn & 0x0f900000) == 0x01000000
6832 && (insn & 0x00000090) != 0x00000090) {
6833 /* miscellaneous instructions */
6834 op1 = (insn >> 21) & 3;
6835 sh = (insn >> 4) & 0xf;
6836 rm = insn & 0xf;
6837 switch (sh) {
6838 case 0x0: /* move program status register */
6839 if (op1 & 1) {
6840 /* PSR = reg */
2fbac54b 6841 tmp = load_reg(s, rm);
9ee6e8bb 6842 i = ((op1 & 2) != 0);
2fbac54b 6843 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6844 goto illegal_op;
6845 } else {
6846 /* reg = PSR */
6847 rd = (insn >> 12) & 0xf;
6848 if (op1 & 2) {
6849 if (IS_USER(s))
6850 goto illegal_op;
d9ba4830 6851 tmp = load_cpu_field(spsr);
9ee6e8bb 6852 } else {
7d1b0095 6853 tmp = tcg_temp_new_i32();
9ef39277 6854 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6855 }
d9ba4830 6856 store_reg(s, rd, tmp);
9ee6e8bb
PB
6857 }
6858 break;
6859 case 0x1:
6860 if (op1 == 1) {
6861 /* branch/exchange thumb (bx). */
be5e7a76 6862 ARCH(4T);
d9ba4830
PB
6863 tmp = load_reg(s, rm);
6864 gen_bx(s, tmp);
9ee6e8bb
PB
6865 } else if (op1 == 3) {
6866 /* clz */
be5e7a76 6867 ARCH(5);
9ee6e8bb 6868 rd = (insn >> 12) & 0xf;
1497c961
PB
6869 tmp = load_reg(s, rm);
6870 gen_helper_clz(tmp, tmp);
6871 store_reg(s, rd, tmp);
9ee6e8bb
PB
6872 } else {
6873 goto illegal_op;
6874 }
6875 break;
6876 case 0x2:
6877 if (op1 == 1) {
6878 ARCH(5J); /* bxj */
6879 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6880 tmp = load_reg(s, rm);
6881 gen_bx(s, tmp);
9ee6e8bb
PB
6882 } else {
6883 goto illegal_op;
6884 }
6885 break;
6886 case 0x3:
6887 if (op1 != 1)
6888 goto illegal_op;
6889
be5e7a76 6890 ARCH(5);
9ee6e8bb 6891 /* branch link/exchange thumb (blx) */
d9ba4830 6892 tmp = load_reg(s, rm);
7d1b0095 6893 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6894 tcg_gen_movi_i32(tmp2, s->pc);
6895 store_reg(s, 14, tmp2);
6896 gen_bx(s, tmp);
9ee6e8bb
PB
6897 break;
6898 case 0x5: /* saturating add/subtract */
be5e7a76 6899 ARCH(5TE);
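            /* QADD/QSUB/QDADD/QDSUB: op1 bit 1 selects the doubling forms,
             * op1 bit 0 selects subtract rather than add.
             */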
9ee6e8bb
PB
6900 rd = (insn >> 12) & 0xf;
6901 rn = (insn >> 16) & 0xf;
b40d0353 6902 tmp = load_reg(s, rm);
5e3f878a 6903 tmp2 = load_reg(s, rn);
9ee6e8bb 6904 if (op1 & 2)
9ef39277 6905 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6906 if (op1 & 1)
9ef39277 6907 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6908 else
9ef39277 6909 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6910 tcg_temp_free_i32(tmp2);
5e3f878a 6911 store_reg(s, rd, tmp);
9ee6e8bb 6912 break;
49e14940
AL
6913 case 7:
6914 /* SMC instruction (op1 == 3)
6915 and undefined instructions (op1 == 0 || op1 == 2)
6916 will trap */
6917 if (op1 != 1) {
6918 goto illegal_op;
6919 }
6920 /* bkpt */
be5e7a76 6921 ARCH(5);
bc4a0de0 6922 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6923 break;
6924 case 0x8: /* signed multiply */
6925 case 0xa:
6926 case 0xc:
6927 case 0xe:
be5e7a76 6928 ARCH(5TE);
9ee6e8bb
PB
6929 rs = (insn >> 8) & 0xf;
6930 rn = (insn >> 12) & 0xf;
6931 rd = (insn >> 16) & 0xf;
6932 if (op1 == 1) {
6933 /* (32 * 16) >> 16 */
5e3f878a
PB
6934 tmp = load_reg(s, rm);
6935 tmp2 = load_reg(s, rs);
9ee6e8bb 6936 if (sh & 4)
5e3f878a 6937 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6938 else
5e3f878a 6939 gen_sxth(tmp2);
a7812ae4
PB
6940 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6941 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6942 tmp = tcg_temp_new_i32();
a7812ae4 6943 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6944 tcg_temp_free_i64(tmp64);
9ee6e8bb 6945 if ((sh & 2) == 0) {
5e3f878a 6946 tmp2 = load_reg(s, rn);
9ef39277 6947 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6948 tcg_temp_free_i32(tmp2);
9ee6e8bb 6949 }
5e3f878a 6950 store_reg(s, rd, tmp);
9ee6e8bb
PB
6951 } else {
6952 /* 16 * 16 */
5e3f878a
PB
6953 tmp = load_reg(s, rm);
6954 tmp2 = load_reg(s, rs);
6955 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6956 tcg_temp_free_i32(tmp2);
9ee6e8bb 6957 if (op1 == 2) {
a7812ae4
PB
6958 tmp64 = tcg_temp_new_i64();
6959 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6960 tcg_temp_free_i32(tmp);
a7812ae4
PB
6961 gen_addq(s, tmp64, rn, rd);
6962 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6963 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6964 } else {
6965 if (op1 == 0) {
5e3f878a 6966 tmp2 = load_reg(s, rn);
9ef39277 6967 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6968 tcg_temp_free_i32(tmp2);
9ee6e8bb 6969 }
5e3f878a 6970 store_reg(s, rd, tmp);
9ee6e8bb
PB
6971 }
6972 }
6973 break;
6974 default:
6975 goto illegal_op;
6976 }
6977 } else if (((insn & 0x0e000000) == 0 &&
6978 (insn & 0x00000090) != 0x90) ||
6979 ((insn & 0x0e000000) == (1 << 25))) {
6980 int set_cc, logic_cc, shiftop;
6981
6982 op1 = (insn >> 21) & 0xf;
6983 set_cc = (insn >> 20) & 1;
6984 logic_cc = table_logic_cc[op1] & set_cc;
6985
6986 /* data processing instruction */
6987 if (insn & (1 << 25)) {
6988 /* immediate operand */
6989 val = insn & 0xff;
6990 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6991 if (shift) {
9ee6e8bb 6992 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6993 }
7d1b0095 6994 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6995 tcg_gen_movi_i32(tmp2, val);
6996 if (logic_cc && shift) {
6997 gen_set_CF_bit31(tmp2);
6998 }
9ee6e8bb
PB
6999 } else {
7000 /* register */
7001 rm = (insn) & 0xf;
e9bb4aa9 7002 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7003 shiftop = (insn >> 5) & 3;
7004 if (!(insn & (1 << 4))) {
7005 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7006 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7007 } else {
7008 rs = (insn >> 8) & 0xf;
8984bd2e 7009 tmp = load_reg(s, rs);
e9bb4aa9 7010 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7011 }
7012 }
7013 if (op1 != 0x0f && op1 != 0x0d) {
7014 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7015 tmp = load_reg(s, rn);
7016 } else {
7017 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7018 }
7019 rd = (insn >> 12) & 0xf;
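        /* op1 is the standard data-processing opcode:
         * 0=AND 1=EOR 2=SUB 3=RSB 4=ADD 5=ADC 6=SBC 7=RSC
         * 8=TST 9=TEQ a=CMP b=CMN c=ORR d=MOV e=BIC f=MVN
         */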
7020 switch(op1) {
7021 case 0x00:
e9bb4aa9
JR
7022 tcg_gen_and_i32(tmp, tmp, tmp2);
7023 if (logic_cc) {
7024 gen_logic_CC(tmp);
7025 }
21aeb343 7026 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7027 break;
7028 case 0x01:
e9bb4aa9
JR
7029 tcg_gen_xor_i32(tmp, tmp, tmp2);
7030 if (logic_cc) {
7031 gen_logic_CC(tmp);
7032 }
21aeb343 7033 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7034 break;
7035 case 0x02:
7036 if (set_cc && rd == 15) {
7037 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7038 if (IS_USER(s)) {
9ee6e8bb 7039 goto illegal_op;
e9bb4aa9 7040 }
72485ec4 7041 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7042 gen_exception_return(s, tmp);
9ee6e8bb 7043 } else {
e9bb4aa9 7044 if (set_cc) {
72485ec4 7045 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7046 } else {
7047 tcg_gen_sub_i32(tmp, tmp, tmp2);
7048 }
21aeb343 7049 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7050 }
7051 break;
7052 case 0x03:
e9bb4aa9 7053 if (set_cc) {
72485ec4 7054 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7055 } else {
7056 tcg_gen_sub_i32(tmp, tmp2, tmp);
7057 }
21aeb343 7058 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7059 break;
7060 case 0x04:
e9bb4aa9 7061 if (set_cc) {
72485ec4 7062 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7063 } else {
7064 tcg_gen_add_i32(tmp, tmp, tmp2);
7065 }
21aeb343 7066 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7067 break;
7068 case 0x05:
e9bb4aa9 7069 if (set_cc) {
9ef39277 7070 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
e9bb4aa9
JR
7071 } else {
7072 gen_add_carry(tmp, tmp, tmp2);
7073 }
21aeb343 7074 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7075 break;
7076 case 0x06:
e9bb4aa9 7077 if (set_cc) {
9ef39277 7078 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
e9bb4aa9
JR
7079 } else {
7080 gen_sub_carry(tmp, tmp, tmp2);
7081 }
21aeb343 7082 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7083 break;
7084 case 0x07:
e9bb4aa9 7085 if (set_cc) {
9ef39277 7086 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
e9bb4aa9
JR
7087 } else {
7088 gen_sub_carry(tmp, tmp2, tmp);
7089 }
21aeb343 7090 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7091 break;
7092 case 0x08:
7093 if (set_cc) {
e9bb4aa9
JR
7094 tcg_gen_and_i32(tmp, tmp, tmp2);
7095 gen_logic_CC(tmp);
9ee6e8bb 7096 }
7d1b0095 7097 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7098 break;
7099 case 0x09:
7100 if (set_cc) {
e9bb4aa9
JR
7101 tcg_gen_xor_i32(tmp, tmp, tmp2);
7102 gen_logic_CC(tmp);
9ee6e8bb 7103 }
7d1b0095 7104 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7105 break;
7106 case 0x0a:
7107 if (set_cc) {
72485ec4 7108 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7109 }
7d1b0095 7110 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7111 break;
7112 case 0x0b:
7113 if (set_cc) {
72485ec4 7114 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7115 }
7d1b0095 7116 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7117 break;
7118 case 0x0c:
e9bb4aa9
JR
7119 tcg_gen_or_i32(tmp, tmp, tmp2);
7120 if (logic_cc) {
7121 gen_logic_CC(tmp);
7122 }
21aeb343 7123 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7124 break;
7125 case 0x0d:
7126 if (logic_cc && rd == 15) {
7127 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7128 if (IS_USER(s)) {
9ee6e8bb 7129 goto illegal_op;
e9bb4aa9
JR
7130 }
7131 gen_exception_return(s, tmp2);
9ee6e8bb 7132 } else {
e9bb4aa9
JR
7133 if (logic_cc) {
7134 gen_logic_CC(tmp2);
7135 }
21aeb343 7136 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7137 }
7138 break;
7139 case 0x0e:
f669df27 7140 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7141 if (logic_cc) {
7142 gen_logic_CC(tmp);
7143 }
21aeb343 7144 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7145 break;
7146 default:
7147 case 0x0f:
e9bb4aa9
JR
7148 tcg_gen_not_i32(tmp2, tmp2);
7149 if (logic_cc) {
7150 gen_logic_CC(tmp2);
7151 }
21aeb343 7152 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7153 break;
7154 }
e9bb4aa9 7155 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7156 tcg_temp_free_i32(tmp2);
e9bb4aa9 7157 }
9ee6e8bb
PB
7158 } else {
7159 /* other instructions */
7160 op1 = (insn >> 24) & 0xf;
7161 switch(op1) {
7162 case 0x0:
7163 case 0x1:
7164 /* multiplies, extra load/stores */
7165 sh = (insn >> 5) & 3;
7166 if (sh == 0) {
7167 if (op1 == 0x0) {
7168 rd = (insn >> 16) & 0xf;
7169 rn = (insn >> 12) & 0xf;
7170 rs = (insn >> 8) & 0xf;
7171 rm = (insn) & 0xf;
7172 op1 = (insn >> 20) & 0xf;
7173 switch (op1) {
7174 case 0: case 1: case 2: case 3: case 6:
7175 /* 32 bit mul */
5e3f878a
PB
7176 tmp = load_reg(s, rs);
7177 tmp2 = load_reg(s, rm);
7178 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7179 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7180 if (insn & (1 << 22)) {
7181 /* Subtract (mls) */
7182 ARCH(6T2);
5e3f878a
PB
7183 tmp2 = load_reg(s, rn);
7184 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7185 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7186 } else if (insn & (1 << 21)) {
7187 /* Add */
5e3f878a
PB
7188 tmp2 = load_reg(s, rn);
7189 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7190 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7191 }
7192 if (insn & (1 << 20))
5e3f878a
PB
7193 gen_logic_CC(tmp);
7194 store_reg(s, rd, tmp);
9ee6e8bb 7195 break;
8aac08b1
AJ
7196 case 4:
7197 /* 64 bit mul double accumulate (UMAAL) */
7198 ARCH(6);
7199 tmp = load_reg(s, rs);
7200 tmp2 = load_reg(s, rm);
7201 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7202 gen_addq_lo(s, tmp64, rn);
7203 gen_addq_lo(s, tmp64, rd);
7204 gen_storeq_reg(s, rn, rd, tmp64);
7205 tcg_temp_free_i64(tmp64);
7206 break;
7207 case 8: case 9: case 10: case 11:
7208 case 12: case 13: case 14: case 15:
7209 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7210 tmp = load_reg(s, rs);
7211 tmp2 = load_reg(s, rm);
8aac08b1 7212 if (insn & (1 << 22)) {
a7812ae4 7213 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7214 } else {
a7812ae4 7215 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7216 }
7217 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7218 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7219 }
8aac08b1 7220 if (insn & (1 << 20)) {
a7812ae4 7221 gen_logicq_cc(tmp64);
8aac08b1 7222 }
a7812ae4 7223 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7224 tcg_temp_free_i64(tmp64);
9ee6e8bb 7225 break;
8aac08b1
AJ
7226 default:
7227 goto illegal_op;
9ee6e8bb
PB
7228 }
7229 } else {
7230 rn = (insn >> 16) & 0xf;
7231 rd = (insn >> 12) & 0xf;
7232 if (insn & (1 << 23)) {
7233 /* load/store exclusive */
86753403
PB
7234 op1 = (insn >> 21) & 0x3;
7235 if (op1)
a47f43d2 7236 ARCH(6K);
86753403
PB
7237 else
7238 ARCH(6);
3174f8e9 7239 addr = tcg_temp_local_new_i32();
98a46317 7240 load_reg_var(s, addr, rn);
9ee6e8bb 7241 if (insn & (1 << 20)) {
86753403
PB
7242 switch (op1) {
7243 case 0: /* ldrex */
426f5abc 7244 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7245 break;
7246 case 1: /* ldrexd */
426f5abc 7247 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7248 break;
7249 case 2: /* ldrexb */
426f5abc 7250 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7251 break;
7252 case 3: /* ldrexh */
426f5abc 7253 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7254 break;
7255 default:
7256 abort();
7257 }
9ee6e8bb
PB
7258 } else {
7259 rm = insn & 0xf;
86753403
PB
7260 switch (op1) {
7261 case 0: /* strex */
426f5abc 7262 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7263 break;
7264 case 1: /* strexd */
502e64fe 7265 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7266 break;
7267 case 2: /* strexb */
426f5abc 7268 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7269 break;
7270 case 3: /* strexh */
426f5abc 7271 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7272 break;
7273 default:
7274 abort();
7275 }
9ee6e8bb 7276 }
3174f8e9 7277 tcg_temp_free(addr);
9ee6e8bb
PB
7278 } else {
7279 /* SWP instruction */
7280 rm = (insn) & 0xf;
7281
8984bd2e
PB
7282 /* ??? This is not really atomic. However we know
7283 we never have multiple CPUs running in parallel,
7284 so it is good enough. */
7285 addr = load_reg(s, rn);
7286 tmp = load_reg(s, rm);
9ee6e8bb 7287 if (insn & (1 << 22)) {
8984bd2e
PB
7288 tmp2 = gen_ld8u(addr, IS_USER(s));
7289 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7290 } else {
8984bd2e
PB
7291 tmp2 = gen_ld32(addr, IS_USER(s));
7292 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7293 }
7d1b0095 7294 tcg_temp_free_i32(addr);
8984bd2e 7295 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7296 }
7297 }
7298 } else {
7299 int address_offset;
7300 int load;
7301 /* Misc load/store */
7302 rn = (insn >> 16) & 0xf;
7303 rd = (insn >> 12) & 0xf;
b0109805 7304 addr = load_reg(s, rn);
9ee6e8bb 7305 if (insn & (1 << 24))
b0109805 7306 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7307 address_offset = 0;
7308 if (insn & (1 << 20)) {
7309 /* load */
7310 switch(sh) {
7311 case 1:
b0109805 7312 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7313 break;
7314 case 2:
b0109805 7315 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7316 break;
7317 default:
7318 case 3:
b0109805 7319 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7320 break;
7321 }
7322 load = 1;
7323 } else if (sh & 2) {
be5e7a76 7324 ARCH(5TE);
9ee6e8bb
PB
7325 /* doubleword */
7326 if (sh & 1) {
7327 /* store */
b0109805
PB
7328 tmp = load_reg(s, rd);
7329 gen_st32(tmp, addr, IS_USER(s));
7330 tcg_gen_addi_i32(addr, addr, 4);
7331 tmp = load_reg(s, rd + 1);
7332 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7333 load = 0;
7334 } else {
7335 /* load */
b0109805
PB
7336 tmp = gen_ld32(addr, IS_USER(s));
7337 store_reg(s, rd, tmp);
7338 tcg_gen_addi_i32(addr, addr, 4);
7339 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7340 rd++;
7341 load = 1;
7342 }
7343 address_offset = -4;
7344 } else {
7345 /* store */
b0109805
PB
7346 tmp = load_reg(s, rd);
7347 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7348 load = 0;
7349 }
7350 /* Perform base writeback before the loaded value to
7351 ensure correct behavior with overlapping index registers.
7352 ldrd with base writeback is undefined if the
7353 destination and index registers overlap. */
7354 if (!(insn & (1 << 24))) {
b0109805
PB
7355 gen_add_datah_offset(s, insn, address_offset, addr);
7356 store_reg(s, rn, addr);
9ee6e8bb
PB
7357 } else if (insn & (1 << 21)) {
7358 if (address_offset)
b0109805
PB
7359 tcg_gen_addi_i32(addr, addr, address_offset);
7360 store_reg(s, rn, addr);
7361 } else {
7d1b0095 7362 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7363 }
7364 if (load) {
7365 /* Complete the load. */
b0109805 7366 store_reg(s, rd, tmp);
9ee6e8bb
PB
7367 }
7368 }
7369 break;
7370 case 0x4:
7371 case 0x5:
7372 goto do_ldst;
7373 case 0x6:
7374 case 0x7:
7375 if (insn & (1 << 4)) {
7376 ARCH(6);
7377 /* ARMv6 Media instructions. */
7378 rm = insn & 0xf;
7379 rn = (insn >> 16) & 0xf;
2c0262af 7380 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7381 rs = (insn >> 8) & 0xf;
7382 switch ((insn >> 23) & 3) {
7383 case 0: /* Parallel add/subtract. */
7384 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7385 tmp = load_reg(s, rn);
7386 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7387 sh = (insn >> 5) & 7;
7388 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7389 goto illegal_op;
6ddbc6e4 7390 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7391 tcg_temp_free_i32(tmp2);
6ddbc6e4 7392 store_reg(s, rd, tmp);
9ee6e8bb
PB
7393 break;
7394 case 1:
7395 if ((insn & 0x00700020) == 0) {
6c95676b 7396 /* Halfword pack. */
3670669c
PB
7397 tmp = load_reg(s, rn);
7398 tmp2 = load_reg(s, rm);
9ee6e8bb 7399 shift = (insn >> 7) & 0x1f;
3670669c
PB
7400 if (insn & (1 << 6)) {
7401 /* pkhtb */
22478e79
AZ
7402 if (shift == 0)
7403 shift = 31;
7404 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7405 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7406 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7407 } else {
7408 /* pkhbt */
22478e79
AZ
7409 if (shift)
7410 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7411 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7412 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7413 }
7414 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7415 tcg_temp_free_i32(tmp2);
3670669c 7416 store_reg(s, rd, tmp);
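#if 0
/* Illustrative sketch only: the halfword-pack results produced by the
 * PKHBT/PKHTB sequences above (shift semantics as in the generated
 * code; for PKHTB a zero shift field means an arithmetic shift by 31). */
static uint32_t pkhbt_example(uint32_t rn, uint32_t rm, int shift)
{
    /* bottom halfword from Rn, top halfword from Rm << shift */
    return (rn & 0xffff) | ((rm << shift) & 0xffff0000);
}

static uint32_t pkhtb_example(uint32_t rn, uint32_t rm, int shift)
{
    /* top halfword from Rn, bottom halfword from Rm >> shift (arithmetic) */
    return (rn & 0xffff0000) | ((uint32_t)((int32_t)rm >> shift) & 0xffff);
}
#endif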
9ee6e8bb
PB
7417 } else if ((insn & 0x00200020) == 0x00200000) {
7418 /* [us]sat */
6ddbc6e4 7419 tmp = load_reg(s, rm);
9ee6e8bb
PB
7420 shift = (insn >> 7) & 0x1f;
7421 if (insn & (1 << 6)) {
7422 if (shift == 0)
7423 shift = 31;
6ddbc6e4 7424 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7425 } else {
6ddbc6e4 7426 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7427 }
7428 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7429 tmp2 = tcg_const_i32(sh);
7430 if (insn & (1 << 22))
9ef39277 7431 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7432 else
9ef39277 7433 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7434 tcg_temp_free_i32(tmp2);
6ddbc6e4 7435 store_reg(s, rd, tmp);
9ee6e8bb
PB
7436 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7437 /* [us]sat16 */
6ddbc6e4 7438 tmp = load_reg(s, rm);
9ee6e8bb 7439 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7440 tmp2 = tcg_const_i32(sh);
7441 if (insn & (1 << 22))
9ef39277 7442 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7443 else
9ef39277 7444 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7445 tcg_temp_free_i32(tmp2);
6ddbc6e4 7446 store_reg(s, rd, tmp);
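#if 0
/* Illustrative sketch only: the clamping performed by the ssat/usat
 * helpers called above.  The exact mapping of the instruction's sat_imm
 * field to 'bits' differs between SSAT and USAT and is handled in the
 * helpers; 'q' stands in for the Q flag. */
static int32_t ssat_example(int64_t val, int bits, int *q)
{
    int64_t max = (1ll << (bits - 1)) - 1;
    int64_t min = -(1ll << (bits - 1));

    if (val > max) {
        *q = 1;
        return (int32_t)max;
    }
    if (val < min) {
        *q = 1;
        return (int32_t)min;
    }
    return (int32_t)val;
}

static uint32_t usat_example(int64_t val, int bits, int *q)
{
    int64_t max = (1ll << bits) - 1;

    if (val > max) {
        *q = 1;
        return (uint32_t)max;
    }
    if (val < 0) {
        *q = 1;
        return 0;
    }
    return (uint32_t)val;
}
#endif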
9ee6e8bb
PB
7447 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7448 /* Select bytes. */
6ddbc6e4
PB
7449 tmp = load_reg(s, rn);
7450 tmp2 = load_reg(s, rm);
7d1b0095 7451 tmp3 = tcg_temp_new_i32();
0ecb72a5 7452 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7453 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7454 tcg_temp_free_i32(tmp3);
7455 tcg_temp_free_i32(tmp2);
6ddbc6e4 7456 store_reg(s, rd, tmp);
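#if 0
/* Illustrative sketch only: SEL picks each result byte from Rn or Rm
 * according to the corresponding CPSR.GE bit, which is what the
 * sel_flags helper does with the GE value loaded above. */
static uint32_t sel_example(uint32_t ge, uint32_t rn, uint32_t rm)
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < 4; i++) {
        if (ge & (1u << i)) {
            mask |= 0xffu << (i * 8);
        }
    }
    return (rn & mask) | (rm & ~mask);
}
#endif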
9ee6e8bb 7457 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7458 tmp = load_reg(s, rm);
9ee6e8bb 7459 shift = (insn >> 10) & 3;
1301f322 7460 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7461 rotate; a shift is sufficient. */
7462 if (shift != 0)
f669df27 7463 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7464 op1 = (insn >> 20) & 7;
7465 switch (op1) {
5e3f878a
PB
7466 case 0: gen_sxtb16(tmp); break;
7467 case 2: gen_sxtb(tmp); break;
7468 case 3: gen_sxth(tmp); break;
7469 case 4: gen_uxtb16(tmp); break;
7470 case 6: gen_uxtb(tmp); break;
7471 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7472 default: goto illegal_op;
7473 }
7474 if (rn != 15) {
5e3f878a 7475 tmp2 = load_reg(s, rn);
9ee6e8bb 7476 if ((op1 & 3) == 0) {
5e3f878a 7477 gen_add16(tmp, tmp2);
9ee6e8bb 7478 } else {
5e3f878a 7479 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7480 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7481 }
7482 }
6c95676b 7483 store_reg(s, rd, tmp);
9ee6e8bb
PB
7484 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7485 /* rev */
b0109805 7486 tmp = load_reg(s, rm);
9ee6e8bb
PB
7487 if (insn & (1 << 22)) {
7488 if (insn & (1 << 7)) {
b0109805 7489 gen_revsh(tmp);
9ee6e8bb
PB
7490 } else {
7491 ARCH(6T2);
b0109805 7492 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7493 }
7494 } else {
7495 if (insn & (1 << 7))
b0109805 7496 gen_rev16(tmp);
9ee6e8bb 7497 else
66896cb8 7498 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7499 }
b0109805 7500 store_reg(s, rd, tmp);
9ee6e8bb
PB
7501 } else {
7502 goto illegal_op;
7503 }
7504 break;
7505 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7506 switch ((insn >> 20) & 0x7) {
7507 case 5:
7508 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7509 /* op2 not 00x or 11x: UNDEF */
7510 goto illegal_op;
7511 }
838fa72d
AJ
7512 /* Signed multiply most significant [accumulate].
7513 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7514 tmp = load_reg(s, rm);
7515 tmp2 = load_reg(s, rs);
a7812ae4 7516 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7517
955a7dd5 7518 if (rd != 15) {
838fa72d 7519 tmp = load_reg(s, rd);
9ee6e8bb 7520 if (insn & (1 << 6)) {
838fa72d 7521 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7522 } else {
838fa72d 7523 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7524 }
7525 }
838fa72d
AJ
7526 if (insn & (1 << 5)) {
7527 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7528 }
7529 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7530 tmp = tcg_temp_new_i32();
838fa72d
AJ
7531 tcg_gen_trunc_i64_i32(tmp, tmp64);
7532 tcg_temp_free_i64(tmp64);
955a7dd5 7533 store_reg(s, rn, tmp);
41e9564d
PM
7534 break;
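#if 0
/* Illustrative sketch only: the value computed by the SMMUL/SMMLA/SMMLS
 * sequence above.  'sub' corresponds to insn bit 6 (SMMLS), 'round' to
 * bit 5 (the R variants); for SMMUL there is no accumulator, i.e. 'acc'
 * is 0 and 'sub' is clear. */
static uint32_t smmla_example(int32_t op1, int32_t op2, int32_t acc,
                              int sub, int round)
{
    int64_t prod = (int64_t)op1 * op2;

    if (sub) {
        prod = ((int64_t)acc << 32) - prod;   /* SMMLS */
    } else {
        prod += (int64_t)acc << 32;           /* SMMLA / SMMUL */
    }
    if (round) {
        prod += 0x80000000ll;
    }
    return (uint32_t)(prod >> 32);
}
#endif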
7535 case 0:
7536 case 4:
7537 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7538 if (insn & (1 << 7)) {
7539 goto illegal_op;
7540 }
7541 tmp = load_reg(s, rm);
7542 tmp2 = load_reg(s, rs);
9ee6e8bb 7543 if (insn & (1 << 5))
5e3f878a
PB
7544 gen_swap_half(tmp2);
7545 gen_smul_dual(tmp, tmp2);
5e3f878a 7546 if (insn & (1 << 6)) {
e1d177b9 7547 /* This subtraction cannot overflow. */
5e3f878a
PB
7548 tcg_gen_sub_i32(tmp, tmp, tmp2);
7549 } else {
e1d177b9
PM
7550 /* This addition cannot overflow 32 bits;
7551 * however it may overflow considered as a signed
7552 * operation, in which case we must set the Q flag.
7553 */
9ef39277 7554 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7555 }
7d1b0095 7556 tcg_temp_free_i32(tmp2);
9ee6e8bb 7557 if (insn & (1 << 22)) {
5e3f878a 7558 /* smlald, smlsld */
a7812ae4
PB
7559 tmp64 = tcg_temp_new_i64();
7560 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7561 tcg_temp_free_i32(tmp);
a7812ae4
PB
7562 gen_addq(s, tmp64, rd, rn);
7563 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7564 tcg_temp_free_i64(tmp64);
9ee6e8bb 7565 } else {
5e3f878a 7566 /* smuad, smusd, smlad, smlsd */
22478e79 7567 if (rd != 15)
9ee6e8bb 7568 {
22478e79 7569 tmp2 = load_reg(s, rd);
9ef39277 7570 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7571 tcg_temp_free_i32(tmp2);
9ee6e8bb 7572 }
22478e79 7573 store_reg(s, rn, tmp);
9ee6e8bb 7574 }
41e9564d 7575 break;
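#if 0
/* Illustrative sketch only: the dual 16x16 products behind
 * SMUAD/SMUSD/SMLAD/SMLSD above.  'swap' corresponds to insn bit 5
 * (M bit: cross the halfwords of the second operand) and 'sub' to
 * bit 6; the Q-flag handling of the additions is left out. */
static int32_t smuad_example(uint32_t op1, uint32_t op2, int swap, int sub)
{
    int32_t lo, hi;

    if (swap) {
        op2 = (op2 >> 16) | (op2 << 16);      /* swap halfwords of op2 */
    }
    lo = (int32_t)(int16_t)op1 * (int16_t)op2;
    hi = (int32_t)(int16_t)(op1 >> 16) * (int16_t)(op2 >> 16);

    return sub ? lo - hi : lo + hi;
}
#endif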
b8b8ea05
PM
7576 case 1:
7577 case 3:
7578 /* SDIV, UDIV */
7579 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7580 goto illegal_op;
7581 }
7582 if (((insn >> 5) & 7) || (rd != 15)) {
7583 goto illegal_op;
7584 }
7585 tmp = load_reg(s, rm);
7586 tmp2 = load_reg(s, rs);
7587 if (insn & (1 << 21)) {
7588 gen_helper_udiv(tmp, tmp, tmp2);
7589 } else {
7590 gen_helper_sdiv(tmp, tmp, tmp2);
7591 }
7592 tcg_temp_free_i32(tmp2);
7593 store_reg(s, rn, tmp);
7594 break;
41e9564d
PM
7595 default:
7596 goto illegal_op;
9ee6e8bb
PB
7597 }
7598 break;
7599 case 3:
7600 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7601 switch (op1) {
7602 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7603 ARCH(6);
7604 tmp = load_reg(s, rm);
7605 tmp2 = load_reg(s, rs);
7606 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7607 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7608 if (rd != 15) {
7609 tmp2 = load_reg(s, rd);
6ddbc6e4 7610 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7611 tcg_temp_free_i32(tmp2);
9ee6e8bb 7612 }
ded9d295 7613 store_reg(s, rn, tmp);
9ee6e8bb
PB
7614 break;
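#if 0
/* Illustrative sketch only: the per-byte absolute difference sum that
 * the usad8 helper computes; USADA8 then adds the accumulator, as the
 * rd != 15 path above does. */
static uint32_t usad8_example(uint32_t a, uint32_t b)
{
    uint32_t sum = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int da = (a >> (i * 8)) & 0xff;
        int db = (b >> (i * 8)) & 0xff;
        sum += (da > db) ? (da - db) : (db - da);
    }
    return sum;
}
#endif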
7615 case 0x20: case 0x24: case 0x28: case 0x2c:
7616 /* Bitfield insert/clear. */
7617 ARCH(6T2);
7618 shift = (insn >> 7) & 0x1f;
7619 i = (insn >> 16) & 0x1f;
7620 i = i + 1 - shift;
7621 if (rm == 15) {
7d1b0095 7622 tmp = tcg_temp_new_i32();
5e3f878a 7623 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7624 } else {
5e3f878a 7625 tmp = load_reg(s, rm);
9ee6e8bb
PB
7626 }
7627 if (i != 32) {
5e3f878a 7628 tmp2 = load_reg(s, rd);
d593c48e 7629 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7630 tcg_temp_free_i32(tmp2);
9ee6e8bb 7631 }
5e3f878a 7632 store_reg(s, rd, tmp);
9ee6e8bb
PB
7633 break;
7634 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7635 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7636 ARCH(6T2);
5e3f878a 7637 tmp = load_reg(s, rm);
9ee6e8bb
PB
7638 shift = (insn >> 7) & 0x1f;
7639 i = ((insn >> 16) & 0x1f) + 1;
7640 if (shift + i > 32)
7641 goto illegal_op;
7642 if (i < 32) {
7643 if (op1 & 0x20) {
5e3f878a 7644 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7645 } else {
5e3f878a 7646 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7647 }
7648 }
5e3f878a 7649 store_reg(s, rd, tmp);
9ee6e8bb
PB
7650 break;
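#if 0
/* Illustrative sketch only: the bitfield operations generated above
 * (UBFX/SBFX in this case, BFI/BFC in the previous one), for a field
 * of 'width' bits starting at bit 'lsb', with width < 32 as in the
 * non-degenerate paths. */
static uint32_t ubfx_example(uint32_t val, int lsb, int width)
{
    return (val >> lsb) & ((1u << width) - 1);
}

static int32_t sbfx_example(uint32_t val, int lsb, int width)
{
    /* shift the field up to bit 31, then arithmetic-shift back down */
    return (int32_t)(val << (32 - lsb - width)) >> (32 - width);
}

static uint32_t bfi_example(uint32_t base, uint32_t val, int lsb, int width)
{
    uint32_t mask = ((1u << width) - 1) << lsb;

    return (base & ~mask) | ((val << lsb) & mask);
}
#endif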
7651 default:
7652 goto illegal_op;
7653 }
7654 break;
7655 }
7656 break;
7657 }
7658 do_ldst:
7659 /* Check for undefined extension instructions
7660 * per the ARM Bible, i.e.:
7661 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7662 */
7663 sh = (0xf << 20) | (0xf << 4);
7664 if (op1 == 0x7 && ((insn & sh) == sh))
7665 {
7666 goto illegal_op;
7667 }
7668 /* load/store byte/word */
7669 rn = (insn >> 16) & 0xf;
7670 rd = (insn >> 12) & 0xf;
b0109805 7671 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7672 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7673 if (insn & (1 << 24))
b0109805 7674 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7675 if (insn & (1 << 20)) {
7676 /* load */
9ee6e8bb 7677 if (insn & (1 << 22)) {
b0109805 7678 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7679 } else {
b0109805 7680 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7681 }
9ee6e8bb
PB
7682 } else {
7683 /* store */
b0109805 7684 tmp = load_reg(s, rd);
9ee6e8bb 7685 if (insn & (1 << 22))
b0109805 7686 gen_st8(tmp, tmp2, i);
9ee6e8bb 7687 else
b0109805 7688 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7689 }
7690 if (!(insn & (1 << 24))) {
b0109805
PB
7691 gen_add_data_offset(s, insn, tmp2);
7692 store_reg(s, rn, tmp2);
7693 } else if (insn & (1 << 21)) {
7694 store_reg(s, rn, tmp2);
7695 } else {
7d1b0095 7696 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7697 }
7698 if (insn & (1 << 20)) {
7699 /* Complete the load. */
be5e7a76 7700 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7701 }
7702 break;
7703 case 0x08:
7704 case 0x09:
7705 {
7706 int j, n, user, loaded_base;
b0109805 7707 TCGv loaded_var;
9ee6e8bb
PB
7708 /* load/store multiple words */
7709 /* XXX: store correct base if write back */
7710 user = 0;
7711 if (insn & (1 << 22)) {
7712 if (IS_USER(s))
7713 goto illegal_op; /* only usable in supervisor mode */
7714
7715 if ((insn & (1 << 15)) == 0)
7716 user = 1;
7717 }
7718 rn = (insn >> 16) & 0xf;
b0109805 7719 addr = load_reg(s, rn);
9ee6e8bb
PB
7720
7721 /* compute total size */
7722 loaded_base = 0;
a50f5b91 7723 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7724 n = 0;
7725 for(i=0;i<16;i++) {
7726 if (insn & (1 << i))
7727 n++;
7728 }
7729 /* XXX: test invalid n == 0 case ? */
7730 if (insn & (1 << 23)) {
7731 if (insn & (1 << 24)) {
7732 /* pre increment */
b0109805 7733 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7734 } else {
7735 /* post increment */
7736 }
7737 } else {
7738 if (insn & (1 << 24)) {
7739 /* pre decrement */
b0109805 7740 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7741 } else {
7742 /* post decrement */
7743 if (n != 1)
b0109805 7744 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7745 }
7746 }
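#if 0
/* Illustrative sketch only: the start address the code above arrives
 * at for a block transfer of n registers, for the four addressing
 * modes (increment/decrement, before/after). */
static uint32_t block_xfer_start_example(uint32_t rn, int n, int up, int pre)
{
    if (up) {
        return pre ? rn + 4 : rn;                   /* IB : IA */
    }
    return pre ? rn - 4 * n : rn - 4 * (n - 1);     /* DB : DA */
}
#endif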
7747 j = 0;
7748 for(i=0;i<16;i++) {
7749 if (insn & (1 << i)) {
7750 if (insn & (1 << 20)) {
7751 /* load */
b0109805 7752 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7753 if (user) {
b75263d6 7754 tmp2 = tcg_const_i32(i);
1ce94f81 7755 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7756 tcg_temp_free_i32(tmp2);
7d1b0095 7757 tcg_temp_free_i32(tmp);
9ee6e8bb 7758 } else if (i == rn) {
b0109805 7759 loaded_var = tmp;
9ee6e8bb
PB
7760 loaded_base = 1;
7761 } else {
be5e7a76 7762 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7763 }
7764 } else {
7765 /* store */
7766 if (i == 15) {
7767 /* special case: r15 = PC + 8 */
7768 val = (long)s->pc + 4;
7d1b0095 7769 tmp = tcg_temp_new_i32();
b0109805 7770 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7771 } else if (user) {
7d1b0095 7772 tmp = tcg_temp_new_i32();
b75263d6 7773 tmp2 = tcg_const_i32(i);
9ef39277 7774 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7775 tcg_temp_free_i32(tmp2);
9ee6e8bb 7776 } else {
b0109805 7777 tmp = load_reg(s, i);
9ee6e8bb 7778 }
b0109805 7779 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7780 }
7781 j++;
7782 /* no need to add after the last transfer */
7783 if (j != n)
b0109805 7784 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7785 }
7786 }
7787 if (insn & (1 << 21)) {
7788 /* write back */
7789 if (insn & (1 << 23)) {
7790 if (insn & (1 << 24)) {
7791 /* pre increment */
7792 } else {
7793 /* post increment */
b0109805 7794 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7795 }
7796 } else {
7797 if (insn & (1 << 24)) {
7798 /* pre decrement */
7799 if (n != 1)
b0109805 7800 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7801 } else {
7802 /* post decrement */
b0109805 7803 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7804 }
7805 }
b0109805
PB
7806 store_reg(s, rn, addr);
7807 } else {
7d1b0095 7808 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7809 }
7810 if (loaded_base) {
b0109805 7811 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7812 }
7813 if ((insn & (1 << 22)) && !user) {
7814 /* Restore CPSR from SPSR. */
d9ba4830
PB
7815 tmp = load_cpu_field(spsr);
7816 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7817 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7818 s->is_jmp = DISAS_UPDATE;
7819 }
7820 }
7821 break;
7822 case 0xa:
7823 case 0xb:
7824 {
7825 int32_t offset;
7826
7827 /* branch (and link) */
7828 val = (int32_t)s->pc;
7829 if (insn & (1 << 24)) {
7d1b0095 7830 tmp = tcg_temp_new_i32();
5e3f878a
PB
7831 tcg_gen_movi_i32(tmp, val);
7832 store_reg(s, 14, tmp);
9ee6e8bb
PB
7833 }
7834 offset = (((int32_t)insn << 8) >> 8);
7835 val += (offset << 2) + 4;
7836 gen_jmp(s, val);
7837 }
7838 break;
7839 case 0xc:
7840 case 0xd:
7841 case 0xe:
7842 /* Coprocessor. */
7843 if (disas_coproc_insn(env, s, insn))
7844 goto illegal_op;
7845 break;
7846 case 0xf:
7847 /* swi */
5e3f878a 7848 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7849 s->is_jmp = DISAS_SWI;
7850 break;
7851 default:
7852 illegal_op:
bc4a0de0 7853 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7854 break;
7855 }
7856 }
7857}
7858
7859/* Return true if this is a Thumb-2 logical op. */
7860static int
7861thumb2_logic_op(int op)
7862{
7863 return (op < 8);
7864}
7865
7866/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7867 then set condition code flags based on the result of the operation.
7868 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7869 to the high bit of T1.
7870 Returns zero if the opcode is valid. */
7871
7872static int
396e467c 7873gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7874{
7875 int logic_cc;
7876
7877 logic_cc = 0;
7878 switch (op) {
7879 case 0: /* and */
396e467c 7880 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7881 logic_cc = conds;
7882 break;
7883 case 1: /* bic */
f669df27 7884 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7885 logic_cc = conds;
7886 break;
7887 case 2: /* orr */
396e467c 7888 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7889 logic_cc = conds;
7890 break;
7891 case 3: /* orn */
29501f1b 7892 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7893 logic_cc = conds;
7894 break;
7895 case 4: /* eor */
396e467c 7896 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7897 logic_cc = conds;
7898 break;
7899 case 8: /* add */
7900 if (conds)
72485ec4 7901 gen_add_CC(t0, t0, t1);
9ee6e8bb 7902 else
396e467c 7903 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7904 break;
7905 case 10: /* adc */
7906 if (conds)
9ef39277 7907 gen_helper_adc_cc(t0, cpu_env, t0, t1);
9ee6e8bb 7908 else
396e467c 7909 gen_adc(t0, t1);
9ee6e8bb
PB
7910 break;
7911 case 11: /* sbc */
7912 if (conds)
9ef39277 7913 gen_helper_sbc_cc(t0, cpu_env, t0, t1);
9ee6e8bb 7914 else
396e467c 7915 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7916 break;
7917 case 13: /* sub */
7918 if (conds)
72485ec4 7919 gen_sub_CC(t0, t0, t1);
9ee6e8bb 7920 else
396e467c 7921 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7922 break;
7923 case 14: /* rsb */
7924 if (conds)
72485ec4 7925 gen_sub_CC(t0, t1, t0);
9ee6e8bb 7926 else
396e467c 7927 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7928 break;
7929 default: /* 5, 6, 7, 9, 12, 15. */
7930 return 1;
7931 }
7932 if (logic_cc) {
396e467c 7933 gen_logic_CC(t0);
9ee6e8bb 7934 if (shifter_out)
396e467c 7935 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7936 }
7937 return 0;
7938}
7939
7940/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7941 is not legal. */
0ecb72a5 7942static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 7943{
b0109805 7944 uint32_t insn, imm, shift, offset;
9ee6e8bb 7945 uint32_t rd, rn, rm, rs;
b26eefb6 7946 TCGv tmp;
6ddbc6e4
PB
7947 TCGv tmp2;
7948 TCGv tmp3;
b0109805 7949 TCGv addr;
a7812ae4 7950 TCGv_i64 tmp64;
9ee6e8bb
PB
7951 int op;
7952 int shiftop;
7953 int conds;
7954 int logic_cc;
7955
7956 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7957 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7958 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7959 16-bit instructions to get correct prefetch abort behavior. */
7960 insn = insn_hw1;
7961 if ((insn & (1 << 12)) == 0) {
be5e7a76 7962 ARCH(5);
9ee6e8bb
PB
7963 /* Second half of blx. */
7964 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7965 tmp = load_reg(s, 14);
7966 tcg_gen_addi_i32(tmp, tmp, offset);
7967 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7968
7d1b0095 7969 tmp2 = tcg_temp_new_i32();
b0109805 7970 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7971 store_reg(s, 14, tmp2);
7972 gen_bx(s, tmp);
9ee6e8bb
PB
7973 return 0;
7974 }
7975 if (insn & (1 << 11)) {
7976 /* Second half of bl. */
7977 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7978 tmp = load_reg(s, 14);
6a0d8a1d 7979 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7980
7d1b0095 7981 tmp2 = tcg_temp_new_i32();
b0109805 7982 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7983 store_reg(s, 14, tmp2);
7984 gen_bx(s, tmp);
9ee6e8bb
PB
7985 return 0;
7986 }
7987 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7988 /* Instruction spans a page boundary. Implement it as two
7989 16-bit instructions in case the second half causes a
7990 prefetch abort. */
7991 offset = ((int32_t)insn << 21) >> 9;
396e467c 7992 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7993 return 0;
7994 }
7995 /* Fall through to 32-bit decode. */
7996 }
7997
d31dd73e 7998 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7999 s->pc += 2;
8000 insn |= (uint32_t)insn_hw1 << 16;
8001
8002 if ((insn & 0xf800e800) != 0xf000e800) {
8003 ARCH(6T2);
8004 }
8005
8006 rn = (insn >> 16) & 0xf;
8007 rs = (insn >> 12) & 0xf;
8008 rd = (insn >> 8) & 0xf;
8009 rm = insn & 0xf;
8010 switch ((insn >> 25) & 0xf) {
8011 case 0: case 1: case 2: case 3:
8012 /* 16-bit instructions. Should never happen. */
8013 abort();
8014 case 4:
8015 if (insn & (1 << 22)) {
8016 /* Other load/store, table branch. */
8017 if (insn & 0x01200000) {
8018 /* Load/store doubleword. */
8019 if (rn == 15) {
7d1b0095 8020 addr = tcg_temp_new_i32();
b0109805 8021 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8022 } else {
b0109805 8023 addr = load_reg(s, rn);
9ee6e8bb
PB
8024 }
8025 offset = (insn & 0xff) * 4;
8026 if ((insn & (1 << 23)) == 0)
8027 offset = -offset;
8028 if (insn & (1 << 24)) {
b0109805 8029 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8030 offset = 0;
8031 }
8032 if (insn & (1 << 20)) {
8033 /* ldrd */
b0109805
PB
8034 tmp = gen_ld32(addr, IS_USER(s));
8035 store_reg(s, rs, tmp);
8036 tcg_gen_addi_i32(addr, addr, 4);
8037 tmp = gen_ld32(addr, IS_USER(s));
8038 store_reg(s, rd, tmp);
9ee6e8bb
PB
8039 } else {
8040 /* strd */
b0109805
PB
8041 tmp = load_reg(s, rs);
8042 gen_st32(tmp, addr, IS_USER(s));
8043 tcg_gen_addi_i32(addr, addr, 4);
8044 tmp = load_reg(s, rd);
8045 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8046 }
8047 if (insn & (1 << 21)) {
8048 /* Base writeback. */
8049 if (rn == 15)
8050 goto illegal_op;
b0109805
PB
8051 tcg_gen_addi_i32(addr, addr, offset - 4);
8052 store_reg(s, rn, addr);
8053 } else {
7d1b0095 8054 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8055 }
8056 } else if ((insn & (1 << 23)) == 0) {
8057 /* Load/store exclusive word. */
3174f8e9 8058 addr = tcg_temp_local_new();
98a46317 8059 load_reg_var(s, addr, rn);
426f5abc 8060 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8061 if (insn & (1 << 20)) {
426f5abc 8062 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8063 } else {
426f5abc 8064 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8065 }
3174f8e9 8066 tcg_temp_free(addr);
9ee6e8bb
PB
8067 } else if ((insn & (1 << 6)) == 0) {
8068 /* Table Branch. */
8069 if (rn == 15) {
7d1b0095 8070 addr = tcg_temp_new_i32();
b0109805 8071 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8072 } else {
b0109805 8073 addr = load_reg(s, rn);
9ee6e8bb 8074 }
b26eefb6 8075 tmp = load_reg(s, rm);
b0109805 8076 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8077 if (insn & (1 << 4)) {
8078 /* tbh */
b0109805 8079 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8080 tcg_temp_free_i32(tmp);
b0109805 8081 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8082 } else { /* tbb */
7d1b0095 8083 tcg_temp_free_i32(tmp);
b0109805 8084 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8085 }
7d1b0095 8086 tcg_temp_free_i32(addr);
b0109805
PB
8087 tcg_gen_shli_i32(tmp, tmp, 1);
8088 tcg_gen_addi_i32(tmp, tmp, s->pc);
8089 store_reg(s, 15, tmp);
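#if 0
/* Illustrative sketch only: the branch target computed by the TBB/TBH
 * code above.  'entry' is the byte (TBB, loaded from Rn + Rm) or
 * halfword (TBH, loaded from Rn + 2*Rm) table entry, and 'pc' is the
 * address of the next instruction (s->pc at this point). */
static uint32_t tb_target_example(uint32_t pc, uint32_t entry)
{
    return pc + 2 * entry;
}
#endif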
9ee6e8bb
PB
8090 } else {
8091 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8092 ARCH(7);
9ee6e8bb 8093 op = (insn >> 4) & 0x3;
426f5abc
PB
8094 if (op == 2) {
8095 goto illegal_op;
8096 }
3174f8e9 8097 addr = tcg_temp_local_new();
98a46317 8098 load_reg_var(s, addr, rn);
9ee6e8bb 8099 if (insn & (1 << 20)) {
426f5abc 8100 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8101 } else {
426f5abc 8102 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8103 }
3174f8e9 8104 tcg_temp_free(addr);
9ee6e8bb
PB
8105 }
8106 } else {
8107 /* Load/store multiple, RFE, SRS. */
8108 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8109 /* Not available in user mode. */
b0109805 8110 if (IS_USER(s))
9ee6e8bb
PB
8111 goto illegal_op;
8112 if (insn & (1 << 20)) {
8113 /* rfe */
b0109805
PB
8114 addr = load_reg(s, rn);
8115 if ((insn & (1 << 24)) == 0)
8116 tcg_gen_addi_i32(addr, addr, -8);
8117 /* Load PC into tmp and CPSR into tmp2. */
8118 tmp = gen_ld32(addr, 0);
8119 tcg_gen_addi_i32(addr, addr, 4);
8120 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8121 if (insn & (1 << 21)) {
8122 /* Base writeback. */
b0109805
PB
8123 if (insn & (1 << 24)) {
8124 tcg_gen_addi_i32(addr, addr, 4);
8125 } else {
8126 tcg_gen_addi_i32(addr, addr, -4);
8127 }
8128 store_reg(s, rn, addr);
8129 } else {
7d1b0095 8130 tcg_temp_free_i32(addr);
9ee6e8bb 8131 }
b0109805 8132 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8133 } else {
8134 /* srs */
8135 op = (insn & 0x1f);
7d1b0095 8136 addr = tcg_temp_new_i32();
39ea3d4e
PM
8137 tmp = tcg_const_i32(op);
8138 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8139 tcg_temp_free_i32(tmp);
9ee6e8bb 8140 if ((insn & (1 << 24)) == 0) {
b0109805 8141 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8142 }
b0109805
PB
8143 tmp = load_reg(s, 14);
8144 gen_st32(tmp, addr, 0);
8145 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8146 tmp = tcg_temp_new_i32();
9ef39277 8147 gen_helper_cpsr_read(tmp, cpu_env);
b0109805 8148 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8149 if (insn & (1 << 21)) {
8150 if ((insn & (1 << 24)) == 0) {
b0109805 8151 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8152 } else {
b0109805 8153 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8154 }
39ea3d4e
PM
8155 tmp = tcg_const_i32(op);
8156 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8157 tcg_temp_free_i32(tmp);
b0109805 8158 } else {
7d1b0095 8159 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8160 }
8161 }
8162 } else {
5856d44e
YO
8163 int i, loaded_base = 0;
8164 TCGv loaded_var;
9ee6e8bb 8165 /* Load/store multiple. */
b0109805 8166 addr = load_reg(s, rn);
9ee6e8bb
PB
8167 offset = 0;
8168 for (i = 0; i < 16; i++) {
8169 if (insn & (1 << i))
8170 offset += 4;
8171 }
8172 if (insn & (1 << 24)) {
b0109805 8173 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8174 }
8175
5856d44e 8176 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8177 for (i = 0; i < 16; i++) {
8178 if ((insn & (1 << i)) == 0)
8179 continue;
8180 if (insn & (1 << 20)) {
8181 /* Load. */
b0109805 8182 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8183 if (i == 15) {
b0109805 8184 gen_bx(s, tmp);
5856d44e
YO
8185 } else if (i == rn) {
8186 loaded_var = tmp;
8187 loaded_base = 1;
9ee6e8bb 8188 } else {
b0109805 8189 store_reg(s, i, tmp);
9ee6e8bb
PB
8190 }
8191 } else {
8192 /* Store. */
b0109805
PB
8193 tmp = load_reg(s, i);
8194 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8195 }
b0109805 8196 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8197 }
5856d44e
YO
8198 if (loaded_base) {
8199 store_reg(s, rn, loaded_var);
8200 }
9ee6e8bb
PB
8201 if (insn & (1 << 21)) {
8202 /* Base register writeback. */
8203 if (insn & (1 << 24)) {
b0109805 8204 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8205 }
8206 /* Fault if writeback register is in register list. */
8207 if (insn & (1 << rn))
8208 goto illegal_op;
b0109805
PB
8209 store_reg(s, rn, addr);
8210 } else {
7d1b0095 8211 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8212 }
8213 }
8214 }
8215 break;
2af9ab77
JB
8216 case 5:
8217
9ee6e8bb 8218 op = (insn >> 21) & 0xf;
2af9ab77
JB
8219 if (op == 6) {
8220 /* Halfword pack. */
8221 tmp = load_reg(s, rn);
8222 tmp2 = load_reg(s, rm);
8223 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8224 if (insn & (1 << 5)) {
8225 /* pkhtb */
8226 if (shift == 0)
8227 shift = 31;
8228 tcg_gen_sari_i32(tmp2, tmp2, shift);
8229 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8230 tcg_gen_ext16u_i32(tmp2, tmp2);
8231 } else {
8232 /* pkhbt */
8233 if (shift)
8234 tcg_gen_shli_i32(tmp2, tmp2, shift);
8235 tcg_gen_ext16u_i32(tmp, tmp);
8236 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8237 }
8238 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8239 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8240 store_reg(s, rd, tmp);
8241 } else {
2af9ab77
JB
8242 /* Data processing register constant shift. */
8243 if (rn == 15) {
7d1b0095 8244 tmp = tcg_temp_new_i32();
2af9ab77
JB
8245 tcg_gen_movi_i32(tmp, 0);
8246 } else {
8247 tmp = load_reg(s, rn);
8248 }
8249 tmp2 = load_reg(s, rm);
8250
8251 shiftop = (insn >> 4) & 3;
8252 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8253 conds = (insn & (1 << 20)) != 0;
8254 logic_cc = (conds && thumb2_logic_op(op));
8255 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8256 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8257 goto illegal_op;
7d1b0095 8258 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8259 if (rd != 15) {
8260 store_reg(s, rd, tmp);
8261 } else {
7d1b0095 8262 tcg_temp_free_i32(tmp);
2af9ab77 8263 }
3174f8e9 8264 }
9ee6e8bb
PB
8265 break;
8266 case 13: /* Misc data processing. */
8267 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8268 if (op < 4 && (insn & 0xf000) != 0xf000)
8269 goto illegal_op;
8270 switch (op) {
8271 case 0: /* Register controlled shift. */
8984bd2e
PB
8272 tmp = load_reg(s, rn);
8273 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8274 if ((insn & 0x70) != 0)
8275 goto illegal_op;
8276 op = (insn >> 21) & 3;
8984bd2e
PB
8277 logic_cc = (insn & (1 << 20)) != 0;
8278 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8279 if (logic_cc)
8280 gen_logic_CC(tmp);
21aeb343 8281 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8282 break;
8283 case 1: /* Sign/zero extend. */
5e3f878a 8284 tmp = load_reg(s, rm);
9ee6e8bb 8285 shift = (insn >> 4) & 3;
1301f322 8286 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8287 rotate; a shift is sufficient. */
8288 if (shift != 0)
f669df27 8289 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8290 op = (insn >> 20) & 7;
8291 switch (op) {
5e3f878a
PB
8292 case 0: gen_sxth(tmp); break;
8293 case 1: gen_uxth(tmp); break;
8294 case 2: gen_sxtb16(tmp); break;
8295 case 3: gen_uxtb16(tmp); break;
8296 case 4: gen_sxtb(tmp); break;
8297 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8298 default: goto illegal_op;
8299 }
8300 if (rn != 15) {
5e3f878a 8301 tmp2 = load_reg(s, rn);
9ee6e8bb 8302 if ((op >> 1) == 1) {
5e3f878a 8303 gen_add16(tmp, tmp2);
9ee6e8bb 8304 } else {
5e3f878a 8305 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8306 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8307 }
8308 }
5e3f878a 8309 store_reg(s, rd, tmp);
9ee6e8bb
PB
8310 break;
8311 case 2: /* SIMD add/subtract. */
8312 op = (insn >> 20) & 7;
8313 shift = (insn >> 4) & 7;
8314 if ((op & 3) == 3 || (shift & 3) == 3)
8315 goto illegal_op;
6ddbc6e4
PB
8316 tmp = load_reg(s, rn);
8317 tmp2 = load_reg(s, rm);
8318 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8319 tcg_temp_free_i32(tmp2);
6ddbc6e4 8320 store_reg(s, rd, tmp);
9ee6e8bb
PB
8321 break;
8322 case 3: /* Other data processing. */
8323 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8324 if (op < 4) {
8325 /* Saturating add/subtract. */
d9ba4830
PB
8326 tmp = load_reg(s, rn);
8327 tmp2 = load_reg(s, rm);
9ee6e8bb 8328 if (op & 1)
9ef39277 8329 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8330 if (op & 2)
9ef39277 8331 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8332 else
9ef39277 8333 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8334 tcg_temp_free_i32(tmp2);
9ee6e8bb 8335 } else {
d9ba4830 8336 tmp = load_reg(s, rn);
9ee6e8bb
PB
8337 switch (op) {
8338 case 0x0a: /* rbit */
d9ba4830 8339 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8340 break;
8341 case 0x08: /* rev */
66896cb8 8342 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8343 break;
8344 case 0x09: /* rev16 */
d9ba4830 8345 gen_rev16(tmp);
9ee6e8bb
PB
8346 break;
8347 case 0x0b: /* revsh */
d9ba4830 8348 gen_revsh(tmp);
9ee6e8bb
PB
8349 break;
8350 case 0x10: /* sel */
d9ba4830 8351 tmp2 = load_reg(s, rm);
7d1b0095 8352 tmp3 = tcg_temp_new_i32();
0ecb72a5 8353 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8354 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8355 tcg_temp_free_i32(tmp3);
8356 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8357 break;
8358 case 0x18: /* clz */
d9ba4830 8359 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8360 break;
8361 default:
8362 goto illegal_op;
8363 }
8364 }
d9ba4830 8365 store_reg(s, rd, tmp);
9ee6e8bb
PB
8366 break;
8367 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8368 op = (insn >> 4) & 0xf;
d9ba4830
PB
8369 tmp = load_reg(s, rn);
8370 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8371 switch ((insn >> 20) & 7) {
8372 case 0: /* 32 x 32 -> 32 */
d9ba4830 8373 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8374 tcg_temp_free_i32(tmp2);
9ee6e8bb 8375 if (rs != 15) {
d9ba4830 8376 tmp2 = load_reg(s, rs);
9ee6e8bb 8377 if (op)
d9ba4830 8378 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8379 else
d9ba4830 8380 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8381 tcg_temp_free_i32(tmp2);
9ee6e8bb 8382 }
9ee6e8bb
PB
8383 break;
8384 case 1: /* 16 x 16 -> 32 */
d9ba4830 8385 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8386 tcg_temp_free_i32(tmp2);
9ee6e8bb 8387 if (rs != 15) {
d9ba4830 8388 tmp2 = load_reg(s, rs);
9ef39277 8389 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8390 tcg_temp_free_i32(tmp2);
9ee6e8bb 8391 }
9ee6e8bb
PB
8392 break;
8393 case 2: /* Dual multiply add. */
8394 case 4: /* Dual multiply subtract. */
8395 if (op)
d9ba4830
PB
8396 gen_swap_half(tmp2);
8397 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8398 if (insn & (1 << 22)) {
e1d177b9 8399 /* This subtraction cannot overflow. */
d9ba4830 8400 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8401 } else {
e1d177b9
PM
8402 /* This addition cannot overflow 32 bits;
8403 * however it may overflow considered as a signed
8404 * operation, in which case we must set the Q flag.
8405 */
9ef39277 8406 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8407 }
7d1b0095 8408 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8409 if (rs != 15)
8410 {
d9ba4830 8411 tmp2 = load_reg(s, rs);
9ef39277 8412 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8413 tcg_temp_free_i32(tmp2);
9ee6e8bb 8414 }
9ee6e8bb
PB
8415 break;
8416 case 3: /* 32 * 16 -> 32msb */
8417 if (op)
d9ba4830 8418 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8419 else
d9ba4830 8420 gen_sxth(tmp2);
a7812ae4
PB
8421 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8422 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8423 tmp = tcg_temp_new_i32();
a7812ae4 8424 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8425 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8426 if (rs != 15)
8427 {
d9ba4830 8428 tmp2 = load_reg(s, rs);
9ef39277 8429 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8430 tcg_temp_free_i32(tmp2);
9ee6e8bb 8431 }
9ee6e8bb 8432 break;
838fa72d
AJ
8433 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8434 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8435 if (rs != 15) {
838fa72d
AJ
8436 tmp = load_reg(s, rs);
8437 if (insn & (1 << 20)) {
8438 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8439 } else {
838fa72d 8440 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8441 }
2c0262af 8442 }
838fa72d
AJ
8443 if (insn & (1 << 4)) {
8444 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8445 }
8446 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8447 tmp = tcg_temp_new_i32();
838fa72d
AJ
8448 tcg_gen_trunc_i64_i32(tmp, tmp64);
8449 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8450 break;
8451 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8452 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8453 tcg_temp_free_i32(tmp2);
9ee6e8bb 8454 if (rs != 15) {
d9ba4830
PB
8455 tmp2 = load_reg(s, rs);
8456 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8457 tcg_temp_free_i32(tmp2);
5fd46862 8458 }
9ee6e8bb 8459 break;
2c0262af 8460 }
d9ba4830 8461 store_reg(s, rd, tmp);
2c0262af 8462 break;
9ee6e8bb
PB
8463 case 6: case 7: /* 64-bit multiply, Divide. */
8464 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8465 tmp = load_reg(s, rn);
8466 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8467 if ((op & 0x50) == 0x10) {
8468 /* sdiv, udiv */
47789990 8469 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8470 goto illegal_op;
47789990 8471 }
9ee6e8bb 8472 if (op & 0x20)
5e3f878a 8473 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8474 else
5e3f878a 8475 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8476 tcg_temp_free_i32(tmp2);
5e3f878a 8477 store_reg(s, rd, tmp);
9ee6e8bb
PB
8478 } else if ((op & 0xe) == 0xc) {
8479 /* Dual multiply accumulate long. */
8480 if (op & 1)
5e3f878a
PB
8481 gen_swap_half(tmp2);
8482 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8483 if (op & 0x10) {
5e3f878a 8484 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8485 } else {
5e3f878a 8486 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8487 }
7d1b0095 8488 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8489 /* BUGFIX */
8490 tmp64 = tcg_temp_new_i64();
8491 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8492 tcg_temp_free_i32(tmp);
a7812ae4
PB
8493 gen_addq(s, tmp64, rs, rd);
8494 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8495 tcg_temp_free_i64(tmp64);
2c0262af 8496 } else {
9ee6e8bb
PB
8497 if (op & 0x20) {
8498 /* Unsigned 64-bit multiply */
a7812ae4 8499 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8500 } else {
9ee6e8bb
PB
8501 if (op & 8) {
8502 /* smlalxy */
5e3f878a 8503 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8504 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8505 tmp64 = tcg_temp_new_i64();
8506 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8507 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8508 } else {
8509 /* Signed 64-bit multiply */
a7812ae4 8510 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8511 }
b5ff1b31 8512 }
9ee6e8bb
PB
8513 if (op & 4) {
8514 /* umaal */
a7812ae4
PB
8515 gen_addq_lo(s, tmp64, rs);
8516 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8517 } else if (op & 0x40) {
8518 /* 64-bit accumulate. */
a7812ae4 8519 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8520 }
a7812ae4 8521 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8522 tcg_temp_free_i64(tmp64);
5fd46862 8523 }
2c0262af 8524 break;
9ee6e8bb
PB
8525 }
8526 break;
8527 case 6: case 7: case 14: case 15:
8528 /* Coprocessor. */
8529 if (((insn >> 24) & 3) == 3) {
8530 /* Translate into the equivalent ARM encoding. */
f06053e3 8531 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8532 if (disas_neon_data_insn(env, s, insn))
8533 goto illegal_op;
8534 } else {
8535 if (insn & (1 << 28))
8536 goto illegal_op;
8537 if (disas_coproc_insn (env, s, insn))
8538 goto illegal_op;
8539 }
8540 break;
8541 case 8: case 9: case 10: case 11:
8542 if (insn & (1 << 15)) {
8543 /* Branches, misc control. */
8544 if (insn & 0x5000) {
8545 /* Unconditional branch. */
8546 /* signextend(hw1[10:0]) -> offset[:12]. */
8547 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8548 /* hw2[10:0] -> offset[11:1]. */
8549 offset |= (insn & 0x7ff) << 1;
8550 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8551 offset[24:22] already have the same value because of the
8552 sign extension above. */
8553 offset ^= ((~insn) & (1 << 13)) << 10;
8554 offset ^= ((~insn) & (1 << 11)) << 11;
8555
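#if 0
/* Illustrative sketch only: the same 25-bit branch offset
 * (S:I1:I2:imm10:imm11:0) that the XOR trickery above reconstructs,
 * written out directly.  'insn' holds hw1 in bits [31:16] and hw2 in
 * bits [15:0], as at this point in the decoder. */
static int32_t t32_branch_offset_example(uint32_t insn)
{
    uint32_t s     = (insn >> 26) & 1;
    uint32_t j1    = (insn >> 13) & 1;
    uint32_t j2    = (insn >> 11) & 1;
    uint32_t imm10 = (insn >> 16) & 0x3ff;
    uint32_t imm11 = insn & 0x7ff;
    uint32_t i1    = 1 ^ j1 ^ s;                /* I1 = NOT(J1 EOR S) */
    uint32_t i2    = 1 ^ j2 ^ s;                /* I2 = NOT(J2 EOR S) */
    uint32_t imm   = (s << 24) | (i1 << 23) | (i2 << 22)
                     | (imm10 << 12) | (imm11 << 1);

    return (int32_t)(imm << 7) >> 7;            /* sign extend from bit 24 */
}
#endif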
9ee6e8bb
PB
8556 if (insn & (1 << 14)) {
8557 /* Branch and link. */
3174f8e9 8558 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8559 }
3b46e624 8560
b0109805 8561 offset += s->pc;
9ee6e8bb
PB
8562 if (insn & (1 << 12)) {
8563 /* b/bl */
b0109805 8564 gen_jmp(s, offset);
9ee6e8bb
PB
8565 } else {
8566 /* blx */
b0109805 8567 offset &= ~(uint32_t)2;
be5e7a76 8568 /* thumb2 bx, no need to check */
b0109805 8569 gen_bx_im(s, offset);
2c0262af 8570 }
9ee6e8bb
PB
8571 } else if (((insn >> 23) & 7) == 7) {
8572 /* Misc control */
8573 if (insn & (1 << 13))
8574 goto illegal_op;
8575
8576 if (insn & (1 << 26)) {
8577 /* Secure monitor call (v6Z) */
8578 goto illegal_op; /* not implemented. */
2c0262af 8579 } else {
9ee6e8bb
PB
8580 op = (insn >> 20) & 7;
8581 switch (op) {
8582 case 0: /* msr cpsr. */
8583 if (IS_M(env)) {
8984bd2e
PB
8584 tmp = load_reg(s, rn);
8585 addr = tcg_const_i32(insn & 0xff);
8586 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8587 tcg_temp_free_i32(addr);
7d1b0095 8588 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8589 gen_lookup_tb(s);
8590 break;
8591 }
8592 /* fall through */
8593 case 1: /* msr spsr. */
8594 if (IS_M(env))
8595 goto illegal_op;
2fbac54b
FN
8596 tmp = load_reg(s, rn);
8597 if (gen_set_psr(s,
9ee6e8bb 8598 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8599 op == 1, tmp))
9ee6e8bb
PB
8600 goto illegal_op;
8601 break;
8602 case 2: /* cps, nop-hint. */
8603 if (((insn >> 8) & 7) == 0) {
8604 gen_nop_hint(s, insn & 0xff);
8605 }
8606 /* Implemented as NOP in user mode. */
8607 if (IS_USER(s))
8608 break;
8609 offset = 0;
8610 imm = 0;
8611 if (insn & (1 << 10)) {
8612 if (insn & (1 << 7))
8613 offset |= CPSR_A;
8614 if (insn & (1 << 6))
8615 offset |= CPSR_I;
8616 if (insn & (1 << 5))
8617 offset |= CPSR_F;
8618 if (insn & (1 << 9))
8619 imm = CPSR_A | CPSR_I | CPSR_F;
8620 }
8621 if (insn & (1 << 8)) {
8622 offset |= 0x1f;
8623 imm |= (insn & 0x1f);
8624 }
8625 if (offset) {
2fbac54b 8626 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8627 }
8628 break;
8629 case 3: /* Special control operations. */
426f5abc 8630 ARCH(7);
9ee6e8bb
PB
8631 op = (insn >> 4) & 0xf;
8632 switch (op) {
8633 case 2: /* clrex */
426f5abc 8634 gen_clrex(s);
9ee6e8bb
PB
8635 break;
8636 case 4: /* dsb */
8637 case 5: /* dmb */
8638 case 6: /* isb */
8639 /* These execute as NOPs. */
9ee6e8bb
PB
8640 break;
8641 default:
8642 goto illegal_op;
8643 }
8644 break;
8645 case 4: /* bxj */
8646 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8647 tmp = load_reg(s, rn);
8648 gen_bx(s, tmp);
9ee6e8bb
PB
8649 break;
8650 case 5: /* Exception return. */
b8b45b68
RV
8651 if (IS_USER(s)) {
8652 goto illegal_op;
8653 }
8654 if (rn != 14 || rd != 15) {
8655 goto illegal_op;
8656 }
8657 tmp = load_reg(s, rn);
8658 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8659 gen_exception_return(s, tmp);
8660 break;
9ee6e8bb 8661 case 6: /* mrs cpsr. */
7d1b0095 8662 tmp = tcg_temp_new_i32();
9ee6e8bb 8663 if (IS_M(env)) {
8984bd2e
PB
8664 addr = tcg_const_i32(insn & 0xff);
8665 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8666 tcg_temp_free_i32(addr);
9ee6e8bb 8667 } else {
9ef39277 8668 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8669 }
8984bd2e 8670 store_reg(s, rd, tmp);
9ee6e8bb
PB
8671 break;
8672 case 7: /* mrs spsr. */
8673 /* Not accessible in user mode. */
8674 if (IS_USER(s) || IS_M(env))
8675 goto illegal_op;
d9ba4830
PB
8676 tmp = load_cpu_field(spsr);
8677 store_reg(s, rd, tmp);
9ee6e8bb 8678 break;
2c0262af
FB
8679 }
8680 }
9ee6e8bb
PB
8681 } else {
8682 /* Conditional branch. */
8683 op = (insn >> 22) & 0xf;
8684 /* Generate a conditional jump to next instruction. */
8685 s->condlabel = gen_new_label();
d9ba4830 8686 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8687 s->condjmp = 1;
8688
8689 /* offset[11:1] = insn[10:0] */
8690 offset = (insn & 0x7ff) << 1;
8691 /* offset[17:12] = insn[21:16]. */
8692 offset |= (insn & 0x003f0000) >> 4;
8693 /* offset[31:20] = insn[26]. */
8694 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8695 /* offset[18] = insn[13]. */
8696 offset |= (insn & (1 << 13)) << 5;
8697 /* offset[19] = insn[11]. */
8698 offset |= (insn & (1 << 11)) << 8;
8699
8700 /* jump to the offset */
b0109805 8701 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8702 }
8703 } else {
8704 /* Data processing immediate. */
8705 if (insn & (1 << 25)) {
8706 if (insn & (1 << 24)) {
8707 if (insn & (1 << 20))
8708 goto illegal_op;
8709 /* Bitfield/Saturate. */
8710 op = (insn >> 21) & 7;
8711 imm = insn & 0x1f;
8712 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8713 if (rn == 15) {
7d1b0095 8714 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8715 tcg_gen_movi_i32(tmp, 0);
8716 } else {
8717 tmp = load_reg(s, rn);
8718 }
9ee6e8bb
PB
8719 switch (op) {
8720 case 2: /* Signed bitfield extract. */
8721 imm++;
8722 if (shift + imm > 32)
8723 goto illegal_op;
8724 if (imm < 32)
6ddbc6e4 8725 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8726 break;
8727 case 6: /* Unsigned bitfield extract. */
8728 imm++;
8729 if (shift + imm > 32)
8730 goto illegal_op;
8731 if (imm < 32)
6ddbc6e4 8732 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8733 break;
8734 case 3: /* Bitfield insert/clear. */
8735 if (imm < shift)
8736 goto illegal_op;
8737 imm = imm + 1 - shift;
8738 if (imm != 32) {
6ddbc6e4 8739 tmp2 = load_reg(s, rd);
d593c48e 8740 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8741 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8742 }
8743 break;
8744 case 7:
8745 goto illegal_op;
8746 default: /* Saturate. */
9ee6e8bb
PB
8747 if (shift) {
8748 if (op & 1)
6ddbc6e4 8749 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8750 else
6ddbc6e4 8751 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8752 }
6ddbc6e4 8753 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8754 if (op & 4) {
8755 /* Unsigned. */
9ee6e8bb 8756 if ((op & 1) && shift == 0)
9ef39277 8757 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8758 else
9ef39277 8759 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8760 } else {
9ee6e8bb 8761 /* Signed. */
9ee6e8bb 8762 if ((op & 1) && shift == 0)
9ef39277 8763 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8764 else
9ef39277 8765 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8766 }
b75263d6 8767 tcg_temp_free_i32(tmp2);
9ee6e8bb 8768 break;
2c0262af 8769 }
6ddbc6e4 8770 store_reg(s, rd, tmp);
9ee6e8bb
PB
8771 } else {
8772 imm = ((insn & 0x04000000) >> 15)
8773 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8774 if (insn & (1 << 22)) {
8775 /* 16-bit immediate. */
8776 imm |= (insn >> 4) & 0xf000;
8777 if (insn & (1 << 23)) {
8778 /* movt */
5e3f878a 8779 tmp = load_reg(s, rd);
86831435 8780 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8781 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8782 } else {
9ee6e8bb 8783 /* movw */
7d1b0095 8784 tmp = tcg_temp_new_i32();
5e3f878a 8785 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8786 }
8787 } else {
9ee6e8bb
PB
8788 /* Add/sub 12-bit immediate. */
8789 if (rn == 15) {
b0109805 8790 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8791 if (insn & (1 << 23))
b0109805 8792 offset -= imm;
9ee6e8bb 8793 else
b0109805 8794 offset += imm;
7d1b0095 8795 tmp = tcg_temp_new_i32();
5e3f878a 8796 tcg_gen_movi_i32(tmp, offset);
2c0262af 8797 } else {
5e3f878a 8798 tmp = load_reg(s, rn);
9ee6e8bb 8799 if (insn & (1 << 23))
5e3f878a 8800 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8801 else
5e3f878a 8802 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8803 }
9ee6e8bb 8804 }
5e3f878a 8805 store_reg(s, rd, tmp);
191abaa2 8806 }
9ee6e8bb
PB
8807 } else {
8808 int shifter_out = 0;
8809 /* modified 12-bit immediate. */
8810 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8811 imm = (insn & 0xff);
8812 switch (shift) {
8813 case 0: /* XY */
8814 /* Nothing to do. */
8815 break;
8816 case 1: /* 00XY00XY */
8817 imm |= imm << 16;
8818 break;
8819 case 2: /* XY00XY00 */
8820 imm |= imm << 16;
8821 imm <<= 8;
8822 break;
8823 case 3: /* XYXYXYXY */
8824 imm |= imm << 16;
8825 imm |= imm << 8;
8826 break;
8827 default: /* Rotated constant. */
8828 shift = (shift << 1) | (imm >> 7);
8829 imm |= 0x80;
8830 imm = imm << (32 - shift);
8831 shifter_out = 1;
8832 break;
b5ff1b31 8833 }
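#if 0
/* Illustrative sketch only: the Thumb-2 "modified immediate" expansion
 * performed by the switch above, with the 12-bit field (i:imm3:imm8)
 * passed in as one value. */
static uint32_t thumb_expand_imm_example(uint32_t imm12)
{
    uint32_t imm8 = imm12 & 0xff;
    uint32_t rot;

    switch ((imm12 >> 8) & 0xf) {
    case 0:
        return imm8;                            /* 000000XY */
    case 1:
        return imm8 | (imm8 << 16);             /* 00XY00XY */
    case 2:
        return (imm8 << 8) | (imm8 << 24);      /* XY00XY00 */
    case 3:
        return imm8 * 0x01010101u;              /* XYXYXYXY */
    default:
        /* rotated constant: 1bcdefgh rotated right by imm12[11:7] */
        imm8 = 0x80 | (imm12 & 0x7f);
        rot = (imm12 >> 7) & 0x1f;
        return (imm8 >> rot) | (imm8 << (32 - rot));
    }
}
#endif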
7d1b0095 8834 tmp2 = tcg_temp_new_i32();
3174f8e9 8835 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8836 rn = (insn >> 16) & 0xf;
3174f8e9 8837 if (rn == 15) {
7d1b0095 8838 tmp = tcg_temp_new_i32();
3174f8e9
FN
8839 tcg_gen_movi_i32(tmp, 0);
8840 } else {
8841 tmp = load_reg(s, rn);
8842 }
9ee6e8bb
PB
8843 op = (insn >> 21) & 0xf;
8844 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8845 shifter_out, tmp, tmp2))
9ee6e8bb 8846 goto illegal_op;
7d1b0095 8847 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8848 rd = (insn >> 8) & 0xf;
8849 if (rd != 15) {
3174f8e9
FN
8850 store_reg(s, rd, tmp);
8851 } else {
7d1b0095 8852 tcg_temp_free_i32(tmp);
2c0262af 8853 }
2c0262af 8854 }
9ee6e8bb
PB
8855 }
8856 break;
8857 case 12: /* Load/store single data item. */
8858 {
8859 int postinc = 0;
8860 int writeback = 0;
b0109805 8861 int user;
9ee6e8bb
PB
8862 if ((insn & 0x01100000) == 0x01000000) {
8863 if (disas_neon_ls_insn(env, s, insn))
c1713132 8864 goto illegal_op;
9ee6e8bb
PB
8865 break;
8866 }
a2fdc890
PM
8867 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8868 if (rs == 15) {
8869 if (!(insn & (1 << 20))) {
8870 goto illegal_op;
8871 }
8872 if (op != 2) {
8873 /* Byte or halfword load space with dest == r15: memory hints.
8874 * Catch them early so we don't emit pointless addressing code.
8875 * This space is a mix of:
8876 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8877 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8878 * cores)
8879 * unallocated hints, which must be treated as NOPs
8880 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8881 * which is easiest for the decoding logic
8882 * Some space which must UNDEF
8883 */
8884 int op1 = (insn >> 23) & 3;
8885 int op2 = (insn >> 6) & 0x3f;
8886 if (op & 2) {
8887 goto illegal_op;
8888 }
8889 if (rn == 15) {
02afbf64
PM
8890 /* UNPREDICTABLE, unallocated hint or
8891 * PLD/PLDW/PLI (literal)
8892 */
a2fdc890
PM
8893 return 0;
8894 }
8895 if (op1 & 1) {
02afbf64 8896 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8897 }
8898 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 8899 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8900 }
8901 /* UNDEF space, or an UNPREDICTABLE */
8902 return 1;
8903 }
8904 }
b0109805 8905 user = IS_USER(s);
9ee6e8bb 8906 if (rn == 15) {
7d1b0095 8907 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8908 /* PC relative. */
8909 /* s->pc has already been incremented by 4. */
8910 imm = s->pc & 0xfffffffc;
8911 if (insn & (1 << 23))
8912 imm += insn & 0xfff;
8913 else
8914 imm -= insn & 0xfff;
b0109805 8915 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8916 } else {
b0109805 8917 addr = load_reg(s, rn);
9ee6e8bb
PB
8918 if (insn & (1 << 23)) {
8919 /* Positive offset. */
8920 imm = insn & 0xfff;
b0109805 8921 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8922 } else {
9ee6e8bb 8923 imm = insn & 0xff;
2a0308c5
PM
8924 switch ((insn >> 8) & 0xf) {
8925 case 0x0: /* Shifted Register. */
9ee6e8bb 8926 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8927 if (shift > 3) {
8928 tcg_temp_free_i32(addr);
18c9b560 8929 goto illegal_op;
2a0308c5 8930 }
b26eefb6 8931 tmp = load_reg(s, rm);
9ee6e8bb 8932 if (shift)
b26eefb6 8933 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8934 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8935 tcg_temp_free_i32(tmp);
9ee6e8bb 8936 break;
2a0308c5 8937 case 0xc: /* Negative offset. */
b0109805 8938 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8939 break;
2a0308c5 8940 case 0xe: /* User privilege. */
b0109805
PB
8941 tcg_gen_addi_i32(addr, addr, imm);
8942 user = 1;
9ee6e8bb 8943 break;
2a0308c5 8944 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8945 imm = -imm;
8946 /* Fall through. */
2a0308c5 8947 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8948 postinc = 1;
8949 writeback = 1;
8950 break;
2a0308c5 8951 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8952 imm = -imm;
8953 /* Fall through. */
2a0308c5 8954 case 0xf: /* Pre-increment. */
b0109805 8955 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8956 writeback = 1;
8957 break;
8958 default:
2a0308c5 8959 tcg_temp_free_i32(addr);
b7bcbe95 8960 goto illegal_op;
9ee6e8bb
PB
8961 }
8962 }
8963 }
9ee6e8bb
PB
8964 if (insn & (1 << 20)) {
8965 /* Load. */
a2fdc890
PM
8966 switch (op) {
8967 case 0: tmp = gen_ld8u(addr, user); break;
8968 case 4: tmp = gen_ld8s(addr, user); break;
8969 case 1: tmp = gen_ld16u(addr, user); break;
8970 case 5: tmp = gen_ld16s(addr, user); break;
8971 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8972 default:
8973 tcg_temp_free_i32(addr);
8974 goto illegal_op;
a2fdc890
PM
8975 }
8976 if (rs == 15) {
8977 gen_bx(s, tmp);
9ee6e8bb 8978 } else {
a2fdc890 8979 store_reg(s, rs, tmp);
9ee6e8bb
PB
8980 }
8981 } else {
8982 /* Store. */
b0109805 8983 tmp = load_reg(s, rs);
9ee6e8bb 8984 switch (op) {
b0109805
PB
8985 case 0: gen_st8(tmp, addr, user); break;
8986 case 1: gen_st16(tmp, addr, user); break;
8987 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8988 default:
8989 tcg_temp_free_i32(addr);
8990 goto illegal_op;
b7bcbe95 8991 }
2c0262af 8992 }
9ee6e8bb 8993 if (postinc)
b0109805
PB
8994 tcg_gen_addi_i32(addr, addr, imm);
8995 if (writeback) {
8996 store_reg(s, rn, addr);
8997 } else {
7d1b0095 8998 tcg_temp_free_i32(addr);
b0109805 8999 }
9ee6e8bb
PB
9000 }
9001 break;
9002 default:
9003 goto illegal_op;
2c0262af 9004 }
9ee6e8bb
PB
9005 return 0;
9006illegal_op:
9007 return 1;
2c0262af
FB
9008}
9009
0ecb72a5 9010static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9011{
9012 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9013 int32_t offset;
9014 int i;
b26eefb6 9015 TCGv tmp;
d9ba4830 9016 TCGv tmp2;
b0109805 9017 TCGv addr;
99c475ab 9018
9ee6e8bb
PB
9019 if (s->condexec_mask) {
9020 cond = s->condexec_cond;
bedd2912
JB
9021 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9022 s->condlabel = gen_new_label();
9023 gen_test_cc(cond ^ 1, s->condlabel);
9024 s->condjmp = 1;
9025 }
9ee6e8bb
PB
9026 }
9027
d31dd73e 9028 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9029 s->pc += 2;
b5ff1b31 9030
99c475ab
FB
9031 switch (insn >> 12) {
9032 case 0: case 1:
396e467c 9033
99c475ab
FB
9034 rd = insn & 7;
9035 op = (insn >> 11) & 3;
9036 if (op == 3) {
9037 /* add/subtract */
9038 rn = (insn >> 3) & 7;
396e467c 9039 tmp = load_reg(s, rn);
99c475ab
FB
9040 if (insn & (1 << 10)) {
9041 /* immediate */
7d1b0095 9042 tmp2 = tcg_temp_new_i32();
396e467c 9043 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9044 } else {
9045 /* reg */
9046 rm = (insn >> 6) & 7;
396e467c 9047 tmp2 = load_reg(s, rm);
99c475ab 9048 }
9ee6e8bb
PB
9049 if (insn & (1 << 9)) {
9050 if (s->condexec_mask)
396e467c 9051 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9052 else
72485ec4 9053 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9054 } else {
9055 if (s->condexec_mask)
396e467c 9056 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9057 else
72485ec4 9058 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9059 }
7d1b0095 9060 tcg_temp_free_i32(tmp2);
396e467c 9061 store_reg(s, rd, tmp);
99c475ab
FB
9062 } else {
9063 /* shift immediate */
9064 rm = (insn >> 3) & 7;
9065 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9066 tmp = load_reg(s, rm);
9067 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9068 if (!s->condexec_mask)
9069 gen_logic_CC(tmp);
9070 store_reg(s, rd, tmp);
99c475ab
FB
9071 }
9072 break;
9073 case 2: case 3:
9074 /* arithmetic large immediate */
9075 op = (insn >> 11) & 3;
9076 rd = (insn >> 8) & 0x7;
396e467c 9077 if (op == 0) { /* mov */
7d1b0095 9078 tmp = tcg_temp_new_i32();
396e467c 9079 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9080 if (!s->condexec_mask)
396e467c
FN
9081 gen_logic_CC(tmp);
9082 store_reg(s, rd, tmp);
9083 } else {
9084 tmp = load_reg(s, rd);
7d1b0095 9085 tmp2 = tcg_temp_new_i32();
396e467c
FN
9086 tcg_gen_movi_i32(tmp2, insn & 0xff);
9087 switch (op) {
9088 case 1: /* cmp */
72485ec4 9089 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9090 tcg_temp_free_i32(tmp);
9091 tcg_temp_free_i32(tmp2);
396e467c
FN
9092 break;
9093 case 2: /* add */
9094 if (s->condexec_mask)
9095 tcg_gen_add_i32(tmp, tmp, tmp2);
9096 else
72485ec4 9097 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9098 tcg_temp_free_i32(tmp2);
396e467c
FN
9099 store_reg(s, rd, tmp);
9100 break;
9101 case 3: /* sub */
9102 if (s->condexec_mask)
9103 tcg_gen_sub_i32(tmp, tmp, tmp2);
9104 else
72485ec4 9105 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9106 tcg_temp_free_i32(tmp2);
396e467c
FN
9107 store_reg(s, rd, tmp);
9108 break;
9109 }
99c475ab 9110 }
99c475ab
FB
9111 break;
9112 case 4:
9113 if (insn & (1 << 11)) {
9114 rd = (insn >> 8) & 7;
5899f386
FB
9115 /* load pc-relative. Bit 1 of PC is ignored. */
9116 val = s->pc + 2 + ((insn & 0xff) * 4);
9117 val &= ~(uint32_t)2;
7d1b0095 9118 addr = tcg_temp_new_i32();
b0109805
PB
9119 tcg_gen_movi_i32(addr, val);
9120 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9121 tcg_temp_free_i32(addr);
b0109805 9122 store_reg(s, rd, tmp);
99c475ab
FB
9123 break;
9124 }
9125 if (insn & (1 << 10)) {
9126 /* data processing extended or blx */
9127 rd = (insn & 7) | ((insn >> 4) & 8);
9128 rm = (insn >> 3) & 0xf;
9129 op = (insn >> 8) & 3;
9130 switch (op) {
9131 case 0: /* add */
396e467c
FN
9132 tmp = load_reg(s, rd);
9133 tmp2 = load_reg(s, rm);
9134 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9135 tcg_temp_free_i32(tmp2);
396e467c 9136 store_reg(s, rd, tmp);
99c475ab
FB
9137 break;
9138 case 1: /* cmp */
396e467c
FN
9139 tmp = load_reg(s, rd);
9140 tmp2 = load_reg(s, rm);
72485ec4 9141 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9142 tcg_temp_free_i32(tmp2);
9143 tcg_temp_free_i32(tmp);
99c475ab
FB
9144 break;
9145 case 2: /* mov/cpy */
396e467c
FN
9146 tmp = load_reg(s, rm);
9147 store_reg(s, rd, tmp);
99c475ab
FB
9148 break;
9149 case 3:/* branch [and link] exchange thumb register */
b0109805 9150 tmp = load_reg(s, rm);
99c475ab 9151 if (insn & (1 << 7)) {
be5e7a76 9152 ARCH(5);
99c475ab 9153 val = (uint32_t)s->pc | 1;
7d1b0095 9154 tmp2 = tcg_temp_new_i32();
b0109805
PB
9155 tcg_gen_movi_i32(tmp2, val);
9156 store_reg(s, 14, tmp2);
99c475ab 9157 }
be5e7a76 9158 /* already thumb, no need to check */
d9ba4830 9159 gen_bx(s, tmp);
99c475ab
FB
9160 break;
9161 }
9162 break;
9163 }
9164
9165 /* data processing register */
9166 rd = insn & 7;
9167 rm = (insn >> 3) & 7;
9168 op = (insn >> 6) & 0xf;
9169 if (op == 2 || op == 3 || op == 4 || op == 7) {
9170 /* the shift/rotate ops want the operands backwards */
9171 val = rm;
9172 rm = rd;
9173 rd = val;
9174 val = 1;
9175 } else {
9176 val = 0;
9177 }
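    /* The register-shift helpers below compute "tmp2 shifted by tmp", so
     * for those ops rd/rm have just been swapped and val records that the
     * result ends up in tmp2.  Ops that only set flags (tst, cmp, cmn)
     * force rd to 16 so that nothing is written back. */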
9178
396e467c 9179 if (op == 9) { /* neg */
7d1b0095 9180 tmp = tcg_temp_new_i32();
396e467c
FN
9181 tcg_gen_movi_i32(tmp, 0);
9182 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9183 tmp = load_reg(s, rd);
9184 } else {
9185 TCGV_UNUSED(tmp);
9186 }
99c475ab 9187
396e467c 9188 tmp2 = load_reg(s, rm);
5899f386 9189 switch (op) {
99c475ab 9190 case 0x0: /* and */
396e467c 9191 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9192 if (!s->condexec_mask)
396e467c 9193 gen_logic_CC(tmp);
99c475ab
FB
9194 break;
9195 case 0x1: /* eor */
396e467c 9196 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9197 if (!s->condexec_mask)
396e467c 9198 gen_logic_CC(tmp);
99c475ab
FB
9199 break;
9200 case 0x2: /* lsl */
9ee6e8bb 9201 if (s->condexec_mask) {
365af80e 9202 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9203 } else {
9ef39277 9204 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9205 gen_logic_CC(tmp2);
9ee6e8bb 9206 }
99c475ab
FB
9207 break;
9208 case 0x3: /* lsr */
9ee6e8bb 9209 if (s->condexec_mask) {
365af80e 9210 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9211 } else {
9ef39277 9212 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9213 gen_logic_CC(tmp2);
9ee6e8bb 9214 }
99c475ab
FB
9215 break;
9216 case 0x4: /* asr */
9ee6e8bb 9217 if (s->condexec_mask) {
365af80e 9218 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9219 } else {
9ef39277 9220 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9221 gen_logic_CC(tmp2);
9ee6e8bb 9222 }
99c475ab
FB
9223 break;
9224 case 0x5: /* adc */
9ee6e8bb 9225 if (s->condexec_mask)
396e467c 9226 gen_adc(tmp, tmp2);
9ee6e8bb 9227 else
9ef39277 9228 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
99c475ab
FB
9229 break;
9230 case 0x6: /* sbc */
9ee6e8bb 9231 if (s->condexec_mask)
396e467c 9232 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9233 else
9ef39277 9234 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
99c475ab
FB
9235 break;
9236 case 0x7: /* ror */
9ee6e8bb 9237 if (s->condexec_mask) {
f669df27
AJ
9238 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9239 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9240 } else {
9ef39277 9241 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9242 gen_logic_CC(tmp2);
9ee6e8bb 9243 }
99c475ab
FB
9244 break;
9245 case 0x8: /* tst */
396e467c
FN
9246 tcg_gen_and_i32(tmp, tmp, tmp2);
9247 gen_logic_CC(tmp);
99c475ab 9248 rd = 16;
5899f386 9249 break;
99c475ab 9250 case 0x9: /* neg */
9ee6e8bb 9251 if (s->condexec_mask)
396e467c 9252 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9253 else
72485ec4 9254 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9255 break;
9256 case 0xa: /* cmp */
72485ec4 9257 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9258 rd = 16;
9259 break;
9260 case 0xb: /* cmn */
72485ec4 9261 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9262 rd = 16;
9263 break;
9264 case 0xc: /* orr */
396e467c 9265 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9266 if (!s->condexec_mask)
396e467c 9267 gen_logic_CC(tmp);
99c475ab
FB
9268 break;
9269 case 0xd: /* mul */
7b2919a0 9270 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9271 if (!s->condexec_mask)
396e467c 9272 gen_logic_CC(tmp);
99c475ab
FB
9273 break;
9274 case 0xe: /* bic */
f669df27 9275 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9276 if (!s->condexec_mask)
396e467c 9277 gen_logic_CC(tmp);
99c475ab
FB
9278 break;
9279 case 0xf: /* mvn */
396e467c 9280 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9281 if (!s->condexec_mask)
396e467c 9282 gen_logic_CC(tmp2);
99c475ab 9283 val = 1;
5899f386 9284 rm = rd;
99c475ab
FB
9285 break;
9286 }
9287 if (rd != 16) {
396e467c
FN
9288 if (val) {
9289 store_reg(s, rm, tmp2);
9290 if (op != 0xf)
7d1b0095 9291 tcg_temp_free_i32(tmp);
396e467c
FN
9292 } else {
9293 store_reg(s, rd, tmp);
7d1b0095 9294 tcg_temp_free_i32(tmp2);
396e467c
FN
9295 }
9296 } else {
7d1b0095
PM
9297 tcg_temp_free_i32(tmp);
9298 tcg_temp_free_i32(tmp2);
99c475ab
FB
9299 }
9300 break;
9301
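    /* Case 5: load/store with a register offset.  The op field in
     * bits [11:9] selects str/strh/strb/ldrsb/ldr/ldrh/ldrb/ldrsh, and the
     * effective address is Rn + Rm. */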
9302 case 5:
9303 /* load/store register offset. */
9304 rd = insn & 7;
9305 rn = (insn >> 3) & 7;
9306 rm = (insn >> 6) & 7;
9307 op = (insn >> 9) & 7;
b0109805 9308 addr = load_reg(s, rn);
b26eefb6 9309 tmp = load_reg(s, rm);
b0109805 9310 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9311 tcg_temp_free_i32(tmp);
99c475ab
FB
9312
9313 if (op < 3) /* store */
b0109805 9314 tmp = load_reg(s, rd);
99c475ab
FB
9315
9316 switch (op) {
9317 case 0: /* str */
b0109805 9318 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9319 break;
9320 case 1: /* strh */
b0109805 9321 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9322 break;
9323 case 2: /* strb */
b0109805 9324 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9325 break;
9326 case 3: /* ldrsb */
b0109805 9327 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9328 break;
9329 case 4: /* ldr */
b0109805 9330 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9331 break;
9332 case 5: /* ldrh */
b0109805 9333 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9334 break;
9335 case 6: /* ldrb */
b0109805 9336 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9337 break;
9338 case 7: /* ldrsh */
b0109805 9339 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9340 break;
9341 }
9342 if (op >= 3) /* load */
b0109805 9343 store_reg(s, rd, tmp);
7d1b0095 9344 tcg_temp_free_i32(addr);
99c475ab
FB
9345 break;
9346
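    /* Cases 6-8: load/store with a 5-bit immediate offset, scaled by the
     * access size: words use (insn >> 4) & 0x7c (imm5 * 4), halfwords
     * (insn >> 5) & 0x3e (imm5 * 2), and bytes (insn >> 6) & 0x1f. */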
9347 case 6:
9348 /* load/store word immediate offset */
9349 rd = insn & 7;
9350 rn = (insn >> 3) & 7;
b0109805 9351 addr = load_reg(s, rn);
99c475ab 9352 val = (insn >> 4) & 0x7c;
b0109805 9353 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9354
9355 if (insn & (1 << 11)) {
9356 /* load */
b0109805
PB
9357 tmp = gen_ld32(addr, IS_USER(s));
9358 store_reg(s, rd, tmp);
99c475ab
FB
9359 } else {
9360 /* store */
b0109805
PB
9361 tmp = load_reg(s, rd);
9362 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9363 }
7d1b0095 9364 tcg_temp_free_i32(addr);
99c475ab
FB
9365 break;
9366
9367 case 7:
9368 /* load/store byte immediate offset */
9369 rd = insn & 7;
9370 rn = (insn >> 3) & 7;
b0109805 9371 addr = load_reg(s, rn);
99c475ab 9372 val = (insn >> 6) & 0x1f;
b0109805 9373 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9374
9375 if (insn & (1 << 11)) {
9376 /* load */
b0109805
PB
9377 tmp = gen_ld8u(addr, IS_USER(s));
9378 store_reg(s, rd, tmp);
99c475ab
FB
9379 } else {
9380 /* store */
b0109805
PB
9381 tmp = load_reg(s, rd);
9382 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9383 }
7d1b0095 9384 tcg_temp_free_i32(addr);
99c475ab
FB
9385 break;
9386
9387 case 8:
9388 /* load/store halfword immediate offset */
9389 rd = insn & 7;
9390 rn = (insn >> 3) & 7;
b0109805 9391 addr = load_reg(s, rn);
99c475ab 9392 val = (insn >> 5) & 0x3e;
b0109805 9393 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9394
9395 if (insn & (1 << 11)) {
9396 /* load */
b0109805
PB
9397 tmp = gen_ld16u(addr, IS_USER(s));
9398 store_reg(s, rd, tmp);
99c475ab
FB
9399 } else {
9400 /* store */
b0109805
PB
9401 tmp = load_reg(s, rd);
9402 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9403 }
7d1b0095 9404 tcg_temp_free_i32(addr);
99c475ab
FB
9405 break;
9406
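    /* Case 9: SP-relative load/store word with an imm8 * 4 offset.
     * Case 10 below adds imm8 * 4 to SP, or to the PC with bit 1 cleared,
     * and writes the result to a low register. */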
9407 case 9:
9408 /* load/store from stack */
9409 rd = (insn >> 8) & 7;
b0109805 9410 addr = load_reg(s, 13);
99c475ab 9411 val = (insn & 0xff) * 4;
b0109805 9412 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9413
9414 if (insn & (1 << 11)) {
9415 /* load */
b0109805
PB
9416 tmp = gen_ld32(addr, IS_USER(s));
9417 store_reg(s, rd, tmp);
99c475ab
FB
9418 } else {
9419 /* store */
b0109805
PB
9420 tmp = load_reg(s, rd);
9421 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9422 }
7d1b0095 9423 tcg_temp_free_i32(addr);
99c475ab
FB
9424 break;
9425
9426 case 10:
 9427 /* add sp/pc (high reg) plus immediate to a low register */
9428 rd = (insn >> 8) & 7;
5899f386
FB
9429 if (insn & (1 << 11)) {
9430 /* SP */
5e3f878a 9431 tmp = load_reg(s, 13);
5899f386
FB
9432 } else {
9433 /* PC. bit 1 is ignored. */
7d1b0095 9434 tmp = tcg_temp_new_i32();
5e3f878a 9435 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9436 }
99c475ab 9437 val = (insn & 0xff) * 4;
5e3f878a
PB
9438 tcg_gen_addi_i32(tmp, tmp, val);
9439 store_reg(s, rd, tmp);
99c475ab
FB
9440 break;
9441
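    /* Case 11 is the miscellaneous group, dispatched on bits [11:8]:
     * SP adjustment, sign/zero extension, push/pop, compare-and-branch,
     * IT and hints, bkpt, byte-reverse, and setend/cps. */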
9442 case 11:
9443 /* misc */
9444 op = (insn >> 8) & 0xf;
9445 switch (op) {
9446 case 0:
9447 /* adjust stack pointer */
b26eefb6 9448 tmp = load_reg(s, 13);
99c475ab
FB
9449 val = (insn & 0x7f) * 4;
9450 if (insn & (1 << 7))
6a0d8a1d 9451 val = -(int32_t)val;
b26eefb6
PB
9452 tcg_gen_addi_i32(tmp, tmp, val);
9453 store_reg(s, 13, tmp);
99c475ab
FB
9454 break;
9455
9ee6e8bb
PB
9456 case 2: /* sign/zero extend. */
9457 ARCH(6);
9458 rd = insn & 7;
9459 rm = (insn >> 3) & 7;
b0109805 9460 tmp = load_reg(s, rm);
9ee6e8bb 9461 switch ((insn >> 6) & 3) {
b0109805
PB
9462 case 0: gen_sxth(tmp); break;
9463 case 1: gen_sxtb(tmp); break;
9464 case 2: gen_uxth(tmp); break;
9465 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9466 }
b0109805 9467 store_reg(s, rd, tmp);
9ee6e8bb 9468 break;
99c475ab
FB
9469 case 4: case 5: case 0xc: case 0xd:
9470 /* push/pop */
b0109805 9471 addr = load_reg(s, 13);
5899f386
FB
9472 if (insn & (1 << 8))
9473 offset = 4;
99c475ab 9474 else
5899f386
FB
9475 offset = 0;
9476 for (i = 0; i < 8; i++) {
9477 if (insn & (1 << i))
9478 offset += 4;
9479 }
9480 if ((insn & (1 << 11)) == 0) {
b0109805 9481 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9482 }
99c475ab
FB
9483 for (i = 0; i < 8; i++) {
9484 if (insn & (1 << i)) {
9485 if (insn & (1 << 11)) {
9486 /* pop */
b0109805
PB
9487 tmp = gen_ld32(addr, IS_USER(s));
9488 store_reg(s, i, tmp);
99c475ab
FB
9489 } else {
9490 /* push */
b0109805
PB
9491 tmp = load_reg(s, i);
9492 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9493 }
5899f386 9494 /* advance to the next address. */
b0109805 9495 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9496 }
9497 }
a50f5b91 9498 TCGV_UNUSED(tmp);
99c475ab
FB
9499 if (insn & (1 << 8)) {
9500 if (insn & (1 << 11)) {
9501 /* pop pc */
b0109805 9502 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9503 /* don't set the pc until the rest of the instruction
9504 has completed */
9505 } else {
9506 /* push lr */
b0109805
PB
9507 tmp = load_reg(s, 14);
9508 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9509 }
b0109805 9510 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9511 }
5899f386 9512 if ((insn & (1 << 11)) == 0) {
b0109805 9513 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9514 }
99c475ab 9515 /* write back the new stack pointer */
b0109805 9516 store_reg(s, 13, addr);
99c475ab 9517 /* set the new PC value */
be5e7a76
DES
9518 if ((insn & 0x0900) == 0x0900) {
9519 store_reg_from_load(env, s, 15, tmp);
9520 }
99c475ab
FB
9521 break;
9522
9ee6e8bb
PB
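        /* Compare and branch on (non-)zero: bit 11 selects cbnz, otherwise
         * cbz.  The unsigned offset ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3)
         * is added to s->pc + 2, so these branches only go forwards. */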
9523 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9524 rm = insn & 7;
d9ba4830 9525 tmp = load_reg(s, rm);
9ee6e8bb
PB
9526 s->condlabel = gen_new_label();
9527 s->condjmp = 1;
9528 if (insn & (1 << 11))
cb63669a 9529 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9530 else
cb63669a 9531 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9532 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9533 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9534 val = (uint32_t)s->pc + 2;
9535 val += offset;
9536 gen_jmp(s, val);
9537 break;
9538
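        /* IT and hint space.  A zero low nibble means one of the hint
         * encodings (handled by gen_nop_hint); otherwise this is IT:
         * firstcond with its low bit masked off goes into condexec_cond
         * and the low five bits into condexec_mask.  No code is generated
         * here; the IT state is applied while translating the following
         * instructions. */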
9539 case 15: /* IT, nop-hint. */
9540 if ((insn & 0xf) == 0) {
9541 gen_nop_hint(s, (insn >> 4) & 0xf);
9542 break;
9543 }
9544 /* If Then. */
9545 s->condexec_cond = (insn >> 4) & 0xe;
9546 s->condexec_mask = insn & 0x1f;
9547 /* No actual code generated for this insn, just setup state. */
9548 break;
9549
06c949e6 9550 case 0xe: /* bkpt */
be5e7a76 9551 ARCH(5);
bc4a0de0 9552 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9553 break;
9554
9ee6e8bb
PB
9555 case 0xa: /* rev */
9556 ARCH(6);
9557 rn = (insn >> 3) & 0x7;
9558 rd = insn & 0x7;
b0109805 9559 tmp = load_reg(s, rn);
9ee6e8bb 9560 switch ((insn >> 6) & 3) {
66896cb8 9561 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9562 case 1: gen_rev16(tmp); break;
9563 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9564 default: goto illegal_op;
9565 }
b0109805 9566 store_reg(s, rd, tmp);
9ee6e8bb
PB
9567 break;
9568
d9e028c1
PM
9569 case 6:
9570 switch ((insn >> 5) & 7) {
9571 case 2:
9572 /* setend */
9573 ARCH(6);
10962fd5
PM
9574 if (((insn >> 3) & 1) != s->bswap_code) {
9575 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9576 goto illegal_op;
9577 }
9ee6e8bb 9578 break;
d9e028c1
PM
9579 case 3:
9580 /* cps */
9581 ARCH(6);
9582 if (IS_USER(s)) {
9583 break;
8984bd2e 9584 }
d9e028c1
PM
9585 if (IS_M(env)) {
9586 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9587 /* FAULTMASK */
9588 if (insn & 1) {
9589 addr = tcg_const_i32(19);
9590 gen_helper_v7m_msr(cpu_env, addr, tmp);
9591 tcg_temp_free_i32(addr);
9592 }
9593 /* PRIMASK */
9594 if (insn & 2) {
9595 addr = tcg_const_i32(16);
9596 gen_helper_v7m_msr(cpu_env, addr, tmp);
9597 tcg_temp_free_i32(addr);
9598 }
9599 tcg_temp_free_i32(tmp);
9600 gen_lookup_tb(s);
9601 } else {
9602 if (insn & (1 << 4)) {
9603 shift = CPSR_A | CPSR_I | CPSR_F;
9604 } else {
9605 shift = 0;
9606 }
9607 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9608 }
d9e028c1
PM
9609 break;
9610 default:
9611 goto undef;
9ee6e8bb
PB
9612 }
9613 break;
9614
99c475ab
FB
9615 default:
9616 goto undef;
9617 }
9618 break;
9619
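    /* Case 12: ldmia/stmia Rn!, {reglist}.  The base register is only
     * written back when it is not in the register list; if it is in the
     * list and this is a load, the loaded value wins and is stored last
     * via loaded_var. */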
9620 case 12:
a7d3970d 9621 {
99c475ab 9622 /* load/store multiple */
a7d3970d
PM
9623 TCGv loaded_var;
9624 TCGV_UNUSED(loaded_var);
99c475ab 9625 rn = (insn >> 8) & 0x7;
b0109805 9626 addr = load_reg(s, rn);
99c475ab
FB
9627 for (i = 0; i < 8; i++) {
9628 if (insn & (1 << i)) {
99c475ab
FB
9629 if (insn & (1 << 11)) {
9630 /* load */
b0109805 9631 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9632 if (i == rn) {
9633 loaded_var = tmp;
9634 } else {
9635 store_reg(s, i, tmp);
9636 }
99c475ab
FB
9637 } else {
9638 /* store */
b0109805
PB
9639 tmp = load_reg(s, i);
9640 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9641 }
5899f386 9642 /* advance to the next address */
b0109805 9643 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9644 }
9645 }
b0109805 9646 if ((insn & (1 << rn)) == 0) {
a7d3970d 9647 /* base reg not in list: base register writeback */
b0109805
PB
9648 store_reg(s, rn, addr);
9649 } else {
a7d3970d
PM
9650 /* base reg in list: if load, complete it now */
9651 if (insn & (1 << 11)) {
9652 store_reg(s, rn, loaded_var);
9653 }
7d1b0095 9654 tcg_temp_free_i32(addr);
b0109805 9655 }
99c475ab 9656 break;
a7d3970d 9657 }
99c475ab
FB
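    /* Case 13: conditional branch or swi.  cond 0xe is undefined, cond 0xf
     * is swi/svc, and anything else branches by a sign-extended 8-bit
     * halfword offset relative to s->pc + 2 (e.g. an imm8 of 0xfe branches
     * back to the instruction itself). */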
9658 case 13:
9659 /* conditional branch or swi */
9660 cond = (insn >> 8) & 0xf;
9661 if (cond == 0xe)
9662 goto undef;
9663
9664 if (cond == 0xf) {
9665 /* swi */
422ebf69 9666 gen_set_pc_im(s->pc);
9ee6e8bb 9667 s->is_jmp = DISAS_SWI;
99c475ab
FB
9668 break;
9669 }
9670 /* generate a conditional jump to next instruction */
e50e6a20 9671 s->condlabel = gen_new_label();
d9ba4830 9672 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9673 s->condjmp = 1;
99c475ab
FB
9674
9675 /* jump to the offset */
5899f386 9676 val = (uint32_t)s->pc + 2;
99c475ab 9677 offset = ((int32_t)insn << 24) >> 24;
5899f386 9678 val += offset << 1;
8aaca4c0 9679 gen_jmp(s, val);
99c475ab
FB
9680 break;
9681
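    /* Cases 14 and 15: with bit 11 set in case 14, and for all of case 15,
     * this halfword starts a 32-bit Thumb-2 (or BL/BLX) encoding and is
     * handed to disas_thumb2_insn; otherwise case 14 is an unconditional
     * branch with a sign-extended 11-bit halfword offset. */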
9682 case 14:
358bf29e 9683 if (insn & (1 << 11)) {
9ee6e8bb
PB
9684 if (disas_thumb2_insn(env, s, insn))
9685 goto undef32;
358bf29e
PB
9686 break;
9687 }
9ee6e8bb 9688 /* unconditional branch */
99c475ab
FB
9689 val = (uint32_t)s->pc;
9690 offset = ((int32_t)insn << 21) >> 21;
9691 val += (offset << 1) + 2;
8aaca4c0 9692 gen_jmp(s, val);
99c475ab
FB
9693 break;
9694
9695 case 15:
9ee6e8bb 9696 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9697 goto undef32;
9ee6e8bb 9698 break;
99c475ab
FB
9699 }
9700 return;
9ee6e8bb 9701undef32:
bc4a0de0 9702 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9703 return;
9704illegal_op:
99c475ab 9705undef:
bc4a0de0 9706 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9707}
9708
2c0262af
FB
9709/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9710 basic block 'tb'. If search_pc is TRUE, also generate PC
9711 information for each intermediate instruction. */
0ecb72a5 9712static inline void gen_intermediate_code_internal(CPUARMState *env,
2cfc5f17
TS
9713 TranslationBlock *tb,
9714 int search_pc)
2c0262af
FB
9715{
9716 DisasContext dc1, *dc = &dc1;
a1d1bb31 9717 CPUBreakpoint *bp;
2c0262af
FB
9718 uint16_t *gen_opc_end;
9719 int j, lj;
0fa85d43 9720 target_ulong pc_start;
b5ff1b31 9721 uint32_t next_page_start;
2e70f6ef
PB
9722 int num_insns;
9723 int max_insns;
3b46e624 9724
2c0262af 9725 /* generate intermediate code */
0fa85d43 9726 pc_start = tb->pc;
3b46e624 9727
2c0262af
FB
9728 dc->tb = tb;
9729
92414b31 9730 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9731
9732 dc->is_jmp = DISAS_NEXT;
9733 dc->pc = pc_start;
8aaca4c0 9734 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9735 dc->condjmp = 0;
7204ab88 9736 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 9737 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
9738 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9739 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9740#if !defined(CONFIG_USER_ONLY)
61f74d6a 9741 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9742#endif
5df8bac1 9743 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9744 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9745 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9746 cpu_F0s = tcg_temp_new_i32();
9747 cpu_F1s = tcg_temp_new_i32();
9748 cpu_F0d = tcg_temp_new_i64();
9749 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9750 cpu_V0 = cpu_F0d;
9751 cpu_V1 = cpu_F1d;
e677137d 9752 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9753 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9754 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9755 lj = -1;
2e70f6ef
PB
9756 num_insns = 0;
9757 max_insns = tb->cflags & CF_COUNT_MASK;
9758 if (max_insns == 0)
9759 max_insns = CF_COUNT_MASK;
9760
9761 gen_icount_start();
e12ce78d 9762
3849902c
PM
9763 tcg_clear_temp_count();
9764
e12ce78d
PM
9765 /* A note on handling of the condexec (IT) bits:
9766 *
9767 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 9768 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 9769 * (1) if the condexec bits are not already zero then we write
0ecb72a5 9770 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
9771 * to do it at the end of the block. (For example if we don't do this
9772 * it's hard to identify whether we can safely skip writing condexec
9773 * at the end of the TB, which we definitely want to do for the case
9774 * where a TB doesn't do anything with the IT state at all.)
9775 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 9776 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
9777 * This is done both for leaving the TB at the end, and for leaving
9778 * it because of an exception we know will happen, which is done in
9779 * gen_exception_insn(). The latter is necessary because we need to
9780 * leave the TB with the PC/IT state just prior to execution of the
9781 * instruction which caused the exception.
9782 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 9783 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
9784 * This is handled in the same way as restoration of the
9785 * PC in these situations: we will be called again with search_pc=1
9786 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9787 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9788 * this to restore the condexec bits.
e12ce78d
PM
9789 *
9790 * Note that there are no instructions which can read the condexec
9791 * bits, and none which can write non-static values to them, so
0ecb72a5 9792 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
9793 * middle of a TB.
9794 */
9795
9ee6e8bb
PB
9796 /* Reset the conditional execution bits immediately. This avoids
9797 complications trying to do it at the end of the block. */
98eac7ca 9798 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9799 {
7d1b0095 9800 TCGv tmp = tcg_temp_new_i32();
8f01245e 9801 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9802 store_cpu_field(tmp, condexec_bits);
8f01245e 9803 }
2c0262af 9804 do {
fbb4a2e3
PB
9805#ifdef CONFIG_USER_ONLY
9806 /* Intercept jump to the magic kernel page. */
9807 if (dc->pc >= 0xffff0000) {
9808 /* We always get here via a jump, so know we are not in a
9809 conditional execution block. */
9810 gen_exception(EXCP_KERNEL_TRAP);
9811 dc->is_jmp = DISAS_UPDATE;
9812 break;
9813 }
9814#else
9ee6e8bb
PB
9815 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9816 /* We always get here via a jump, so know we are not in a
9817 conditional execution block. */
d9ba4830 9818 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9819 dc->is_jmp = DISAS_UPDATE;
9820 break;
9ee6e8bb
PB
9821 }
9822#endif
9823
72cf2d4f
BS
9824 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9825 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9826 if (bp->pc == dc->pc) {
bc4a0de0 9827 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9828 /* Advance PC so that clearing the breakpoint will
9829 invalidate this TB. */
9830 dc->pc += 2;
9831 goto done_generating;
1fddef4b
FB
9832 break;
9833 }
9834 }
9835 }
2c0262af 9836 if (search_pc) {
92414b31 9837 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
9838 if (lj < j) {
9839 lj++;
9840 while (lj < j)
ab1103de 9841 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 9842 }
25983cad 9843 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 9844 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 9845 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 9846 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 9847 }
e50e6a20 9848
2e70f6ef
PB
9849 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9850 gen_io_start();
9851
fdefe51c 9852 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
9853 tcg_gen_debug_insn_start(dc->pc);
9854 }
9855
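        /* Decode one guest instruction.  For Thumb the IT state is also
         * advanced here: the 5-bit condexec_mask shifts left by one, its old
         * top bit becomes the low bit of condexec_cond, and once the mask
         * reaches zero the IT block is finished. */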
7204ab88 9856 if (dc->thumb) {
9ee6e8bb
PB
9857 disas_thumb_insn(env, dc);
9858 if (dc->condexec_mask) {
9859 dc->condexec_cond = (dc->condexec_cond & 0xe)
9860 | ((dc->condexec_mask >> 4) & 1);
9861 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9862 if (dc->condexec_mask == 0) {
9863 dc->condexec_cond = 0;
9864 }
9865 }
9866 } else {
9867 disas_arm_insn(env, dc);
9868 }
e50e6a20
FB
9869
9870 if (dc->condjmp && !dc->is_jmp) {
9871 gen_set_label(dc->condlabel);
9872 dc->condjmp = 0;
9873 }
3849902c
PM
9874
9875 if (tcg_check_temp_count()) {
9876 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9877 }
9878
aaf2d97d 9879 /* Translation stops when a conditional branch is encountered.
e50e6a20 9880 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9881 * Also stop translation when a page boundary is reached. This
bf20dc07 9882 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9883 num_insns ++;
efd7f486 9884 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
1fddef4b 9885 !env->singlestep_enabled &&
1b530a6d 9886 !singlestep &&
2e70f6ef
PB
9887 dc->pc < next_page_start &&
9888 num_insns < max_insns);
9889
9890 if (tb->cflags & CF_LAST_IO) {
9891 if (dc->condjmp) {
9892 /* FIXME: This can theoretically happen with self-modifying
9893 code. */
9894 cpu_abort(env, "IO on conditional branch instruction");
9895 }
9896 gen_io_end();
9897 }
9ee6e8bb 9898
b5ff1b31 9899 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9900 instruction was a conditional branch or trap, and the PC has
9901 already been written. */
551bd27f 9902 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9903 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9904 if (dc->condjmp) {
9ee6e8bb
PB
9905 gen_set_condexec(dc);
9906 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9907 gen_exception(EXCP_SWI);
9ee6e8bb 9908 } else {
d9ba4830 9909 gen_exception(EXCP_DEBUG);
9ee6e8bb 9910 }
e50e6a20
FB
9911 gen_set_label(dc->condlabel);
9912 }
9913 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9914 gen_set_pc_im(dc->pc);
e50e6a20 9915 dc->condjmp = 0;
8aaca4c0 9916 }
9ee6e8bb
PB
9917 gen_set_condexec(dc);
9918 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9919 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9920 } else {
9921 /* FIXME: Single stepping a WFI insn will not halt
9922 the CPU. */
d9ba4830 9923 gen_exception(EXCP_DEBUG);
9ee6e8bb 9924 }
8aaca4c0 9925 } else {
9ee6e8bb
PB
9926 /* While branches must always occur at the end of an IT block,
9927 there are a few other things that can cause us to terminate
65626741 9928 the TB in the middle of an IT block:
9ee6e8bb
PB
9929 - Exception generating instructions (bkpt, swi, undefined).
9930 - Page boundaries.
9931 - Hardware watchpoints.
9932 Hardware breakpoints have already been handled and skip this code.
9933 */
9934 gen_set_condexec(dc);
8aaca4c0 9935 switch(dc->is_jmp) {
8aaca4c0 9936 case DISAS_NEXT:
6e256c93 9937 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9938 break;
9939 default:
9940 case DISAS_JUMP:
9941 case DISAS_UPDATE:
9942 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9943 tcg_gen_exit_tb(0);
8aaca4c0
FB
9944 break;
9945 case DISAS_TB_JUMP:
9946 /* nothing more to generate */
9947 break;
9ee6e8bb 9948 case DISAS_WFI:
1ce94f81 9949 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
9950 break;
9951 case DISAS_SWI:
d9ba4830 9952 gen_exception(EXCP_SWI);
9ee6e8bb 9953 break;
8aaca4c0 9954 }
e50e6a20
FB
9955 if (dc->condjmp) {
9956 gen_set_label(dc->condlabel);
9ee6e8bb 9957 gen_set_condexec(dc);
6e256c93 9958 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9959 dc->condjmp = 0;
9960 }
2c0262af 9961 }
2e70f6ef 9962
9ee6e8bb 9963done_generating:
2e70f6ef 9964 gen_icount_end(tb, num_insns);
efd7f486 9965 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
9966
9967#ifdef DEBUG_DISAS
8fec2b8c 9968 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9969 qemu_log("----------------\n");
9970 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 9971 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 9972 dc->thumb | (dc->bswap_code << 1));
93fcfe39 9973 qemu_log("\n");
2c0262af
FB
9974 }
9975#endif
b5ff1b31 9976 if (search_pc) {
92414b31 9977 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
9978 lj++;
9979 while (lj <= j)
ab1103de 9980 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 9981 } else {
2c0262af 9982 tb->size = dc->pc - pc_start;
2e70f6ef 9983 tb->icount = num_insns;
b5ff1b31 9984 }
2c0262af
FB
9985}
9986
0ecb72a5 9987void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 9988{
2cfc5f17 9989 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9990}
9991
0ecb72a5 9992void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 9993{
2cfc5f17 9994 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9995}
9996
b5ff1b31
FB
9997static const char *cpu_mode_names[16] = {
9998 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9999 "???", "???", "???", "und", "???", "???", "???", "sys"
10000};
9ee6e8bb 10001
0ecb72a5 10002void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10003 int flags)
2c0262af
FB
10004{
10005 int i;
b5ff1b31 10006 uint32_t psr;
2c0262af
FB
10007
10008 for(i=0;i<16;i++) {
7fe48483 10009 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10010 if ((i % 4) == 3)
7fe48483 10011 cpu_fprintf(f, "\n");
2c0262af 10012 else
7fe48483 10013 cpu_fprintf(f, " ");
2c0262af 10014 }
b5ff1b31 10015 psr = cpsr_read(env);
687fa640
TS
10016 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10017 psr,
b5ff1b31
FB
10018 psr & (1 << 31) ? 'N' : '-',
10019 psr & (1 << 30) ? 'Z' : '-',
10020 psr & (1 << 29) ? 'C' : '-',
10021 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10022 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10023 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10024
f2617cfc
PM
10025 if (flags & CPU_DUMP_FPU) {
10026 int numvfpregs = 0;
10027 if (arm_feature(env, ARM_FEATURE_VFP)) {
10028 numvfpregs += 16;
10029 }
10030 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10031 numvfpregs += 16;
10032 }
10033 for (i = 0; i < numvfpregs; i++) {
10034 uint64_t v = float64_val(env->vfp.regs[i]);
10035 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10036 i * 2, (uint32_t)v,
10037 i * 2 + 1, (uint32_t)(v >> 32),
10038 i, v);
10039 }
10040 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10041 }
2c0262af 10042}
a6b025d3 10043
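/* restore_state_to_opc() consumes the gen_opc_pc[] and gen_opc_condexec_bits[]
   mappings recorded above when search_pc is set.  The IT state is packed as
   (condexec_cond << 4) | (condexec_mask >> 1), the same layout as the
   CONDEXEC TB flag: e.g. cond 0x4 with mask 0x18 is stored as 0x4c. */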
0ecb72a5 10044void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10045{
25983cad 10046 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
e12ce78d 10047 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10048}