/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
    int bswap_code;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
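/* Illustration only: gen_rev16 swaps the two bytes within each halfword,
 * and gen_revsh byte-swaps the low halfword and sign-extends the result.
 * A stand-alone C sketch of the same semantics follows; the helper names
 * and test values are illustrative, not part of the translator. */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

/* REV16: byte-swap each 16-bit halfword independently. */
static uint32_t rev16(uint32_t x)
{
    return ((x >> 8) & 0x00ff00ffu) | ((x << 8) & 0xff00ff00u);
}

/* REVSH: byte-swap the low halfword, then sign-extend it to 32 bits. */
static int32_t revsh(uint32_t x)
{
    uint16_t lo = (uint16_t)x;
    uint16_t swapped = (uint16_t)((lo >> 8) | (lo << 8));
    return (int32_t)(int16_t)swapped;
}

int main(void)
{
    assert(rev16(0x11223344u) == 0x22114433u);
    assert(revsh(0x0000ff7fu) == 0x7fff);
    assert(revsh(0x000012f0u) == (int32_t)0xfffff012u);
    return 0;
}
#endif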

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

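/* Illustration only: the xor/subtract pair in gen_sbfx is the usual
 * branch-free sign-extension idiom -- after masking the field to 'width'
 * bits, flipping the sign bit and subtracting it back propagates the sign
 * into the upper bits.  (When shift + width == 32 the arithmetic shift
 * alone already sign-extends, which is why that case is skipped above.)
 * Stand-alone sketch with a hypothetical helper name: */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

static int32_t sbfx(uint32_t x, int shift, int width)
{
    uint32_t v = x >> shift;
    uint32_t signbit = 1u << (width - 1);
    v &= (1u << width) - 1;         /* keep only the field              */
    v ^= signbit;                   /* bias the value by the sign bit   */
    return (int32_t)(v - signbit);  /* remove the bias, extending sign  */
}

int main(void)
{
    assert(sbfx(0x000000f0u, 4, 4) == -1);    /* field 0xf  -> -1  */
    assert(sbfx(0x00000070u, 4, 4) == 7);     /* field 0x7  -> +7  */
    assert(sbfx(0xabcd1234u, 8, 8) == 0x12);  /* field 0x12 -> +18 */
    return 0;
}
#endif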
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv lo = tcg_temp_new_i32();
    TCGv hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free(lo);
    tcg_temp_free(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv lo = tcg_temp_new_i32();
    TCGv hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free(lo);
    tcg_temp_free(hi);

    return ret;
}
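/* Illustration only: tcg_gen_mulu2_i32/tcg_gen_muls2_i32 deliver the low
 * and high words of a 32x32 multiply in one operation, which is what the
 * UMULL/SMULL/UMLAL/SMLAL family needs.  A stand-alone C sketch of that
 * lo/hi split (helper names are illustrative): */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

/* Unsigned 32x32 -> 64 multiply returned as separate lo/hi words. */
static void mulu2_32(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi)
{
    uint64_t prod = (uint64_t)a * b;
    *lo = (uint32_t)prod;
    *hi = (uint32_t)(prod >> 32);
}

/* Signed variant, as used for SMULL/SMLAL. */
static void muls2_32(int32_t a, int32_t b, uint32_t *lo, uint32_t *hi)
{
    int64_t prod = (int64_t)a * b;
    *lo = (uint32_t)prod;
    *hi = (uint32_t)((uint64_t)prod >> 32);
}

int main(void)
{
    uint32_t lo, hi;
    mulu2_32(0xffffffffu, 0xffffffffu, &lo, &hi);
    assert(lo == 0x00000001u && hi == 0xfffffffeu);
    muls2_32(-2, 3, &lo, &hi);
    assert(lo == 0xfffffffau && hi == 0xffffffffu);  /* -6 */
    return 0;
}
#endif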

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
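/* Illustration only: the pseudocode comment above gen_add16 is the whole
 * trick.  Clearing bit 15 of both inputs guarantees that no carry can
 * cross from the low lane into the high lane; bit 15 of the raw sum is
 * then just the carry out of bits [14:0], and XORing tmp (the XOR of the
 * two original bit-15s) back in produces the correct low-lane result.
 * Stand-alone C sketch: */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

/* Add the two 16-bit lanes of t0 and t1 independently (no carry between
 * lanes) using a single 32-bit addition. */
static uint32_t add16_dual(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000u;
    t1 &= ~0x8000u;
    return (t0 + t1) ^ tmp;   /* restore bit 15, drop its carry */
}

int main(void)
{
    /* low lane wraps (0xffff + 1 -> 0) without disturbing the high lane */
    assert(add16_dual(0x0001ffffu, 0x00010001u) == 0x00020000u);
    assert(add16_dual(0x80008000u, 0x80008000u) == 0x00000000u);
    return 0;
}
#endif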

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_LTU, cpu_CF, cpu_NF, t0);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
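/* Illustration only: the flags are kept "exploded" in separate globals --
 * ZF holds the whole result (Z is "ZF == 0"), NF holds the result so N is
 * its bit 31, CF is 0 or 1, and VF keeps the overflow in bit 31.  The C/V
 * computations used above look like this in plain C (stand-alone sketch,
 * struct and helper names are illustrative): */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

typedef struct { uint32_t NF, ZF, CF, VF; } Flags;

/* ADDS: C is an unsigned compare of result against an operand,
 * V = (res ^ t0) & ~(t0 ^ t1), with the flag in bit 31 (cf. gen_add_CC). */
static uint32_t adds(uint32_t t0, uint32_t t1, Flags *f)
{
    uint32_t res = t0 + t1;
    f->NF = f->ZF = res;
    f->CF = res < t0;
    f->VF = (res ^ t0) & ~(t0 ^ t1);
    return res;
}

/* SUBS: C means "no borrow", i.e. t0 >= t1 unsigned (cf. gen_sub_CC). */
static uint32_t subs(uint32_t t0, uint32_t t1, Flags *f)
{
    uint32_t res = t0 - t1;
    f->NF = f->ZF = res;
    f->CF = t0 >= t1;
    f->VF = (res ^ t0) & (t0 ^ t1);
    return res;
}

int main(void)
{
    Flags f;
    adds(0x7fffffffu, 1, &f);
    assert((f.VF >> 31) == 1 && f.CF == 0);  /* signed overflow, no carry */
    subs(0, 1, &f);
    assert(f.CF == 0 && (f.NF >> 31) == 1);  /* borrow, negative result   */
    return 0;
}
#endif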

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv dest, TCGv t0, TCGv t1)                  \
{                                                                     \
    TCGv tmp1, tmp2, tmp3;                                            \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
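/* Illustration only: register-specified shifts use the low byte of the
 * shift register, and counts of 32 or more must produce 0 for LSL/LSR
 * (all sign bits for ASR).  TCG shift ops, like C shifts, are undefined
 * for counts >= 32, hence the movcond clamping above.  Stand-alone C
 * sketch of the required semantics: */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

static uint32_t lsl_reg(uint32_t value, uint32_t count_reg)
{
    uint32_t count = count_reg & 0xff;   /* only the low byte matters */
    return count >= 32 ? 0 : value << count;
}

static uint32_t asr_reg(uint32_t value, uint32_t count_reg)
{
    uint32_t count = count_reg & 0xff;
    if (count >= 32) {
        count = 31;                      /* saturate: replicate the sign */
    }
    /* assumes arithmetic right shift of negative values, as on the
       compilers QEMU supports */
    return (uint32_t)((int32_t)value >> count);
}

int main(void)
{
    assert(lsl_reg(1, 33) == 0);                     /* not 1 << 1         */
    assert(lsl_reg(1, 0x108) == 0x100);              /* only low byte used */
    assert(asr_reg(0x80000000u, 40) == 0xffffffffu);
    return 0;
}
#endif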

static void tcg_gen_abs_i32(TCGv dest, TCGv src)
{
    TCGv c0 = tcg_const_i32(0);
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
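/* Illustration only: the shift == 0 cases follow the immediate-shift
 * encoding -- LSR #0 and ASR #0 really mean a shift by 32, and ROR #0
 * encodes RRX, a one-bit rotate right through the carry flag, which is
 * what the shri/or-with-CF sequence above builds.  Stand-alone C sketch
 * of the RRX data path: */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

/* RRX: 33-bit rotate right by one through the carry flag. */
static uint32_t rrx(uint32_t value, uint32_t *carry)
{
    uint32_t carry_out = value & 1;
    uint32_t result = (*carry << 31) | (value >> 1);
    *carry = carry_out;
    return result;
}

int main(void)
{
    uint32_t c = 1;
    assert(rrx(0x00000002u, &c) == 0x80000001u && c == 0);
    assert(rrx(0x00000001u, &c) == 0x00000000u && c == 1);
    return 0;
}
#endif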

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
569
6ddbc6e4
PB
570#define PAS_OP(pfx) \
571 switch (op2) { \
572 case 0: gen_pas_helper(glue(pfx,add16)); break; \
573 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
574 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
575 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
576 case 4: gen_pas_helper(glue(pfx,add8)); break; \
577 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
578 }
d9ba4830 579static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 580{
a7812ae4 581 TCGv_ptr tmp;
6ddbc6e4
PB
582
583 switch (op1) {
584#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
585 case 1:
a7812ae4 586 tmp = tcg_temp_new_ptr();
0ecb72a5 587 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 588 PAS_OP(s)
b75263d6 589 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
590 break;
591 case 5:
a7812ae4 592 tmp = tcg_temp_new_ptr();
0ecb72a5 593 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 594 PAS_OP(u)
b75263d6 595 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
596 break;
597#undef gen_pas_helper
598#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
599 case 2:
600 PAS_OP(q);
601 break;
602 case 3:
603 PAS_OP(sh);
604 break;
605 case 6:
606 PAS_OP(uq);
607 break;
608 case 7:
609 PAS_OP(uh);
610 break;
611#undef gen_pas_helper
612 }
613}
9ee6e8bb
PB
614#undef PAS_OP
615
6ddbc6e4
PB
616/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
617#define PAS_OP(pfx) \
ed89a2f1 618 switch (op1) { \
6ddbc6e4
PB
619 case 0: gen_pas_helper(glue(pfx,add8)); break; \
620 case 1: gen_pas_helper(glue(pfx,add16)); break; \
621 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
622 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
623 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
624 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
625 }
d9ba4830 626static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 627{
a7812ae4 628 TCGv_ptr tmp;
6ddbc6e4 629
ed89a2f1 630 switch (op2) {
6ddbc6e4
PB
631#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
632 case 0:
a7812ae4 633 tmp = tcg_temp_new_ptr();
0ecb72a5 634 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 635 PAS_OP(s)
b75263d6 636 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
637 break;
638 case 4:
a7812ae4 639 tmp = tcg_temp_new_ptr();
0ecb72a5 640 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 641 PAS_OP(u)
b75263d6 642 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
643 break;
644#undef gen_pas_helper
645#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
646 case 1:
647 PAS_OP(q);
648 break;
649 case 2:
650 PAS_OP(sh);
651 break;
652 case 5:
653 PAS_OP(uq);
654 break;
655 case 6:
656 PAS_OP(uh);
657 break;
658#undef gen_pas_helper
659 }
660}
#undef PAS_OP

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}
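/* Illustration only: gen_test_cc emits a branch to 'label' that is taken
 * when condition 'cc' holds, reading the condition straight from the
 * exploded flag variables.  The same predicate in plain C (stand-alone
 * sketch, helper name is illustrative): */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static bool condition_holds(int cc, uint32_t ZF, uint32_t NF,
                            uint32_t CF, uint32_t VF)
{
    bool Z = (ZF == 0);
    bool N = (NF >> 31) & 1;
    bool C = CF != 0;
    bool V = (VF >> 31) & 1;

    switch (cc) {
    case 0:  return Z;             /* eq */
    case 1:  return !Z;            /* ne */
    case 2:  return C;             /* cs */
    case 3:  return !C;            /* cc */
    case 4:  return N;             /* mi */
    case 5:  return !N;            /* pl */
    case 6:  return V;             /* vs */
    case 7:  return !V;            /* vc */
    case 8:  return C && !Z;       /* hi */
    case 9:  return !C || Z;       /* ls */
    case 10: return N == V;        /* ge */
    case 11: return N != V;        /* lt */
    case 12: return !Z && N == V;  /* gt */
    case 13: return Z || N != V;   /* le */
    default: return true;          /* al */
    }
}

int main(void)
{
    /* After SUBS of equal values: result 0 -> Z set, C set (no borrow). */
    assert(condition_holds(0, 0, 0, 1, 0));   /* eq holds         */
    assert(!condition_holds(8, 0, 0, 1, 0));  /* hi fails (Z set) */
    return 0;
}
#endif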

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
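/* Illustration only: for an interworking branch, bit 0 of the target
 * selects the instruction set (1 = Thumb, 0 = ARM) and is never part of
 * the fetched PC, which is why gen_bx splits 'var' with the two andi ops.
 * Stand-alone C sketch: */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

static void bx_target(uint32_t addr, uint32_t *pc, int *thumb)
{
    *pc = addr & ~1u;           /* PC drops the interworking bit       */
    *thumb = (int)(addr & 1u);  /* bit 0 selects Thumb (1) or ARM (0)  */
}

int main(void)
{
    uint32_t pc;
    int thumb;
    bx_target(0x00008001u, &pc, &thumb);
    assert(pc == 0x00008000u && thumb == 1);
    bx_target(0x00008000u, &pc, &thumb);
    assert(pc == 0x00008000u && thumb == 0);
    return 0;
}
#endif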

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
806
b0109805
PB
807static inline TCGv gen_ld8s(TCGv addr, int index)
808{
7d1b0095 809 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
810 tcg_gen_qemu_ld8s(tmp, addr, index);
811 return tmp;
812}
813static inline TCGv gen_ld8u(TCGv addr, int index)
814{
7d1b0095 815 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
816 tcg_gen_qemu_ld8u(tmp, addr, index);
817 return tmp;
818}
819static inline TCGv gen_ld16s(TCGv addr, int index)
820{
7d1b0095 821 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
822 tcg_gen_qemu_ld16s(tmp, addr, index);
823 return tmp;
824}
825static inline TCGv gen_ld16u(TCGv addr, int index)
826{
7d1b0095 827 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
828 tcg_gen_qemu_ld16u(tmp, addr, index);
829 return tmp;
830}
831static inline TCGv gen_ld32(TCGv addr, int index)
832{
7d1b0095 833 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
834 tcg_gen_qemu_ld32u(tmp, addr, index);
835 return tmp;
836}
84496233
JR
837static inline TCGv_i64 gen_ld64(TCGv addr, int index)
838{
839 TCGv_i64 tmp = tcg_temp_new_i64();
840 tcg_gen_qemu_ld64(tmp, addr, index);
841 return tmp;
842}
b0109805
PB
843static inline void gen_st8(TCGv val, TCGv addr, int index)
844{
845 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 846 tcg_temp_free_i32(val);
b0109805
PB
847}
848static inline void gen_st16(TCGv val, TCGv addr, int index)
849{
850 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 851 tcg_temp_free_i32(val);
b0109805
PB
852}
853static inline void gen_st32(TCGv val, TCGv addr, int index)
854{
855 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 856 tcg_temp_free_i32(val);
b0109805 857}
84496233
JR
858static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
859{
860 tcg_gen_qemu_st64(val, addr, index);
861 tcg_temp_free_i64(val);
862}
b5ff1b31 863
5e3f878a
PB
864static inline void gen_set_pc_im(uint32_t val)
865{
155c3eac 866 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
867}
868
b5ff1b31
FB
869/* Force a TB lookup after an instruction that changes the CPU state. */
870static inline void gen_lookup_tb(DisasContext *s)
871{
a6445c52 872 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
873 s->is_jmp = DISAS_UPDATE;
874}
875
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
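/* Illustration only: in the word/byte form decoded above, bit 25 selects
 * immediate vs. shifted-register offset and bit 23 gives the sign; the
 * halfword/doubleword form instead splits an 8-bit immediate across
 * bits [11:8] and [3:0].  A stand-alone C decode of the immediate cases
 * (not the translator's decoder; encodings per the ARM ARM): */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

/* Immediate offset of an ARM LDR/STR (word/byte form).  Returns 0 for the
 * register-offset form, which is handled separately. */
static int32_t ldr_imm_offset(uint32_t insn)
{
    if (insn & (1u << 25)) {
        return 0;
    }
    int32_t val = insn & 0xfff;
    return (insn & (1u << 23)) ? val : -val;
}

/* Immediate offset of the halfword form: two 4-bit fields. */
static int32_t ldrh_imm_offset(uint32_t insn)
{
    int32_t val = (insn & 0xf) | ((insn >> 4) & 0xf0);
    return (insn & (1u << 23)) ? val : -val;
}

int main(void)
{
    assert(ldr_imm_offset(0xe5110004u) == -4);     /* LDR r0, [r1, #-4]    */
    assert(ldrh_imm_offset(0xe1d101b2u) == 0x12);  /* LDRH r0, [r1, #0x12] */
    return 0;
}
#endif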
931
5aaebd13
PM
932static TCGv_ptr get_fpstatus_ptr(int neon)
933{
934 TCGv_ptr statusptr = tcg_temp_new_ptr();
935 int offset;
936 if (neon) {
0ecb72a5 937 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 938 } else {
0ecb72a5 939 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
940 }
941 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
942 return statusptr;
943}
944
4373f3ce
PB
945#define VFP_OP2(name) \
946static inline void gen_vfp_##name(int dp) \
947{ \
ae1857ec
PM
948 TCGv_ptr fpst = get_fpstatus_ptr(0); \
949 if (dp) { \
950 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
951 } else { \
952 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
953 } \
954 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
955}
956
4373f3ce
PB
957VFP_OP2(add)
958VFP_OP2(sub)
959VFP_OP2(mul)
960VFP_OP2(div)
961
962#undef VFP_OP2
963
605a6aed
PM
964static inline void gen_vfp_F1_mul(int dp)
965{
966 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 967 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 968 if (dp) {
ae1857ec 969 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 970 } else {
ae1857ec 971 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 972 }
ae1857ec 973 tcg_temp_free_ptr(fpst);
605a6aed
PM
974}
975
976static inline void gen_vfp_F1_neg(int dp)
977{
978 /* Like gen_vfp_neg() but put result in F1 */
979 if (dp) {
980 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
981 } else {
982 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
983 }
984}
985
4373f3ce
PB
986static inline void gen_vfp_abs(int dp)
987{
988 if (dp)
989 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
990 else
991 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
992}
993
994static inline void gen_vfp_neg(int dp)
995{
996 if (dp)
997 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
998 else
999 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1000}
1001
1002static inline void gen_vfp_sqrt(int dp)
1003{
1004 if (dp)
1005 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1006 else
1007 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1008}
1009
1010static inline void gen_vfp_cmp(int dp)
1011{
1012 if (dp)
1013 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1014 else
1015 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1016}
1017
1018static inline void gen_vfp_cmpe(int dp)
1019{
1020 if (dp)
1021 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1022 else
1023 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1024}
1025
1026static inline void gen_vfp_F1_ld0(int dp)
1027{
1028 if (dp)
5b340b51 1029 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1030 else
5b340b51 1031 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1032}
1033
5500b06c
PM
1034#define VFP_GEN_ITOF(name) \
1035static inline void gen_vfp_##name(int dp, int neon) \
1036{ \
5aaebd13 1037 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1038 if (dp) { \
1039 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1040 } else { \
1041 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1042 } \
b7fa9214 1043 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1044}
1045
5500b06c
PM
1046VFP_GEN_ITOF(uito)
1047VFP_GEN_ITOF(sito)
1048#undef VFP_GEN_ITOF
4373f3ce 1049
5500b06c
PM
1050#define VFP_GEN_FTOI(name) \
1051static inline void gen_vfp_##name(int dp, int neon) \
1052{ \
5aaebd13 1053 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1054 if (dp) { \
1055 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1056 } else { \
1057 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1058 } \
b7fa9214 1059 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1060}
1061
5500b06c
PM
1062VFP_GEN_FTOI(toui)
1063VFP_GEN_FTOI(touiz)
1064VFP_GEN_FTOI(tosi)
1065VFP_GEN_FTOI(tosiz)
1066#undef VFP_GEN_FTOI
4373f3ce
PB
1067
1068#define VFP_GEN_FIX(name) \
5500b06c 1069static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1070{ \
b75263d6 1071 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1072 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1073 if (dp) { \
1074 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1075 } else { \
1076 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1077 } \
b75263d6 1078 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1079 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1080}
4373f3ce
PB
1081VFP_GEN_FIX(tosh)
1082VFP_GEN_FIX(tosl)
1083VFP_GEN_FIX(touh)
1084VFP_GEN_FIX(toul)
1085VFP_GEN_FIX(shto)
1086VFP_GEN_FIX(slto)
1087VFP_GEN_FIX(uhto)
1088VFP_GEN_FIX(ulto)
1089#undef VFP_GEN_FIX
9ee6e8bb 1090
312eea9f 1091static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1092{
1093 if (dp)
312eea9f 1094 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1095 else
312eea9f 1096 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1097}
1098
312eea9f 1099static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1100{
1101 if (dp)
312eea9f 1102 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1103 else
312eea9f 1104 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1105}
1106
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
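/* Illustration only: the VFP register file is stored as an array of
 * 64-bit doubles, so single-precision register s<n> is one 32-bit half
 * of d<n/2>, and 32-bit piece n of NEON doubleword register d<reg> is
 * s<reg*2 + n>.  A small C model of that index arithmetic (stand-alone;
 * the CPU_DoubleU upper/lower handling of host endianness is ignored): */
#if 0 /* stand-alone example, compile separately */
#include <assert.h>

struct word_loc { int dreg; int upper_half; };

static struct word_loc locate_sreg(int sreg)
{
    struct word_loc loc = { sreg >> 1, sreg & 1 };
    return loc;
}

static struct word_loc locate_neon_word(int dreg, int n)
{
    return locate_sreg(dreg * 2 + n);
}

int main(void)
{
    assert(locate_sreg(5).dreg == 2 && locate_sreg(5).upper_half == 1);
    assert(locate_neon_word(3, 1).dreg == 3);
    return 0;
}
#endif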
1130
8f8e3aa4
PB
1131static TCGv neon_load_reg(int reg, int pass)
1132{
7d1b0095 1133 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1134 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1135 return tmp;
1136}
1137
1138static void neon_store_reg(int reg, int pass, TCGv var)
1139{
1140 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1141 tcg_temp_free_i32(var);
8f8e3aa4
PB
1142}
1143
a7812ae4 1144static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1145{
1146 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1147}
1148
a7812ae4 1149static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1150{
1151 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1152}
1153
4373f3ce
PB
1154#define tcg_gen_ld_f32 tcg_gen_ld_i32
1155#define tcg_gen_ld_f64 tcg_gen_ld_i64
1156#define tcg_gen_st_f32 tcg_gen_st_i32
1157#define tcg_gen_st_f64 tcg_gen_st_i64
1158
b7bcbe95
FB
1159static inline void gen_mov_F0_vreg(int dp, int reg)
1160{
1161 if (dp)
4373f3ce 1162 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1163 else
4373f3ce 1164 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1165}
1166
1167static inline void gen_mov_F1_vreg(int dp, int reg)
1168{
1169 if (dp)
4373f3ce 1170 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1171 else
4373f3ce 1172 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1173}
1174
1175static inline void gen_mov_vreg_F0(int dp, int reg)
1176{
1177 if (dp)
4373f3ce 1178 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1179 else
4373f3ce 1180 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1181}
1182
18c9b560
AZ
1183#define ARM_CP_RW_BIT (1 << 20)
1184
a7812ae4 1185static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1186{
0ecb72a5 1187 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1188}
1189
a7812ae4 1190static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1191{
0ecb72a5 1192 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1193}
1194
da6b5335 1195static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1196{
7d1b0095 1197 TCGv var = tcg_temp_new_i32();
0ecb72a5 1198 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1199 return var;
e677137d
PB
1200}
1201
da6b5335 1202static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1203{
0ecb72a5 1204 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1205 tcg_temp_free_i32(var);
e677137d
PB
1206}
1207
1208static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1209{
1210 iwmmxt_store_reg(cpu_M0, rn);
1211}
1212
1213static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1214{
1215 iwmmxt_load_reg(cpu_M0, rn);
1216}
1217
1218static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1219{
1220 iwmmxt_load_reg(cpu_V1, rn);
1221 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1222}
1223
1224static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1225{
1226 iwmmxt_load_reg(cpu_V1, rn);
1227 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1228}
1229
1230static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1231{
1232 iwmmxt_load_reg(cpu_V1, rn);
1233 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1234}
1235
1236#define IWMMXT_OP(name) \
1237static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1238{ \
1239 iwmmxt_load_reg(cpu_V1, rn); \
1240 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1241}
1242
477955bd
PM
1243#define IWMMXT_OP_ENV(name) \
1244static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1245{ \
1246 iwmmxt_load_reg(cpu_V1, rn); \
1247 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1248}
1249
1250#define IWMMXT_OP_ENV_SIZE(name) \
1251IWMMXT_OP_ENV(name##b) \
1252IWMMXT_OP_ENV(name##w) \
1253IWMMXT_OP_ENV(name##l)
e677137d 1254
477955bd 1255#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1256static inline void gen_op_iwmmxt_##name##_M0(void) \
1257{ \
477955bd 1258 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1259}
1260
1261IWMMXT_OP(maddsq)
1262IWMMXT_OP(madduq)
1263IWMMXT_OP(sadb)
1264IWMMXT_OP(sadw)
1265IWMMXT_OP(mulslw)
1266IWMMXT_OP(mulshw)
1267IWMMXT_OP(mululw)
1268IWMMXT_OP(muluhw)
1269IWMMXT_OP(macsw)
1270IWMMXT_OP(macuw)
1271
477955bd
PM
1272IWMMXT_OP_ENV_SIZE(unpackl)
1273IWMMXT_OP_ENV_SIZE(unpackh)
1274
1275IWMMXT_OP_ENV1(unpacklub)
1276IWMMXT_OP_ENV1(unpackluw)
1277IWMMXT_OP_ENV1(unpacklul)
1278IWMMXT_OP_ENV1(unpackhub)
1279IWMMXT_OP_ENV1(unpackhuw)
1280IWMMXT_OP_ENV1(unpackhul)
1281IWMMXT_OP_ENV1(unpacklsb)
1282IWMMXT_OP_ENV1(unpacklsw)
1283IWMMXT_OP_ENV1(unpacklsl)
1284IWMMXT_OP_ENV1(unpackhsb)
1285IWMMXT_OP_ENV1(unpackhsw)
1286IWMMXT_OP_ENV1(unpackhsl)
1287
1288IWMMXT_OP_ENV_SIZE(cmpeq)
1289IWMMXT_OP_ENV_SIZE(cmpgtu)
1290IWMMXT_OP_ENV_SIZE(cmpgts)
1291
1292IWMMXT_OP_ENV_SIZE(mins)
1293IWMMXT_OP_ENV_SIZE(minu)
1294IWMMXT_OP_ENV_SIZE(maxs)
1295IWMMXT_OP_ENV_SIZE(maxu)
1296
1297IWMMXT_OP_ENV_SIZE(subn)
1298IWMMXT_OP_ENV_SIZE(addn)
1299IWMMXT_OP_ENV_SIZE(subu)
1300IWMMXT_OP_ENV_SIZE(addu)
1301IWMMXT_OP_ENV_SIZE(subs)
1302IWMMXT_OP_ENV_SIZE(adds)
1303
1304IWMMXT_OP_ENV(avgb0)
1305IWMMXT_OP_ENV(avgb1)
1306IWMMXT_OP_ENV(avgw0)
1307IWMMXT_OP_ENV(avgw1)
e677137d
PB
1308
1309IWMMXT_OP(msadb)
1310
477955bd
PM
1311IWMMXT_OP_ENV(packuw)
1312IWMMXT_OP_ENV(packul)
1313IWMMXT_OP_ENV(packuq)
1314IWMMXT_OP_ENV(packsw)
1315IWMMXT_OP_ENV(packsl)
1316IWMMXT_OP_ENV(packsq)
e677137d 1317
e677137d
PB
1318static void gen_op_iwmmxt_set_mup(void)
1319{
1320 TCGv tmp;
1321 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1322 tcg_gen_ori_i32(tmp, tmp, 2);
1323 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1324}
1325
1326static void gen_op_iwmmxt_set_cup(void)
1327{
1328 TCGv tmp;
1329 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1330 tcg_gen_ori_i32(tmp, tmp, 1);
1331 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1332}
1333
1334static void gen_op_iwmmxt_setpsr_nz(void)
1335{
7d1b0095 1336 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1337 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1338 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1339}
1340
1341static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1342{
1343 iwmmxt_load_reg(cpu_V1, rn);
86831435 1344 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1345 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1346}
1347
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
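/* Illustration only: with pre-indexing (bit 24) the offset is applied
 * before the access and written back to the base only if bit 21 is set;
 * with post-indexing the access uses the original base and the updated
 * address is written back.  A simplified C model of that address
 * arithmetic (generic sketch, not the exact iwMMXt encoding rules): */
#if 0 /* stand-alone example, compile separately */
#include <stdint.h>
#include <assert.h>

static uint32_t indexed_address(uint32_t base, uint32_t offset,
                                int pre, int up, int writeback,
                                uint32_t *new_base)
{
    uint32_t applied = up ? base + offset : base - offset;
    if (pre) {
        *new_base = writeback ? applied : base;
        return applied;
    }
    *new_base = applied;   /* post-indexed: always update the base */
    return base;
}

int main(void)
{
    uint32_t nb;
    assert(indexed_address(0x1000, 8, 1, 1, 0, &nb) == 0x1008 && nb == 0x1000);
    assert(indexed_address(0x1000, 8, 1, 0, 1, &nb) == 0x0ff8 && nb == 0x0ff8);
    assert(indexed_address(0x1000, 8, 0, 1, 1, &nb) == 0x1000 && nb == 0x1008);
    return 0;
}
#endif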
1381
da6b5335 1382static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1383{
1384 int rd = (insn >> 0) & 0xf;
da6b5335 1385 TCGv tmp;
18c9b560 1386
da6b5335
FN
1387 if (insn & (1 << 8)) {
1388 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1389 return 1;
da6b5335
FN
1390 } else {
1391 tmp = iwmmxt_load_creg(rd);
1392 }
1393 } else {
7d1b0095 1394 tmp = tcg_temp_new_i32();
da6b5335
FN
1395 iwmmxt_load_reg(cpu_V0, rd);
1396 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1397 }
1398 tcg_gen_andi_i32(tmp, tmp, mask);
1399 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1400 tcg_temp_free_i32(tmp);
18c9b560
AZ
1401 return 0;
1402}
1403
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1407{
1408 int rd, wrd;
1409 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1410 TCGv addr;
1411 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1412
1413 if ((insn & 0x0e000e00) == 0x0c000000) {
1414 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1415 wrd = insn & 0xf;
1416 rdlo = (insn >> 12) & 0xf;
1417 rdhi = (insn >> 16) & 0xf;
1418 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1419 iwmmxt_load_reg(cpu_V0, wrd);
1420 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1421 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1422 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1423 } else { /* TMCRR */
da6b5335
FN
1424 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1425 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1426 gen_op_iwmmxt_set_mup();
1427 }
1428 return 0;
1429 }
1430
1431 wrd = (insn >> 12) & 0xf;
7d1b0095 1432 addr = tcg_temp_new_i32();
da6b5335 1433 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1434 tcg_temp_free_i32(addr);
18c9b560 1435 return 1;
da6b5335 1436 }
18c9b560
AZ
1437 if (insn & ARM_CP_RW_BIT) {
1438 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1439 tmp = tcg_temp_new_i32();
da6b5335
FN
1440 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1441 iwmmxt_store_creg(wrd, tmp);
18c9b560 1442 } else {
e677137d
PB
1443 i = 1;
1444 if (insn & (1 << 8)) {
1445 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1446 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1447 i = 0;
1448 } else { /* WLDRW wRd */
da6b5335 1449 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1450 }
1451 } else {
1452 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1453 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1454 } else { /* WLDRB */
da6b5335 1455 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1456 }
1457 }
1458 if (i) {
1459 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1460 tcg_temp_free_i32(tmp);
e677137d 1461 }
18c9b560
AZ
1462 gen_op_iwmmxt_movq_wRn_M0(wrd);
1463 }
1464 } else {
1465 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1466 tmp = iwmmxt_load_creg(wrd);
1467 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1468 } else {
1469 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1470 tmp = tcg_temp_new_i32();
e677137d
PB
1471 if (insn & (1 << 8)) {
1472 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1473 tcg_temp_free_i32(tmp);
da6b5335 1474 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1475 } else { /* WSTRW wRd */
1476 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1477 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1478 }
1479 } else {
1480 if (insn & (1 << 22)) { /* WSTRH */
1481 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1482 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1483 } else { /* WSTRB */
1484 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1485 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1486 }
1487 }
18c9b560
AZ
1488 }
1489 }
7d1b0095 1490 tcg_temp_free_i32(addr);
18c9b560
AZ
1491 return 0;
1492 }
1493
1494 if ((insn & 0x0f000000) != 0x0e000000)
1495 return 1;
1496
1497 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1498 case 0x000: /* WOR */
1499 wrd = (insn >> 12) & 0xf;
1500 rd0 = (insn >> 0) & 0xf;
1501 rd1 = (insn >> 16) & 0xf;
1502 gen_op_iwmmxt_movq_M0_wRn(rd0);
1503 gen_op_iwmmxt_orq_M0_wRn(rd1);
1504 gen_op_iwmmxt_setpsr_nz();
1505 gen_op_iwmmxt_movq_wRn_M0(wrd);
1506 gen_op_iwmmxt_set_mup();
1507 gen_op_iwmmxt_set_cup();
1508 break;
1509 case 0x011: /* TMCR */
1510 if (insn & 0xf)
1511 return 1;
1512 rd = (insn >> 12) & 0xf;
1513 wrd = (insn >> 16) & 0xf;
1514 switch (wrd) {
1515 case ARM_IWMMXT_wCID:
1516 case ARM_IWMMXT_wCASF:
1517 break;
1518 case ARM_IWMMXT_wCon:
1519 gen_op_iwmmxt_set_cup();
1520 /* Fall through. */
1521 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1522 tmp = iwmmxt_load_creg(wrd);
1523 tmp2 = load_reg(s, rd);
f669df27 1524 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1525 tcg_temp_free_i32(tmp2);
da6b5335 1526 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1527 break;
1528 case ARM_IWMMXT_wCGR0:
1529 case ARM_IWMMXT_wCGR1:
1530 case ARM_IWMMXT_wCGR2:
1531 case ARM_IWMMXT_wCGR3:
1532 gen_op_iwmmxt_set_cup();
da6b5335
FN
1533 tmp = load_reg(s, rd);
1534 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1535 break;
1536 default:
1537 return 1;
1538 }
1539 break;
1540 case 0x100: /* WXOR */
1541 wrd = (insn >> 12) & 0xf;
1542 rd0 = (insn >> 0) & 0xf;
1543 rd1 = (insn >> 16) & 0xf;
1544 gen_op_iwmmxt_movq_M0_wRn(rd0);
1545 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1546 gen_op_iwmmxt_setpsr_nz();
1547 gen_op_iwmmxt_movq_wRn_M0(wrd);
1548 gen_op_iwmmxt_set_mup();
1549 gen_op_iwmmxt_set_cup();
1550 break;
1551 case 0x111: /* TMRC */
1552 if (insn & 0xf)
1553 return 1;
1554 rd = (insn >> 12) & 0xf;
1555 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1556 tmp = iwmmxt_load_creg(wrd);
1557 store_reg(s, rd, tmp);
18c9b560
AZ
1558 break;
1559 case 0x300: /* WANDN */
1560 wrd = (insn >> 12) & 0xf;
1561 rd0 = (insn >> 0) & 0xf;
1562 rd1 = (insn >> 16) & 0xf;
1563 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1564 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1565 gen_op_iwmmxt_andq_M0_wRn(rd1);
1566 gen_op_iwmmxt_setpsr_nz();
1567 gen_op_iwmmxt_movq_wRn_M0(wrd);
1568 gen_op_iwmmxt_set_mup();
1569 gen_op_iwmmxt_set_cup();
1570 break;
1571 case 0x200: /* WAND */
1572 wrd = (insn >> 12) & 0xf;
1573 rd0 = (insn >> 0) & 0xf;
1574 rd1 = (insn >> 16) & 0xf;
1575 gen_op_iwmmxt_movq_M0_wRn(rd0);
1576 gen_op_iwmmxt_andq_M0_wRn(rd1);
1577 gen_op_iwmmxt_setpsr_nz();
1578 gen_op_iwmmxt_movq_wRn_M0(wrd);
1579 gen_op_iwmmxt_set_mup();
1580 gen_op_iwmmxt_set_cup();
1581 break;
1582 case 0x810: case 0xa10: /* WMADD */
1583 wrd = (insn >> 12) & 0xf;
1584 rd0 = (insn >> 0) & 0xf;
1585 rd1 = (insn >> 16) & 0xf;
1586 gen_op_iwmmxt_movq_M0_wRn(rd0);
1587 if (insn & (1 << 21))
1588 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1589 else
1590 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1591 gen_op_iwmmxt_movq_wRn_M0(wrd);
1592 gen_op_iwmmxt_set_mup();
1593 break;
1594 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1595 wrd = (insn >> 12) & 0xf;
1596 rd0 = (insn >> 16) & 0xf;
1597 rd1 = (insn >> 0) & 0xf;
1598 gen_op_iwmmxt_movq_M0_wRn(rd0);
1599 switch ((insn >> 22) & 3) {
1600 case 0:
1601 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1602 break;
1603 case 1:
1604 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1605 break;
1606 case 2:
1607 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1608 break;
1609 case 3:
1610 return 1;
1611 }
1612 gen_op_iwmmxt_movq_wRn_M0(wrd);
1613 gen_op_iwmmxt_set_mup();
1614 gen_op_iwmmxt_set_cup();
1615 break;
1616 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 16) & 0xf;
1619 rd1 = (insn >> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
1621 switch ((insn >> 22) & 3) {
1622 case 0:
1623 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1624 break;
1625 case 1:
1626 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1627 break;
1628 case 2:
1629 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1630 break;
1631 case 3:
1632 return 1;
1633 }
1634 gen_op_iwmmxt_movq_wRn_M0(wrd);
1635 gen_op_iwmmxt_set_mup();
1636 gen_op_iwmmxt_set_cup();
1637 break;
1638 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1639 wrd = (insn >> 12) & 0xf;
1640 rd0 = (insn >> 16) & 0xf;
1641 rd1 = (insn >> 0) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0);
1643 if (insn & (1 << 22))
1644 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1645 else
1646 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1647 if (!(insn & (1 << 20)))
1648 gen_op_iwmmxt_addl_M0_wRn(wrd);
1649 gen_op_iwmmxt_movq_wRn_M0(wrd);
1650 gen_op_iwmmxt_set_mup();
1651 break;
1652 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1653 wrd = (insn >> 12) & 0xf;
1654 rd0 = (insn >> 16) & 0xf;
1655 rd1 = (insn >> 0) & 0xf;
1656 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1657 if (insn & (1 << 21)) {
1658 if (insn & (1 << 20))
1659 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1660 else
1661 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1662 } else {
1663 if (insn & (1 << 20))
1664 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1665 else
1666 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1667 }
18c9b560
AZ
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 16) & 0xf;
1674 rd1 = (insn >> 0) & 0xf;
1675 gen_op_iwmmxt_movq_M0_wRn(rd0);
1676 if (insn & (1 << 21))
1677 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1678 else
1679 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1680 if (!(insn & (1 << 20))) {
e677137d
PB
1681 iwmmxt_load_reg(cpu_V1, wrd);
1682 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1683 }
1684 gen_op_iwmmxt_movq_wRn_M0(wrd);
1685 gen_op_iwmmxt_set_mup();
1686 break;
1687 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1688 wrd = (insn >> 12) & 0xf;
1689 rd0 = (insn >> 16) & 0xf;
1690 rd1 = (insn >> 0) & 0xf;
1691 gen_op_iwmmxt_movq_M0_wRn(rd0);
1692 switch ((insn >> 22) & 3) {
1693 case 0:
1694 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1695 break;
1696 case 1:
1697 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1698 break;
1699 case 2:
1700 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1701 break;
1702 case 3:
1703 return 1;
1704 }
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 gen_op_iwmmxt_set_cup();
1708 break;
1709 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1710 wrd = (insn >> 12) & 0xf;
1711 rd0 = (insn >> 16) & 0xf;
1712 rd1 = (insn >> 0) & 0xf;
1713 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1714 if (insn & (1 << 22)) {
1715 if (insn & (1 << 20))
1716 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1717 else
1718 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1719 } else {
1720 if (insn & (1 << 20))
1721 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1722 else
1723 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1724 }
18c9b560
AZ
1725 gen_op_iwmmxt_movq_wRn_M0(wrd);
1726 gen_op_iwmmxt_set_mup();
1727 gen_op_iwmmxt_set_cup();
1728 break;
1729 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1730 wrd = (insn >> 12) & 0xf;
1731 rd0 = (insn >> 16) & 0xf;
1732 rd1 = (insn >> 0) & 0xf;
1733 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1734 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1735 tcg_gen_andi_i32(tmp, tmp, 7);
1736 iwmmxt_load_reg(cpu_V1, rd1);
1737 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1738 tcg_temp_free_i32(tmp);
18c9b560
AZ
1739 gen_op_iwmmxt_movq_wRn_M0(wrd);
1740 gen_op_iwmmxt_set_mup();
1741 break;
1742 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1743 if (((insn >> 6) & 3) == 3)
1744 return 1;
18c9b560
AZ
1745 rd = (insn >> 12) & 0xf;
1746 wrd = (insn >> 16) & 0xf;
da6b5335 1747 tmp = load_reg(s, rd);
18c9b560
AZ
1748 gen_op_iwmmxt_movq_M0_wRn(wrd);
1749 switch ((insn >> 6) & 3) {
1750 case 0:
da6b5335
FN
1751 tmp2 = tcg_const_i32(0xff);
1752 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1753 break;
1754 case 1:
da6b5335
FN
1755 tmp2 = tcg_const_i32(0xffff);
1756 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1757 break;
1758 case 2:
da6b5335
FN
1759 tmp2 = tcg_const_i32(0xffffffff);
1760 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1761 break;
da6b5335
FN
1762 default:
1763 TCGV_UNUSED(tmp2);
1764 TCGV_UNUSED(tmp3);
18c9b560 1765 }
da6b5335
FN
1766 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1767 tcg_temp_free(tmp3);
1768 tcg_temp_free(tmp2);
7d1b0095 1769 tcg_temp_free_i32(tmp);
18c9b560
AZ
1770 gen_op_iwmmxt_movq_wRn_M0(wrd);
1771 gen_op_iwmmxt_set_mup();
1772 break;
1773 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1774 rd = (insn >> 12) & 0xf;
1775 wrd = (insn >> 16) & 0xf;
da6b5335 1776 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1777 return 1;
1778 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1779 tmp = tcg_temp_new_i32();
18c9b560
AZ
1780 switch ((insn >> 22) & 3) {
1781 case 0:
da6b5335
FN
1782 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1783 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1784 if (insn & 8) {
1785 tcg_gen_ext8s_i32(tmp, tmp);
1786 } else {
1787 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1788 }
1789 break;
1790 case 1:
da6b5335
FN
1791 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1792 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1793 if (insn & 8) {
1794 tcg_gen_ext16s_i32(tmp, tmp);
1795 } else {
1796 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1797 }
1798 break;
1799 case 2:
da6b5335
FN
1800 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1801 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1802 break;
18c9b560 1803 }
da6b5335 1804 store_reg(s, rd, tmp);
18c9b560
AZ
1805 break;
1806 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1807 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1808 return 1;
da6b5335 1809 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1810 switch ((insn >> 22) & 3) {
1811 case 0:
da6b5335 1812 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1813 break;
1814 case 1:
da6b5335 1815 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1816 break;
1817 case 2:
da6b5335 1818 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1819 break;
18c9b560 1820 }
da6b5335
FN
1821 tcg_gen_shli_i32(tmp, tmp, 28);
1822 gen_set_nzcv(tmp);
7d1b0095 1823 tcg_temp_free_i32(tmp);
1824 break;
1825 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1826 if (((insn >> 6) & 3) == 3)
1827 return 1;
1828 rd = (insn >> 12) & 0xf;
1829 wrd = (insn >> 16) & 0xf;
da6b5335 1830 tmp = load_reg(s, rd);
1831 switch ((insn >> 6) & 3) {
1832 case 0:
da6b5335 1833 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1834 break;
1835 case 1:
da6b5335 1836 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1837 break;
1838 case 2:
da6b5335 1839 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1840 break;
18c9b560 1841 }
7d1b0095 1842 tcg_temp_free_i32(tmp);
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 gen_op_iwmmxt_set_mup();
1845 break;
1846 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1847 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1848 return 1;
da6b5335 1849 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1850 tmp2 = tcg_temp_new_i32();
da6b5335 1851 tcg_gen_mov_i32(tmp2, tmp);
1852 switch ((insn >> 22) & 3) {
1853 case 0:
1854 for (i = 0; i < 7; i ++) {
1855 tcg_gen_shli_i32(tmp2, tmp2, 4);
1856 tcg_gen_and_i32(tmp, tmp, tmp2);
1857 }
1858 break;
1859 case 1:
1860 for (i = 0; i < 3; i ++) {
1861 tcg_gen_shli_i32(tmp2, tmp2, 8);
1862 tcg_gen_and_i32(tmp, tmp, tmp2);
1863 }
1864 break;
1865 case 2:
1866 tcg_gen_shli_i32(tmp2, tmp2, 16);
1867 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1868 break;
18c9b560 1869 }
da6b5335 1870 gen_set_nzcv(tmp);
1871 tcg_temp_free_i32(tmp2);
1872 tcg_temp_free_i32(tmp);
1873 break;
1874 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1875 wrd = (insn >> 12) & 0xf;
1876 rd0 = (insn >> 16) & 0xf;
1877 gen_op_iwmmxt_movq_M0_wRn(rd0);
1878 switch ((insn >> 22) & 3) {
1879 case 0:
e677137d 1880 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1881 break;
1882 case 1:
e677137d 1883 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1884 break;
1885 case 2:
e677137d 1886 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1887 break;
1888 case 3:
1889 return 1;
1890 }
1891 gen_op_iwmmxt_movq_wRn_M0(wrd);
1892 gen_op_iwmmxt_set_mup();
1893 break;
1894 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1895 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1896 return 1;
da6b5335 1897 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1898 tmp2 = tcg_temp_new_i32();
da6b5335 1899 tcg_gen_mov_i32(tmp2, tmp);
1900 switch ((insn >> 22) & 3) {
1901 case 0:
1902 for (i = 0; i < 7; i ++) {
1903 tcg_gen_shli_i32(tmp2, tmp2, 4);
1904 tcg_gen_or_i32(tmp, tmp, tmp2);
1905 }
1906 break;
1907 case 1:
1908 for (i = 0; i < 3; i ++) {
1909 tcg_gen_shli_i32(tmp2, tmp2, 8);
1910 tcg_gen_or_i32(tmp, tmp, tmp2);
1911 }
1912 break;
1913 case 2:
1914 tcg_gen_shli_i32(tmp2, tmp2, 16);
1915 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1916 break;
18c9b560 1917 }
da6b5335 1918 gen_set_nzcv(tmp);
1919 tcg_temp_free_i32(tmp2);
1920 tcg_temp_free_i32(tmp);
1921 break;
1922 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1923 rd = (insn >> 12) & 0xf;
1924 rd0 = (insn >> 16) & 0xf;
da6b5335 1925 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1926 return 1;
1927 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1928 tmp = tcg_temp_new_i32();
1929 switch ((insn >> 22) & 3) {
1930 case 0:
da6b5335 1931 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1932 break;
1933 case 1:
da6b5335 1934 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1935 break;
1936 case 2:
da6b5335 1937 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1938 break;
18c9b560 1939 }
da6b5335 1940 store_reg(s, rd, tmp);
1941 break;
1942 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1943 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1944 wrd = (insn >> 12) & 0xf;
1945 rd0 = (insn >> 16) & 0xf;
1946 rd1 = (insn >> 0) & 0xf;
1947 gen_op_iwmmxt_movq_M0_wRn(rd0);
1948 switch ((insn >> 22) & 3) {
1949 case 0:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1952 else
1953 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1954 break;
1955 case 1:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1958 else
1959 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1960 break;
1961 case 2:
1962 if (insn & (1 << 21))
1963 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1964 else
1965 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1966 break;
1967 case 3:
1968 return 1;
1969 }
1970 gen_op_iwmmxt_movq_wRn_M0(wrd);
1971 gen_op_iwmmxt_set_mup();
1972 gen_op_iwmmxt_set_cup();
1973 break;
1974 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1975 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 gen_op_iwmmxt_movq_M0_wRn(rd0);
1979 switch ((insn >> 22) & 3) {
1980 case 0:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpacklsb_M0();
1983 else
1984 gen_op_iwmmxt_unpacklub_M0();
1985 break;
1986 case 1:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpacklsw_M0();
1989 else
1990 gen_op_iwmmxt_unpackluw_M0();
1991 break;
1992 case 2:
1993 if (insn & (1 << 21))
1994 gen_op_iwmmxt_unpacklsl_M0();
1995 else
1996 gen_op_iwmmxt_unpacklul_M0();
1997 break;
1998 case 3:
1999 return 1;
2000 }
2001 gen_op_iwmmxt_movq_wRn_M0(wrd);
2002 gen_op_iwmmxt_set_mup();
2003 gen_op_iwmmxt_set_cup();
2004 break;
2005 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2006 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2007 wrd = (insn >> 12) & 0xf;
2008 rd0 = (insn >> 16) & 0xf;
2009 gen_op_iwmmxt_movq_M0_wRn(rd0);
2010 switch ((insn >> 22) & 3) {
2011 case 0:
2012 if (insn & (1 << 21))
2013 gen_op_iwmmxt_unpackhsb_M0();
2014 else
2015 gen_op_iwmmxt_unpackhub_M0();
2016 break;
2017 case 1:
2018 if (insn & (1 << 21))
2019 gen_op_iwmmxt_unpackhsw_M0();
2020 else
2021 gen_op_iwmmxt_unpackhuw_M0();
2022 break;
2023 case 2:
2024 if (insn & (1 << 21))
2025 gen_op_iwmmxt_unpackhsl_M0();
2026 else
2027 gen_op_iwmmxt_unpackhul_M0();
2028 break;
2029 case 3:
2030 return 1;
2031 }
2032 gen_op_iwmmxt_movq_wRn_M0(wrd);
2033 gen_op_iwmmxt_set_mup();
2034 gen_op_iwmmxt_set_cup();
2035 break;
2036 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2037 case 0x214: case 0x614: case 0xa14: case 0xe14:
2038 if (((insn >> 22) & 3) == 0)
2039 return 1;
2040 wrd = (insn >> 12) & 0xf;
2041 rd0 = (insn >> 16) & 0xf;
2042 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2043 tmp = tcg_temp_new_i32();
da6b5335 2044 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2045 tcg_temp_free_i32(tmp);
18c9b560 2046 return 1;
da6b5335 2047 }
18c9b560 2048 switch ((insn >> 22) & 3) {
18c9b560 2049 case 1:
477955bd 2050 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2051 break;
2052 case 2:
477955bd 2053 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2054 break;
2055 case 3:
477955bd 2056 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2057 break;
2058 }
7d1b0095 2059 tcg_temp_free_i32(tmp);
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2063 break;
2064 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2065 case 0x014: case 0x414: case 0x814: case 0xc14:
2066 if (((insn >> 22) & 3) == 0)
2067 return 1;
2068 wrd = (insn >> 12) & 0xf;
2069 rd0 = (insn >> 16) & 0xf;
2070 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2071 tmp = tcg_temp_new_i32();
da6b5335 2072 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2073 tcg_temp_free_i32(tmp);
18c9b560 2074 return 1;
da6b5335 2075 }
18c9b560 2076 switch ((insn >> 22) & 3) {
18c9b560 2077 case 1:
477955bd 2078 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2079 break;
2080 case 2:
477955bd 2081 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2082 break;
2083 case 3:
477955bd 2084 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2085 break;
2086 }
7d1b0095 2087 tcg_temp_free_i32(tmp);
2088 gen_op_iwmmxt_movq_wRn_M0(wrd);
2089 gen_op_iwmmxt_set_mup();
2090 gen_op_iwmmxt_set_cup();
2091 break;
2092 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2093 case 0x114: case 0x514: case 0x914: case 0xd14:
2094 if (((insn >> 22) & 3) == 0)
2095 return 1;
2096 wrd = (insn >> 12) & 0xf;
2097 rd0 = (insn >> 16) & 0xf;
2098 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2099 tmp = tcg_temp_new_i32();
da6b5335 2100 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2101 tcg_temp_free_i32(tmp);
18c9b560 2102 return 1;
da6b5335 2103 }
18c9b560 2104 switch ((insn >> 22) & 3) {
18c9b560 2105 case 1:
477955bd 2106 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2107 break;
2108 case 2:
477955bd 2109 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2110 break;
2111 case 3:
477955bd 2112 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2113 break;
2114 }
7d1b0095 2115 tcg_temp_free_i32(tmp);
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2119 break;
2120 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2121 case 0x314: case 0x714: case 0xb14: case 0xf14:
2122 if (((insn >> 22) & 3) == 0)
2123 return 1;
2124 wrd = (insn >> 12) & 0xf;
2125 rd0 = (insn >> 16) & 0xf;
2126 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2127 tmp = tcg_temp_new_i32();
18c9b560 2128 switch ((insn >> 22) & 3) {
18c9b560 2129 case 1:
da6b5335 2130 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2131 tcg_temp_free_i32(tmp);
18c9b560 2132 return 1;
da6b5335 2133 }
477955bd 2134 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2135 break;
2136 case 2:
da6b5335 2137 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2138 tcg_temp_free_i32(tmp);
18c9b560 2139 return 1;
da6b5335 2140 }
477955bd 2141 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2142 break;
2143 case 3:
da6b5335 2144 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2145 tcg_temp_free_i32(tmp);
18c9b560 2146 return 1;
da6b5335 2147 }
477955bd 2148 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2149 break;
2150 }
7d1b0095 2151 tcg_temp_free_i32(tmp);
2152 gen_op_iwmmxt_movq_wRn_M0(wrd);
2153 gen_op_iwmmxt_set_mup();
2154 gen_op_iwmmxt_set_cup();
2155 break;
2156 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2157 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2158 wrd = (insn >> 12) & 0xf;
2159 rd0 = (insn >> 16) & 0xf;
2160 rd1 = (insn >> 0) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0);
2162 switch ((insn >> 22) & 3) {
2163 case 0:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_minub_M0_wRn(rd1);
2168 break;
2169 case 1:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2174 break;
2175 case 2:
2176 if (insn & (1 << 21))
2177 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2178 else
2179 gen_op_iwmmxt_minul_M0_wRn(rd1);
2180 break;
2181 case 3:
2182 return 1;
2183 }
2184 gen_op_iwmmxt_movq_wRn_M0(wrd);
2185 gen_op_iwmmxt_set_mup();
2186 break;
2187 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2188 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2189 wrd = (insn >> 12) & 0xf;
2190 rd0 = (insn >> 16) & 0xf;
2191 rd1 = (insn >> 0) & 0xf;
2192 gen_op_iwmmxt_movq_M0_wRn(rd0);
2193 switch ((insn >> 22) & 3) {
2194 case 0:
2195 if (insn & (1 << 21))
2196 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2197 else
2198 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2199 break;
2200 case 1:
2201 if (insn & (1 << 21))
2202 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2203 else
2204 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2205 break;
2206 case 2:
2207 if (insn & (1 << 21))
2208 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2209 else
2210 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2211 break;
2212 case 3:
2213 return 1;
2214 }
2215 gen_op_iwmmxt_movq_wRn_M0(wrd);
2216 gen_op_iwmmxt_set_mup();
2217 break;
2218 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2219 case 0x402: case 0x502: case 0x602: case 0x702:
2220 wrd = (insn >> 12) & 0xf;
2221 rd0 = (insn >> 16) & 0xf;
2222 rd1 = (insn >> 0) & 0xf;
2223 gen_op_iwmmxt_movq_M0_wRn(rd0);
2224 tmp = tcg_const_i32((insn >> 20) & 3);
2225 iwmmxt_load_reg(cpu_V1, rd1);
2226 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2227 tcg_temp_free(tmp);
2228 gen_op_iwmmxt_movq_wRn_M0(wrd);
2229 gen_op_iwmmxt_set_mup();
2230 break;
2231 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2232 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2233 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2234 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2235 wrd = (insn >> 12) & 0xf;
2236 rd0 = (insn >> 16) & 0xf;
2237 rd1 = (insn >> 0) & 0xf;
2238 gen_op_iwmmxt_movq_M0_wRn(rd0);
2239 switch ((insn >> 20) & 0xf) {
2240 case 0x0:
2241 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2242 break;
2243 case 0x1:
2244 gen_op_iwmmxt_subub_M0_wRn(rd1);
2245 break;
2246 case 0x3:
2247 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2248 break;
2249 case 0x4:
2250 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2251 break;
2252 case 0x5:
2253 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2254 break;
2255 case 0x7:
2256 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2257 break;
2258 case 0x8:
2259 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2260 break;
2261 case 0x9:
2262 gen_op_iwmmxt_subul_M0_wRn(rd1);
2263 break;
2264 case 0xb:
2265 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2266 break;
2267 default:
2268 return 1;
2269 }
2270 gen_op_iwmmxt_movq_wRn_M0(wrd);
2271 gen_op_iwmmxt_set_mup();
2272 gen_op_iwmmxt_set_cup();
2273 break;
2274 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2275 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2276 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2277 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2278 wrd = (insn >> 12) & 0xf;
2279 rd0 = (insn >> 16) & 0xf;
2280 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2281 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2282 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2283 tcg_temp_free(tmp);
2284 gen_op_iwmmxt_movq_wRn_M0(wrd);
2285 gen_op_iwmmxt_set_mup();
2286 gen_op_iwmmxt_set_cup();
2287 break;
2288 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2289 case 0x418: case 0x518: case 0x618: case 0x718:
2290 case 0x818: case 0x918: case 0xa18: case 0xb18:
2291 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2292 wrd = (insn >> 12) & 0xf;
2293 rd0 = (insn >> 16) & 0xf;
2294 rd1 = (insn >> 0) & 0xf;
2295 gen_op_iwmmxt_movq_M0_wRn(rd0);
2296 switch ((insn >> 20) & 0xf) {
2297 case 0x0:
2298 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2299 break;
2300 case 0x1:
2301 gen_op_iwmmxt_addub_M0_wRn(rd1);
2302 break;
2303 case 0x3:
2304 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2305 break;
2306 case 0x4:
2307 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2308 break;
2309 case 0x5:
2310 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2311 break;
2312 case 0x7:
2313 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2314 break;
2315 case 0x8:
2316 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2317 break;
2318 case 0x9:
2319 gen_op_iwmmxt_addul_M0_wRn(rd1);
2320 break;
2321 case 0xb:
2322 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2323 break;
2324 default:
2325 return 1;
2326 }
2327 gen_op_iwmmxt_movq_wRn_M0(wrd);
2328 gen_op_iwmmxt_set_mup();
2329 gen_op_iwmmxt_set_cup();
2330 break;
2331 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2332 case 0x408: case 0x508: case 0x608: case 0x708:
2333 case 0x808: case 0x908: case 0xa08: case 0xb08:
2334 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2335 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2336 return 1;
2337 wrd = (insn >> 12) & 0xf;
2338 rd0 = (insn >> 16) & 0xf;
2339 rd1 = (insn >> 0) & 0xf;
2340 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2341 switch ((insn >> 22) & 3) {
2342 case 1:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2345 else
2346 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2347 break;
2348 case 2:
2349 if (insn & (1 << 21))
2350 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2351 else
2352 gen_op_iwmmxt_packul_M0_wRn(rd1);
2353 break;
2354 case 3:
2355 if (insn & (1 << 21))
2356 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2357 else
2358 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2359 break;
2360 }
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 gen_op_iwmmxt_set_cup();
2364 break;
2365 case 0x201: case 0x203: case 0x205: case 0x207:
2366 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2367 case 0x211: case 0x213: case 0x215: case 0x217:
2368 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2369 wrd = (insn >> 5) & 0xf;
2370 rd0 = (insn >> 12) & 0xf;
2371 rd1 = (insn >> 0) & 0xf;
2372 if (rd0 == 0xf || rd1 == 0xf)
2373 return 1;
2374 gen_op_iwmmxt_movq_M0_wRn(wrd);
2375 tmp = load_reg(s, rd0);
2376 tmp2 = load_reg(s, rd1);
2377 switch ((insn >> 16) & 0xf) {
2378 case 0x0: /* TMIA */
da6b5335 2379 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2380 break;
2381 case 0x8: /* TMIAPH */
da6b5335 2382 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2383 break;
2384 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2385 if (insn & (1 << 16))
da6b5335 2386 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2387 if (insn & (1 << 17))
2388 tcg_gen_shri_i32(tmp2, tmp2, 16);
2389 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2390 break;
2391 default:
2392 tcg_temp_free_i32(tmp2);
2393 tcg_temp_free_i32(tmp);
2394 return 1;
2395 }
2396 tcg_temp_free_i32(tmp2);
2397 tcg_temp_free_i32(tmp);
2398 gen_op_iwmmxt_movq_wRn_M0(wrd);
2399 gen_op_iwmmxt_set_mup();
2400 break;
2401 default:
2402 return 1;
2403 }
2404
2405 return 0;
2406}
2407
a1c7273b 2408/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2409 (i.e. an undefined instruction). */
0ecb72a5 2410static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2411{
2412 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2413 TCGv tmp, tmp2;
2414
2415 if ((insn & 0x0ff00f10) == 0x0e200010) {
2416 /* Multiply with Internal Accumulate Format */
2417 rd0 = (insn >> 12) & 0xf;
2418 rd1 = insn & 0xf;
2419 acc = (insn >> 5) & 7;
2420
2421 if (acc != 0)
2422 return 1;
2423
2424 tmp = load_reg(s, rd0);
2425 tmp2 = load_reg(s, rd1);
2426 switch ((insn >> 16) & 0xf) {
2427 case 0x0: /* MIA */
3a554c0f 2428 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2429 break;
2430 case 0x8: /* MIAPH */
3a554c0f 2431 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2432 break;
2433 case 0xc: /* MIABB */
2434 case 0xd: /* MIABT */
2435 case 0xe: /* MIATB */
2436 case 0xf: /* MIATT */
18c9b560 2437 if (insn & (1 << 16))
3a554c0f 2438 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2439 if (insn & (1 << 17))
2440 tcg_gen_shri_i32(tmp2, tmp2, 16);
2441 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2442 break;
2443 default:
2444 return 1;
2445 }
2446 tcg_temp_free_i32(tmp2);
2447 tcg_temp_free_i32(tmp);
2448
2449 gen_op_iwmmxt_movq_wRn_M0(acc);
2450 return 0;
2451 }
2452
2453 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2454 /* Internal Accumulator Access Format */
2455 rdhi = (insn >> 16) & 0xf;
2456 rdlo = (insn >> 12) & 0xf;
2457 acc = insn & 7;
2458
2459 if (acc != 0)
2460 return 1;
2461
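        /* MRA transfers the 40-bit accumulator acc0 to two core registers: the
         * low 32 bits go to RdLo and the top 8 bits (hence the
         * (1 << (40 - 32)) - 1 mask) to RdHi; MAR concatenates RdLo/RdHi back
         * into the accumulator. */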
2462 if (insn & ARM_CP_RW_BIT) { /* MRA */
2463 iwmmxt_load_reg(cpu_V0, acc);
2464 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2465 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2466 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2467 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2468 } else { /* MAR */
2469 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2470 iwmmxt_store_reg(cpu_V0, acc);
2471 }
2472 return 0;
2473 }
2474
2475 return 1;
2476}
2477
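/* Helpers for extracting VFP register numbers from an instruction word.
 * Single precision: the four bits at 'bigbit' give reg[4:1] and the bit at
 * 'smallbit' gives reg[0].  Double precision: the four bits at 'bigbit' give
 * reg[3:0] and the bit at 'smallbit' gives reg[4]; that top bit is only valid
 * with VFP3 (32 D registers) and forces UNDEF on earlier VFP versions.
 * For example, VFP_DREG_D() combines insn[15:12] with insn[22] as bit 4. */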
2478#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2479#define VFP_SREG(insn, bigbit, smallbit) \
2480 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2481#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2482 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2483 reg = (((insn) >> (bigbit)) & 0x0f) \
2484 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2485 } else { \
2486 if (insn & (1 << (smallbit))) \
2487 return 1; \
2488 reg = ((insn) >> (bigbit)) & 0x0f; \
2489 }} while (0)
2490
2491#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2492#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2493#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2494#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2495#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2496#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2497
2498/* Move between integer and VFP cores. */
2499static TCGv gen_vfp_mrs(void)
2500{
7d1b0095 2501 TCGv tmp = tcg_temp_new_i32();
2502 tcg_gen_mov_i32(tmp, cpu_F0s);
2503 return tmp;
2504}
2505
2506static void gen_vfp_msr(TCGv tmp)
2507{
2508 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2509 tcg_temp_free_i32(tmp);
2510}
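/* A typical use of this pair in the decoder below is
 *     gen_mov_F0_vreg(0, rn);  tmp = gen_vfp_mrs();  store_reg(s, rd, tmp);
 * i.e. VFP register -> F0 -> ARM core register, with gen_vfp_msr() used for
 * the reverse direction. */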
2511
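/* The gen_neon_dup_* helpers replicate a narrow value across a 32-bit TCG
 * value by shift-and-OR: dup_u8 copies one byte into all four byte lanes,
 * dup_low16/dup_high16 copy one halfword into both halfword lanes. */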
2512static void gen_neon_dup_u8(TCGv var, int shift)
2513{
7d1b0095 2514 TCGv tmp = tcg_temp_new_i32();
2515 if (shift)
2516 tcg_gen_shri_i32(var, var, shift);
86831435 2517 tcg_gen_ext8u_i32(var, var);
2518 tcg_gen_shli_i32(tmp, var, 8);
2519 tcg_gen_or_i32(var, var, tmp);
2520 tcg_gen_shli_i32(tmp, var, 16);
2521 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2522 tcg_temp_free_i32(tmp);
2523}
2524
2525static void gen_neon_dup_low16(TCGv var)
2526{
7d1b0095 2527 TCGv tmp = tcg_temp_new_i32();
86831435 2528 tcg_gen_ext16u_i32(var, var);
2529 tcg_gen_shli_i32(tmp, var, 16);
2530 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2531 tcg_temp_free_i32(tmp);
2532}
2533
2534static void gen_neon_dup_high16(TCGv var)
2535{
7d1b0095 2536 TCGv tmp = tcg_temp_new_i32();
2537 tcg_gen_andi_i32(var, var, 0xffff0000);
2538 tcg_gen_shri_i32(tmp, var, 16);
2539 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2540 tcg_temp_free_i32(tmp);
2541}
2542
2543static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2544{
2545 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2546 TCGv tmp;
2547 switch (size) {
2548 case 0:
2549 tmp = gen_ld8u(addr, IS_USER(s));
2550 gen_neon_dup_u8(tmp, 0);
2551 break;
2552 case 1:
2553 tmp = gen_ld16u(addr, IS_USER(s));
2554 gen_neon_dup_low16(tmp);
2555 break;
2556 case 2:
2557 tmp = gen_ld32(addr, IS_USER(s));
2558 break;
2559 default: /* Avoid compiler warnings. */
2560 abort();
2561 }
2562 return tmp;
2563}
2564
a1c7273b 2565/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2566 (i.e. an undefined instruction). */
0ecb72a5 2567static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
2568{
2569 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2570 int dp, veclen;
312eea9f 2571 TCGv addr;
4373f3ce 2572 TCGv tmp;
ad69471c 2573 TCGv tmp2;
b7bcbe95 2574
2575 if (!arm_feature(env, ARM_FEATURE_VFP))
2576 return 1;
2577
5df8bac1 2578 if (!s->vfp_enabled) {
9ee6e8bb 2579 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2580 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2581 return 1;
2582 rn = (insn >> 16) & 0xf;
2583 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2584 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2585 return 1;
2586 }
2587 dp = ((insn & 0xf00) == 0xb00);
2588 switch ((insn >> 24) & 0xf) {
2589 case 0xe:
2590 if (insn & (1 << 4)) {
2591 /* single register transfer */
2592 rd = (insn >> 12) & 0xf;
2593 if (dp) {
2594 int size;
2595 int pass;
2596
2597 VFP_DREG_N(rn, insn);
2598 if (insn & 0xf)
b7bcbe95 2599 return 1;
2600 if (insn & 0x00c00060
2601 && !arm_feature(env, ARM_FEATURE_NEON))
2602 return 1;
2603
2604 pass = (insn >> 21) & 1;
2605 if (insn & (1 << 22)) {
2606 size = 0;
2607 offset = ((insn >> 5) & 3) * 8;
2608 } else if (insn & (1 << 5)) {
2609 size = 1;
2610 offset = (insn & (1 << 6)) ? 16 : 0;
2611 } else {
2612 size = 2;
2613 offset = 0;
2614 }
18c9b560 2615 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2616 /* vfp->arm */
ad69471c 2617 tmp = neon_load_reg(rn, pass);
2618 switch (size) {
2619 case 0:
9ee6e8bb 2620 if (offset)
ad69471c 2621 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2622 if (insn & (1 << 23))
ad69471c 2623 gen_uxtb(tmp);
9ee6e8bb 2624 else
ad69471c 2625 gen_sxtb(tmp);
2626 break;
2627 case 1:
2628 if (insn & (1 << 23)) {
2629 if (offset) {
ad69471c 2630 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2631 } else {
ad69471c 2632 gen_uxth(tmp);
2633 }
2634 } else {
2635 if (offset) {
ad69471c 2636 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2637 } else {
ad69471c 2638 gen_sxth(tmp);
2639 }
2640 }
2641 break;
2642 case 2:
2643 break;
2644 }
ad69471c 2645 store_reg(s, rd, tmp);
2646 } else {
2647 /* arm->vfp */
ad69471c 2648 tmp = load_reg(s, rd);
2649 if (insn & (1 << 23)) {
2650 /* VDUP */
2651 if (size == 0) {
ad69471c 2652 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2653 } else if (size == 1) {
ad69471c 2654 gen_neon_dup_low16(tmp);
9ee6e8bb 2655 }
cbbccffc 2656 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2657 tmp2 = tcg_temp_new_i32();
2658 tcg_gen_mov_i32(tmp2, tmp);
2659 neon_store_reg(rn, n, tmp2);
2660 }
2661 neon_store_reg(rn, n, tmp);
2662 } else {
2663 /* VMOV */
2664 switch (size) {
2665 case 0:
ad69471c 2666 tmp2 = neon_load_reg(rn, pass);
d593c48e 2667 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2668 tcg_temp_free_i32(tmp2);
2669 break;
2670 case 1:
ad69471c 2671 tmp2 = neon_load_reg(rn, pass);
d593c48e 2672 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2673 tcg_temp_free_i32(tmp2);
2674 break;
2675 case 2:
2676 break;
2677 }
ad69471c 2678 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2679 }
b7bcbe95 2680 }
2681 } else { /* !dp */
2682 if ((insn & 0x6f) != 0x00)
2683 return 1;
2684 rn = VFP_SREG_N(insn);
18c9b560 2685 if (insn & ARM_CP_RW_BIT) {
2686 /* vfp->arm */
2687 if (insn & (1 << 21)) {
2688 /* system register */
40f137e1 2689 rn >>= 1;
9ee6e8bb 2690
b7bcbe95 2691 switch (rn) {
40f137e1 2692 case ARM_VFP_FPSID:
4373f3ce 2693 /* VFP2 allows access to FSID from userspace.
2694 VFP3 restricts all id registers to privileged
2695 accesses. */
2696 if (IS_USER(s)
2697 && arm_feature(env, ARM_FEATURE_VFP3))
2698 return 1;
4373f3ce 2699 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2700 break;
40f137e1 2701 case ARM_VFP_FPEXC:
2702 if (IS_USER(s))
2703 return 1;
4373f3ce 2704 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2705 break;
2706 case ARM_VFP_FPINST:
2707 case ARM_VFP_FPINST2:
2708 /* Not present in VFP3. */
2709 if (IS_USER(s)
2710 || arm_feature(env, ARM_FEATURE_VFP3))
2711 return 1;
4373f3ce 2712 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2713 break;
40f137e1 2714 case ARM_VFP_FPSCR:
601d70b9 2715 if (rd == 15) {
2716 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2717 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2718 } else {
7d1b0095 2719 tmp = tcg_temp_new_i32();
2720 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2721 }
b7bcbe95 2722 break;
2723 case ARM_VFP_MVFR0:
2724 case ARM_VFP_MVFR1:
2725 if (IS_USER(s)
06ed5d66 2726 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2727 return 1;
4373f3ce 2728 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2729 break;
2730 default:
2731 return 1;
2732 }
2733 } else {
2734 gen_mov_F0_vreg(0, rn);
4373f3ce 2735 tmp = gen_vfp_mrs();
2736 }
2737 if (rd == 15) {
b5ff1b31 2738 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2739 gen_set_nzcv(tmp);
7d1b0095 2740 tcg_temp_free_i32(tmp);
2741 } else {
2742 store_reg(s, rd, tmp);
2743 }
2744 } else {
2745 /* arm->vfp */
b7bcbe95 2746 if (insn & (1 << 21)) {
40f137e1 2747 rn >>= 1;
2748 /* system register */
2749 switch (rn) {
40f137e1 2750 case ARM_VFP_FPSID:
2751 case ARM_VFP_MVFR0:
2752 case ARM_VFP_MVFR1:
2753 /* Writes are ignored. */
2754 break;
40f137e1 2755 case ARM_VFP_FPSCR:
e4c1cfa5 2756 tmp = load_reg(s, rd);
4373f3ce 2757 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2758 tcg_temp_free_i32(tmp);
b5ff1b31 2759 gen_lookup_tb(s);
b7bcbe95 2760 break;
40f137e1 2761 case ARM_VFP_FPEXC:
2762 if (IS_USER(s))
2763 return 1;
2764 /* TODO: VFP subarchitecture support.
2765 * For now, keep the EN bit only */
e4c1cfa5 2766 tmp = load_reg(s, rd);
71b3c3de 2767 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2768 store_cpu_field(tmp, vfp.xregs[rn]);
2769 gen_lookup_tb(s);
2770 break;
2771 case ARM_VFP_FPINST:
2772 case ARM_VFP_FPINST2:
e4c1cfa5 2773 tmp = load_reg(s, rd);
4373f3ce 2774 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2775 break;
2776 default:
2777 return 1;
2778 }
2779 } else {
e4c1cfa5 2780 tmp = load_reg(s, rd);
4373f3ce 2781 gen_vfp_msr(tmp);
2782 gen_mov_vreg_F0(0, rn);
2783 }
2784 }
2785 }
2786 } else {
2787 /* data processing */
2788 /* The opcode is in bits 23, 21, 20 and 6. */
2789 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2790 if (dp) {
2791 if (op == 15) {
2792 /* rn is opcode */
2793 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2794 } else {
2795 /* rn is register number */
9ee6e8bb 2796 VFP_DREG_N(rn, insn);
2797 }
2798
04595bf6 2799 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2800 /* Integer or single precision destination. */
9ee6e8bb 2801 rd = VFP_SREG_D(insn);
b7bcbe95 2802 } else {
9ee6e8bb 2803 VFP_DREG_D(rd, insn);
b7bcbe95 2804 }
2805 if (op == 15 &&
2806 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2807 /* VCVT from int is always from S reg regardless of dp bit.
2808 * VCVT with immediate frac_bits has same format as SREG_M
2809 */
2810 rm = VFP_SREG_M(insn);
b7bcbe95 2811 } else {
9ee6e8bb 2812 VFP_DREG_M(rm, insn);
2813 }
2814 } else {
9ee6e8bb 2815 rn = VFP_SREG_N(insn);
2816 if (op == 15 && rn == 15) {
2817 /* Double precision destination. */
2818 VFP_DREG_D(rd, insn);
2819 } else {
2820 rd = VFP_SREG_D(insn);
2821 }
2822 /* NB that we implicitly rely on the encoding for the frac_bits
2823 * in VCVT of fixed to float being the same as that of an SREG_M
2824 */
9ee6e8bb 2825 rm = VFP_SREG_M(insn);
2826 }
2827
69d1fc22 2828 veclen = s->vec_len;
2829 if (op == 15 && rn > 3)
2830 veclen = 0;
2831
2832 /* Shut up compiler warnings. */
2833 delta_m = 0;
2834 delta_d = 0;
2835 bank_mask = 0;
3b46e624 2836
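        /* Legacy VFP short-vector handling: s->vec_len / s->vec_stride
         * (cached from FPSCR.LEN/STRIDE) turn this scalar operation into a
         * short vector loop.  A destination register in bank 0 means purely
         * scalar; a source in bank 0 with a vector destination means "mixed"
         * (the scalar source is reused each iteration), otherwise both
         * operands advance by the same stride. */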
2837 if (veclen > 0) {
2838 if (dp)
2839 bank_mask = 0xc;
2840 else
2841 bank_mask = 0x18;
2842
2843 /* Figure out what type of vector operation this is. */
2844 if ((rd & bank_mask) == 0) {
2845 /* scalar */
2846 veclen = 0;
2847 } else {
2848 if (dp)
69d1fc22 2849 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2850 else
69d1fc22 2851 delta_d = s->vec_stride + 1;
2852
2853 if ((rm & bank_mask) == 0) {
2854 /* mixed scalar/vector */
2855 delta_m = 0;
2856 } else {
2857 /* vector */
2858 delta_m = delta_d;
2859 }
2860 }
2861 }
2862
2863 /* Load the initial operands. */
2864 if (op == 15) {
2865 switch (rn) {
2866 case 16:
2867 case 17:
2868 /* Integer source */
2869 gen_mov_F0_vreg(0, rm);
2870 break;
2871 case 8:
2872 case 9:
2873 /* Compare */
2874 gen_mov_F0_vreg(dp, rd);
2875 gen_mov_F1_vreg(dp, rm);
2876 break;
2877 case 10:
2878 case 11:
2879 /* Compare with zero */
2880 gen_mov_F0_vreg(dp, rd);
2881 gen_vfp_F1_ld0(dp);
2882 break;
2883 case 20:
2884 case 21:
2885 case 22:
2886 case 23:
2887 case 28:
2888 case 29:
2889 case 30:
2890 case 31:
2891 /* Source and destination the same. */
2892 gen_mov_F0_vreg(dp, rd);
2893 break;
2894 case 4:
2895 case 5:
2896 case 6:
2897 case 7:
2898 /* VCVTB, VCVTT: only present with the halfprec extension,
2899 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2900 */
2901 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2902 return 1;
2903 }
2904 /* Otherwise fall through */
2905 default:
2906 /* One source operand. */
2907 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2908 break;
2909 }
2910 } else {
2911 /* Two source operands. */
2912 gen_mov_F0_vreg(dp, rn);
2913 gen_mov_F1_vreg(dp, rm);
2914 }
2915
2916 for (;;) {
2917 /* Perform the calculation. */
2918 switch (op) {
2919 case 0: /* VMLA: fd + (fn * fm) */
2920 /* Note that order of inputs to the add matters for NaNs */
2921 gen_vfp_F1_mul(dp);
2922 gen_mov_F0_vreg(dp, rd);
2923 gen_vfp_add(dp);
2924 break;
605a6aed 2925 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2926 gen_vfp_mul(dp);
2927 gen_vfp_F1_neg(dp);
2928 gen_mov_F0_vreg(dp, rd);
2929 gen_vfp_add(dp);
2930 break;
2931 case 2: /* VNMLS: -fd + (fn * fm) */
2932 /* Note that it isn't valid to replace (-A + B) with (B - A)
2933 * or similar plausible looking simplifications
2934 * because this will give wrong results for NaNs.
2935 */
2936 gen_vfp_F1_mul(dp);
2937 gen_mov_F0_vreg(dp, rd);
2938 gen_vfp_neg(dp);
2939 gen_vfp_add(dp);
b7bcbe95 2940 break;
605a6aed 2941 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2942 gen_vfp_mul(dp);
2943 gen_vfp_F1_neg(dp);
2944 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2945 gen_vfp_neg(dp);
605a6aed 2946 gen_vfp_add(dp);
2947 break;
2948 case 4: /* mul: fn * fm */
2949 gen_vfp_mul(dp);
2950 break;
2951 case 5: /* nmul: -(fn * fm) */
2952 gen_vfp_mul(dp);
2953 gen_vfp_neg(dp);
2954 break;
2955 case 6: /* add: fn + fm */
2956 gen_vfp_add(dp);
2957 break;
2958 case 7: /* sub: fn - fm */
2959 gen_vfp_sub(dp);
2960 break;
2961 case 8: /* div: fn / fm */
2962 gen_vfp_div(dp);
2963 break;
2964 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2965 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2966 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2967 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2968 /* These are fused multiply-add, and must be done as one
2969 * floating point operation with no rounding between the
2970 * multiplication and addition steps.
2971 * NB that doing the negations here as separate steps is
2972 * correct : an input NaN should come out with its sign bit
2973 * flipped if it is a negated-input.
2974 */
2975 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2976 return 1;
2977 }
2978 if (dp) {
2979 TCGv_ptr fpst;
2980 TCGv_i64 frd;
2981 if (op & 1) {
2982 /* VFNMS, VFMS */
2983 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2984 }
2985 frd = tcg_temp_new_i64();
2986 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2987 if (op & 2) {
2988 /* VFNMA, VFNMS */
2989 gen_helper_vfp_negd(frd, frd);
2990 }
2991 fpst = get_fpstatus_ptr(0);
2992 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2993 cpu_F1d, frd, fpst);
2994 tcg_temp_free_ptr(fpst);
2995 tcg_temp_free_i64(frd);
2996 } else {
2997 TCGv_ptr fpst;
2998 TCGv_i32 frd;
2999 if (op & 1) {
3000 /* VFNMS, VFMS */
3001 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3002 }
3003 frd = tcg_temp_new_i32();
3004 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3005 if (op & 2) {
3006 gen_helper_vfp_negs(frd, frd);
3007 }
3008 fpst = get_fpstatus_ptr(0);
3009 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3010 cpu_F1s, frd, fpst);
3011 tcg_temp_free_ptr(fpst);
3012 tcg_temp_free_i32(frd);
3013 }
3014 break;
3015 case 14: /* fconst */
3016 if (!arm_feature(env, ARM_FEATURE_VFP3))
3017 return 1;
3018
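                /* VMOV (immediate): the 8-bit encoded constant (sign bit plus
                 * a 7-bit exponent/fraction fragment) is expanded here into a
                 * full IEEE single or double bit pattern and loaded into F0. */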
3019 n = (insn << 12) & 0x80000000;
3020 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3021 if (dp) {
3022 if (i & 0x40)
3023 i |= 0x3f80;
3024 else
3025 i |= 0x4000;
3026 n |= i << 16;
4373f3ce 3027 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3028 } else {
3029 if (i & 0x40)
3030 i |= 0x780;
3031 else
3032 i |= 0x800;
3033 n |= i << 19;
5b340b51 3034 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3035 }
9ee6e8bb 3036 break;
3037 case 15: /* extension space */
3038 switch (rn) {
3039 case 0: /* cpy */
3040 /* no-op */
3041 break;
3042 case 1: /* abs */
3043 gen_vfp_abs(dp);
3044 break;
3045 case 2: /* neg */
3046 gen_vfp_neg(dp);
3047 break;
3048 case 3: /* sqrt */
3049 gen_vfp_sqrt(dp);
3050 break;
60011498 3051 case 4: /* vcvtb.f32.f16 */
3052 tmp = gen_vfp_mrs();
3053 tcg_gen_ext16u_i32(tmp, tmp);
3054 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3055 tcg_temp_free_i32(tmp);
3056 break;
3057 case 5: /* vcvtt.f32.f16 */
3058 tmp = gen_vfp_mrs();
3059 tcg_gen_shri_i32(tmp, tmp, 16);
3060 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3061 tcg_temp_free_i32(tmp);
3062 break;
3063 case 6: /* vcvtb.f16.f32 */
7d1b0095 3064 tmp = tcg_temp_new_i32();
3065 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3066 gen_mov_F0_vreg(0, rd);
3067 tmp2 = gen_vfp_mrs();
3068 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3069 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3070 tcg_temp_free_i32(tmp2);
3071 gen_vfp_msr(tmp);
3072 break;
3073 case 7: /* vcvtt.f16.f32 */
7d1b0095 3074 tmp = tcg_temp_new_i32();
3075 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3076 tcg_gen_shli_i32(tmp, tmp, 16);
3077 gen_mov_F0_vreg(0, rd);
3078 tmp2 = gen_vfp_mrs();
3079 tcg_gen_ext16u_i32(tmp2, tmp2);
3080 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3081 tcg_temp_free_i32(tmp2);
3082 gen_vfp_msr(tmp);
3083 break;
3084 case 8: /* cmp */
3085 gen_vfp_cmp(dp);
3086 break;
3087 case 9: /* cmpe */
3088 gen_vfp_cmpe(dp);
3089 break;
3090 case 10: /* cmpz */
3091 gen_vfp_cmp(dp);
3092 break;
3093 case 11: /* cmpez */
3094 gen_vfp_F1_ld0(dp);
3095 gen_vfp_cmpe(dp);
3096 break;
3097 case 15: /* single<->double conversion */
3098 if (dp)
4373f3ce 3099 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3100 else
4373f3ce 3101 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3102 break;
3103 case 16: /* fuito */
5500b06c 3104 gen_vfp_uito(dp, 0);
3105 break;
3106 case 17: /* fsito */
5500b06c 3107 gen_vfp_sito(dp, 0);
b7bcbe95 3108 break;
3109 case 20: /* fshto */
3110 if (!arm_feature(env, ARM_FEATURE_VFP3))
3111 return 1;
5500b06c 3112 gen_vfp_shto(dp, 16 - rm, 0);
3113 break;
3114 case 21: /* fslto */
3115 if (!arm_feature(env, ARM_FEATURE_VFP3))
3116 return 1;
5500b06c 3117 gen_vfp_slto(dp, 32 - rm, 0);
3118 break;
3119 case 22: /* fuhto */
3120 if (!arm_feature(env, ARM_FEATURE_VFP3))
3121 return 1;
5500b06c 3122 gen_vfp_uhto(dp, 16 - rm, 0);
3123 break;
3124 case 23: /* fulto */
3125 if (!arm_feature(env, ARM_FEATURE_VFP3))
3126 return 1;
5500b06c 3127 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3128 break;
b7bcbe95 3129 case 24: /* ftoui */
5500b06c 3130 gen_vfp_toui(dp, 0);
3131 break;
3132 case 25: /* ftouiz */
5500b06c 3133 gen_vfp_touiz(dp, 0);
3134 break;
3135 case 26: /* ftosi */
5500b06c 3136 gen_vfp_tosi(dp, 0);
3137 break;
3138 case 27: /* ftosiz */
5500b06c 3139 gen_vfp_tosiz(dp, 0);
b7bcbe95 3140 break;
3141 case 28: /* ftosh */
3142 if (!arm_feature(env, ARM_FEATURE_VFP3))
3143 return 1;
5500b06c 3144 gen_vfp_tosh(dp, 16 - rm, 0);
3145 break;
3146 case 29: /* ftosl */
3147 if (!arm_feature(env, ARM_FEATURE_VFP3))
3148 return 1;
5500b06c 3149 gen_vfp_tosl(dp, 32 - rm, 0);
3150 break;
3151 case 30: /* ftouh */
3152 if (!arm_feature(env, ARM_FEATURE_VFP3))
3153 return 1;
5500b06c 3154 gen_vfp_touh(dp, 16 - rm, 0);
3155 break;
3156 case 31: /* ftoul */
3157 if (!arm_feature(env, ARM_FEATURE_VFP3))
3158 return 1;
5500b06c 3159 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3160 break;
b7bcbe95 3161 default: /* undefined */
3162 return 1;
3163 }
3164 break;
3165 default: /* undefined */
3166 return 1;
3167 }
3168
3169 /* Write back the result. */
3170 if (op == 15 && (rn >= 8 && rn <= 11))
3171 ; /* Comparison, do nothing. */
3172 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3173 /* VCVT double to int: always integer result. */
3174 gen_mov_vreg_F0(0, rd);
3175 else if (op == 15 && rn == 15)
3176 /* conversion */
3177 gen_mov_vreg_F0(!dp, rd);
3178 else
3179 gen_mov_vreg_F0(dp, rd);
3180
3181 /* break out of the loop if we have finished */
3182 if (veclen == 0)
3183 break;
3184
3185 if (op == 15 && delta_m == 0) {
3186 /* single source one-many */
3187 while (veclen--) {
3188 rd = ((rd + delta_d) & (bank_mask - 1))
3189 | (rd & bank_mask);
3190 gen_mov_vreg_F0(dp, rd);
3191 }
3192 break;
3193 }
3194 /* Setup the next operands. */
3195 veclen--;
3196 rd = ((rd + delta_d) & (bank_mask - 1))
3197 | (rd & bank_mask);
3198
3199 if (op == 15) {
3200 /* One source operand. */
3201 rm = ((rm + delta_m) & (bank_mask - 1))
3202 | (rm & bank_mask);
3203 gen_mov_F0_vreg(dp, rm);
3204 } else {
3205 /* Two source operands. */
3206 rn = ((rn + delta_d) & (bank_mask - 1))
3207 | (rn & bank_mask);
3208 gen_mov_F0_vreg(dp, rn);
3209 if (delta_m) {
3210 rm = ((rm + delta_m) & (bank_mask - 1))
3211 | (rm & bank_mask);
3212 gen_mov_F1_vreg(dp, rm);
3213 }
3214 }
3215 }
3216 }
3217 break;
3218 case 0xc:
3219 case 0xd:
8387da81 3220 if ((insn & 0x03e00000) == 0x00400000) {
3221 /* two-register transfer */
3222 rn = (insn >> 16) & 0xf;
3223 rd = (insn >> 12) & 0xf;
3224 if (dp) {
3225 VFP_DREG_M(rm, insn);
3226 } else {
3227 rm = VFP_SREG_M(insn);
3228 }
b7bcbe95 3229
18c9b560 3230 if (insn & ARM_CP_RW_BIT) {
3231 /* vfp->arm */
3232 if (dp) {
3233 gen_mov_F0_vreg(0, rm * 2);
3234 tmp = gen_vfp_mrs();
3235 store_reg(s, rd, tmp);
3236 gen_mov_F0_vreg(0, rm * 2 + 1);
3237 tmp = gen_vfp_mrs();
3238 store_reg(s, rn, tmp);
3239 } else {
3240 gen_mov_F0_vreg(0, rm);
4373f3ce 3241 tmp = gen_vfp_mrs();
8387da81 3242 store_reg(s, rd, tmp);
b7bcbe95 3243 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3244 tmp = gen_vfp_mrs();
8387da81 3245 store_reg(s, rn, tmp);
3246 }
3247 } else {
3248 /* arm->vfp */
3249 if (dp) {
3250 tmp = load_reg(s, rd);
3251 gen_vfp_msr(tmp);
3252 gen_mov_vreg_F0(0, rm * 2);
3253 tmp = load_reg(s, rn);
3254 gen_vfp_msr(tmp);
3255 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3256 } else {
8387da81 3257 tmp = load_reg(s, rd);
4373f3ce 3258 gen_vfp_msr(tmp);
b7bcbe95 3259 gen_mov_vreg_F0(0, rm);
8387da81 3260 tmp = load_reg(s, rn);
4373f3ce 3261 gen_vfp_msr(tmp);
3262 gen_mov_vreg_F0(0, rm + 1);
3263 }
3264 }
3265 } else {
3266 /* Load/store */
3267 rn = (insn >> 16) & 0xf;
3268 if (dp)
9ee6e8bb 3269 VFP_DREG_D(rd, insn);
b7bcbe95 3270 else
9ee6e8bb 3271 rd = VFP_SREG_D(insn);
3272 if ((insn & 0x01200000) == 0x01000000) {
3273 /* Single load/store */
3274 offset = (insn & 0xff) << 2;
3275 if ((insn & (1 << 23)) == 0)
3276 offset = -offset;
3277 if (s->thumb && rn == 15) {
3278 /* This is actually UNPREDICTABLE */
3279 addr = tcg_temp_new_i32();
3280 tcg_gen_movi_i32(addr, s->pc & ~2);
3281 } else {
3282 addr = load_reg(s, rn);
3283 }
312eea9f 3284 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3285 if (insn & (1 << 20)) {
312eea9f 3286 gen_vfp_ld(s, dp, addr);
3287 gen_mov_vreg_F0(dp, rd);
3288 } else {
3289 gen_mov_F0_vreg(dp, rd);
312eea9f 3290 gen_vfp_st(s, dp, addr);
b7bcbe95 3291 }
7d1b0095 3292 tcg_temp_free_i32(addr);
3293 } else {
3294 /* load/store multiple */
934814f1 3295 int w = insn & (1 << 21);
3296 if (dp)
3297 n = (insn >> 1) & 0x7f;
3298 else
3299 n = insn & 0xff;
3300
3301 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3302 /* P == U , W == 1 => UNDEF */
3303 return 1;
3304 }
3305 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3306 /* UNPREDICTABLE cases for bad immediates: we choose to
3307 * UNDEF to avoid generating huge numbers of TCG ops
3308 */
3309 return 1;
3310 }
3311 if (rn == 15 && w) {
3312 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3313 return 1;
3314 }
3315
3316 if (s->thumb && rn == 15) {
3317 /* This is actually UNPREDICTABLE */
3318 addr = tcg_temp_new_i32();
3319 tcg_gen_movi_i32(addr, s->pc & ~2);
3320 } else {
3321 addr = load_reg(s, rn);
3322 }
b7bcbe95 3323 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3324 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3325
3326 if (dp)
3327 offset = 8;
3328 else
3329 offset = 4;
3330 for (i = 0; i < n; i++) {
18c9b560 3331 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3332 /* load */
312eea9f 3333 gen_vfp_ld(s, dp, addr);
3334 gen_mov_vreg_F0(dp, rd + i);
3335 } else {
3336 /* store */
3337 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3338 gen_vfp_st(s, dp, addr);
b7bcbe95 3339 }
312eea9f 3340 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3341 }
934814f1 3342 if (w) {
3343 /* writeback */
3344 if (insn & (1 << 24))
3345 offset = -offset * n;
3346 else if (dp && (insn & 1))
3347 offset = 4;
3348 else
3349 offset = 0;
3350
3351 if (offset != 0)
3352 tcg_gen_addi_i32(addr, addr, offset);
3353 store_reg(s, rn, addr);
3354 } else {
7d1b0095 3355 tcg_temp_free_i32(addr);
3356 }
3357 }
3358 }
3359 break;
3360 default:
3361 /* Should never happen. */
3362 return 1;
3363 }
3364 return 0;
3365}
3366
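/* Jump to 'dest': if the target lies on the same guest page as this TB we can
 * chain directly via goto_tb/exit_tb(tb + n); otherwise set the PC and exit
 * with exit_tb(0) so the next TB is looked up at run time. */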
6e256c93 3367static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3368{
3369 TranslationBlock *tb;
3370
3371 tb = s->tb;
3372 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3373 tcg_gen_goto_tb(n);
8984bd2e 3374 gen_set_pc_im(dest);
4b4a72e5 3375 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3376 } else {
8984bd2e 3377 gen_set_pc_im(dest);
57fec1fe 3378 tcg_gen_exit_tb(0);
6e256c93 3379 }
3380}
3381
3382static inline void gen_jmp (DisasContext *s, uint32_t dest)
3383{
551bd27f 3384 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3385 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3386 if (s->thumb)
3387 dest |= 1;
3388 gen_bx_im(s, dest);
8aaca4c0 3389 } else {
6e256c93 3390 gen_goto_tb(s, 0, dest);
3391 s->is_jmp = DISAS_TB_JUMP;
3392 }
3393}
3394
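/* 16x16->32 signed multiply used by the halfword multiply (SMUL<x><y> etc.)
 * instructions: x and y select the top (1) or bottom (0) halfword of each
 * operand before the multiply. */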
d9ba4830 3395static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3396{
ee097184 3397 if (x)
d9ba4830 3398 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3399 else
d9ba4830 3400 gen_sxth(t0);
ee097184 3401 if (y)
d9ba4830 3402 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3403 else
3404 gen_sxth(t1);
3405 tcg_gen_mul_i32(t0, t0, t1);
3406}
3407
3408/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3409static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3410 uint32_t mask;
3411
3412 mask = 0;
3413 if (flags & (1 << 0))
3414 mask |= 0xff;
3415 if (flags & (1 << 1))
3416 mask |= 0xff00;
3417 if (flags & (1 << 2))
3418 mask |= 0xff0000;
3419 if (flags & (1 << 3))
3420 mask |= 0xff000000;
9ee6e8bb 3421
2ae23e75 3422 /* Mask out undefined bits. */
9ee6e8bb 3423 mask &= ~CPSR_RESERVED;
3424 if (!arm_feature(env, ARM_FEATURE_V4T))
3425 mask &= ~CPSR_T;
3426 if (!arm_feature(env, ARM_FEATURE_V5))
3427 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3428 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3429 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3430 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3431 mask &= ~CPSR_IT;
9ee6e8bb 3432 /* Mask out execution state bits. */
2ae23e75 3433 if (!spsr)
e160c51c 3434 mask &= ~CPSR_EXEC;
3435 /* Mask out privileged bits. */
3436 if (IS_USER(s))
9ee6e8bb 3437 mask &= CPSR_USER;
3438 return mask;
3439}
3440
3441/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3442static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3443{
d9ba4830 3444 TCGv tmp;
3445 if (spsr) {
3446 /* ??? This is also undefined in system mode. */
3447 if (IS_USER(s))
3448 return 1;
3449
3450 tmp = load_cpu_field(spsr);
3451 tcg_gen_andi_i32(tmp, tmp, ~mask);
3452 tcg_gen_andi_i32(t0, t0, mask);
3453 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3454 store_cpu_field(tmp, spsr);
b5ff1b31 3455 } else {
2fbac54b 3456 gen_set_cpsr(t0, mask);
b5ff1b31 3457 }
7d1b0095 3458 tcg_temp_free_i32(t0);
3459 gen_lookup_tb(s);
3460 return 0;
3461}
3462
3463/* Returns nonzero if access to the PSR is not permitted. */
3464static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3465{
3466 TCGv tmp;
7d1b0095 3467 tmp = tcg_temp_new_i32();
3468 tcg_gen_movi_i32(tmp, val);
3469 return gen_set_psr(s, mask, spsr, tmp);
3470}
3471
3472/* Generate an old-style exception return. Marks pc as dead. */
3473static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3474{
d9ba4830 3475 TCGv tmp;
e9bb4aa9 3476 store_reg(s, 15, pc);
3477 tmp = load_cpu_field(spsr);
3478 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3479 tcg_temp_free_i32(tmp);
3480 s->is_jmp = DISAS_UPDATE;
3481}
3482
3483/* Generate a v6 exception return. Marks both values as dead. */
3484static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3485{
b0109805 3486 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3487 tcg_temp_free_i32(cpsr);
b0109805 3488 store_reg(s, 15, pc);
3489 s->is_jmp = DISAS_UPDATE;
3490}
3b46e624 3491
3492static inline void
3493gen_set_condexec (DisasContext *s)
3494{
3495 if (s->condexec_mask) {
8f01245e 3496 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3497 TCGv tmp = tcg_temp_new_i32();
8f01245e 3498 tcg_gen_movi_i32(tmp, val);
d9ba4830 3499 store_cpu_field(tmp, condexec_bits);
3500 }
3501}
3b46e624 3502
3503static void gen_exception_insn(DisasContext *s, int offset, int excp)
3504{
3505 gen_set_condexec(s);
3506 gen_set_pc_im(s->pc - offset);
3507 gen_exception(excp);
3508 s->is_jmp = DISAS_JUMP;
3509}
3510
3511static void gen_nop_hint(DisasContext *s, int val)
3512{
3513 switch (val) {
3514 case 3: /* wfi */
8984bd2e 3515 gen_set_pc_im(s->pc);
3516 s->is_jmp = DISAS_WFI;
3517 break;
3518 case 2: /* wfe */
3519 case 4: /* sev */
3520 /* TODO: Implement SEV and WFE. May help SMP performance. */
3521 default: /* nop */
3522 break;
3523 }
3524}
99c475ab 3525
ad69471c 3526#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3527
62698be3 3528static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3529{
3530 switch (size) {
3531 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3532 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3533 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3534 default: abort();
9ee6e8bb 3535 }
3536}
3537
dd8fbd78 3538static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3539{
3540 switch (size) {
3541 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3542 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3543 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3544 default: return;
3545 }
3546}
3547
3548/* 32-bit pairwise ops end up the same as the elementwise versions. */
3549#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3550#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3551#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3552#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3553
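/* The two GEN_NEON_INTEGER_OP{_ENV,} macros below dispatch on (size << 1) | u
 * to the matching s8/u8/s16/u16/s32/u32 helper for "tmp = tmp op tmp2".
 * The _ENV variant also passes cpu_env, for helpers that need CPU state
 * (for example to set the saturation flag). */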
3554#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3555 switch ((size << 1) | u) { \
3556 case 0: \
dd8fbd78 3557 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3558 break; \
3559 case 1: \
dd8fbd78 3560 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3561 break; \
3562 case 2: \
dd8fbd78 3563 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3564 break; \
3565 case 3: \
dd8fbd78 3566 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3567 break; \
3568 case 4: \
dd8fbd78 3569 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3570 break; \
3571 case 5: \
dd8fbd78 3572 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3573 break; \
3574 default: return 1; \
3575 }} while (0)
3576
3577#define GEN_NEON_INTEGER_OP(name) do { \
3578 switch ((size << 1) | u) { \
ad69471c 3579 case 0: \
dd8fbd78 3580 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3581 break; \
3582 case 1: \
dd8fbd78 3583 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3584 break; \
3585 case 2: \
dd8fbd78 3586 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3587 break; \
3588 case 3: \
dd8fbd78 3589 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3590 break; \
3591 case 4: \
dd8fbd78 3592 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3593 break; \
3594 case 5: \
dd8fbd78 3595 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3596 break; \
3597 default: return 1; \
3598 }} while (0)
3599
dd8fbd78 3600static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3601{
7d1b0095 3602 TCGv tmp = tcg_temp_new_i32();
3603 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3604 return tmp;
3605}
3606
dd8fbd78 3607static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3608{
dd8fbd78 3609 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3610 tcg_temp_free_i32(var);
3611}
3612
dd8fbd78 3613static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3614{
dd8fbd78 3615 TCGv tmp;
9ee6e8bb 3616 if (size == 1) {
3617 tmp = neon_load_reg(reg & 7, reg >> 4);
3618 if (reg & 8) {
dd8fbd78 3619 gen_neon_dup_high16(tmp);
3620 } else {
3621 gen_neon_dup_low16(tmp);
dd8fbd78 3622 }
3623 } else {
3624 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3625 }
dd8fbd78 3626 return tmp;
3627}
3628
02acedf9 3629static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3630{
02acedf9 3631 TCGv tmp, tmp2;
600b828c 3632 if (!q && size == 2) {
3633 return 1;
3634 }
3635 tmp = tcg_const_i32(rd);
3636 tmp2 = tcg_const_i32(rm);
3637 if (q) {
3638 switch (size) {
3639 case 0:
02da0b2d 3640 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3641 break;
3642 case 1:
02da0b2d 3643 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3644 break;
3645 case 2:
02da0b2d 3646 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3647 break;
3648 default:
3649 abort();
3650 }
3651 } else {
3652 switch (size) {
3653 case 0:
02da0b2d 3654 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3655 break;
3656 case 1:
02da0b2d 3657 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3658 break;
3659 default:
3660 abort();
3661 }
3662 }
3663 tcg_temp_free_i32(tmp);
3664 tcg_temp_free_i32(tmp2);
3665 return 0;
3666}
3667
d68a6f3a 3668static int gen_neon_zip(int rd, int rm, int size, int q)
3669{
3670 TCGv tmp, tmp2;
600b828c 3671 if (!q && size == 2) {
3672 return 1;
3673 }
3674 tmp = tcg_const_i32(rd);
3675 tmp2 = tcg_const_i32(rm);
3676 if (q) {
3677 switch (size) {
3678 case 0:
02da0b2d 3679 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3680 break;
3681 case 1:
02da0b2d 3682 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3683 break;
3684 case 2:
02da0b2d 3685 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3686 break;
3687 default:
3688 abort();
3689 }
3690 } else {
3691 switch (size) {
3692 case 0:
02da0b2d 3693 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3694 break;
3695 case 1:
02da0b2d 3696 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3697 break;
3698 default:
3699 abort();
3700 }
3701 }
3702 tcg_temp_free_i32(tmp);
3703 tcg_temp_free_i32(tmp2);
3704 return 0;
3705}
3706
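/* Helpers for VTRN: exchange alternating byte (u8) or halfword (u16) lanes
 * between t0 and t1 in place, using only shift/mask/OR on 32-bit temps. */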
3707static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3708{
3709 TCGv rd, tmp;
3710
3711 rd = tcg_temp_new_i32();
3712 tmp = tcg_temp_new_i32();
3713
3714 tcg_gen_shli_i32(rd, t0, 8);
3715 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3716 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3717 tcg_gen_or_i32(rd, rd, tmp);
3718
3719 tcg_gen_shri_i32(t1, t1, 8);
3720 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3721 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3722 tcg_gen_or_i32(t1, t1, tmp);
3723 tcg_gen_mov_i32(t0, rd);
3724
7d1b0095
PM
3725 tcg_temp_free_i32(tmp);
3726 tcg_temp_free_i32(rd);
19457615
FN
3727}
3728
3729static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3730{
3731 TCGv rd, tmp;
3732
7d1b0095
PM
3733 rd = tcg_temp_new_i32();
3734 tmp = tcg_temp_new_i32();
19457615
FN
3735
3736 tcg_gen_shli_i32(rd, t0, 16);
3737 tcg_gen_andi_i32(tmp, t1, 0xffff);
3738 tcg_gen_or_i32(rd, rd, tmp);
3739 tcg_gen_shri_i32(t1, t1, 16);
3740 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3741 tcg_gen_or_i32(t1, t1, tmp);
3742 tcg_gen_mov_i32(t0, rd);
3743
7d1b0095
PM
3744 tcg_temp_free_i32(tmp);
3745 tcg_temp_free_i32(rd);
19457615
FN
3746}
3747
3748
9ee6e8bb
PB
3749static struct {
3750 int nregs;
3751 int interleave;
3752 int spacing;
3753} neon_ls_element_type[11] = {
3754 {4, 4, 1},
3755 {4, 4, 2},
3756 {4, 1, 1},
3757 {4, 2, 1},
3758 {3, 3, 1},
3759 {3, 3, 2},
3760 {3, 1, 1},
3761 {1, 1, 1},
3762 {2, 2, 1},
3763 {2, 2, 2},
3764 {2, 1, 1}
3765};
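/* A rough reading of the table above: it is indexed by the op field of a
 * "load/store multiple structures" encoding and gives the number of D
 * registers touched, the element interleave factor and the register spacing.
 * For instance op == 0 ({4, 4, 1}) appears to correspond to VLD4/VST4 with
 * four interleaved registers, while op == 7 ({1, 1, 1}) appears to be a
 * plain single-register VLD1/VST1. */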
3766
3767/* Translate a NEON load/store element instruction. Return nonzero if the
3768 instruction is invalid. */
0ecb72a5 3769static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3770{
3771 int rd, rn, rm;
3772 int op;
3773 int nregs;
3774 int interleave;
84496233 3775 int spacing;
9ee6e8bb
PB
3776 int stride;
3777 int size;
3778 int reg;
3779 int pass;
3780 int load;
3781 int shift;
9ee6e8bb 3782 int n;
1b2b1e54 3783 TCGv addr;
b0109805 3784 TCGv tmp;
8f8e3aa4 3785 TCGv tmp2;
84496233 3786 TCGv_i64 tmp64;
9ee6e8bb 3787
5df8bac1 3788 if (!s->vfp_enabled)
9ee6e8bb
PB
3789 return 1;
3790 VFP_DREG_D(rd, insn);
3791 rn = (insn >> 16) & 0xf;
3792 rm = insn & 0xf;
3793 load = (insn & (1 << 21)) != 0;
3794 if ((insn & (1 << 23)) == 0) {
3795 /* Load store all elements. */
3796 op = (insn >> 8) & 0xf;
3797 size = (insn >> 6) & 3;
84496233 3798 if (op > 10)
9ee6e8bb 3799 return 1;
f2dd89d0
PM
3800 /* Catch UNDEF cases for bad values of align field */
3801 switch (op & 0xc) {
3802 case 4:
3803 if (((insn >> 5) & 1) == 1) {
3804 return 1;
3805 }
3806 break;
3807 case 8:
3808 if (((insn >> 4) & 3) == 3) {
3809 return 1;
3810 }
3811 break;
3812 default:
3813 break;
3814 }
9ee6e8bb
PB
3815 nregs = neon_ls_element_type[op].nregs;
3816 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3817 spacing = neon_ls_element_type[op].spacing;
3818 if (size == 3 && (interleave | spacing) != 1)
3819 return 1;
e318a60b 3820 addr = tcg_temp_new_i32();
dcc65026 3821 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3822 stride = (1 << size) * interleave;
3823 for (reg = 0; reg < nregs; reg++) {
3824 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3825 load_reg_var(s, addr, rn);
3826 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3827 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3828 load_reg_var(s, addr, rn);
3829 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3830 }
84496233
JR
3831 if (size == 3) {
3832 if (load) {
3833 tmp64 = gen_ld64(addr, IS_USER(s));
3834 neon_store_reg64(tmp64, rd);
3835 tcg_temp_free_i64(tmp64);
3836 } else {
3837 tmp64 = tcg_temp_new_i64();
3838 neon_load_reg64(tmp64, rd);
3839 gen_st64(tmp64, addr, IS_USER(s));
3840 }
3841 tcg_gen_addi_i32(addr, addr, stride);
3842 } else {
3843 for (pass = 0; pass < 2; pass++) {
3844 if (size == 2) {
3845 if (load) {
3846 tmp = gen_ld32(addr, IS_USER(s));
3847 neon_store_reg(rd, pass, tmp);
3848 } else {
3849 tmp = neon_load_reg(rd, pass);
3850 gen_st32(tmp, addr, IS_USER(s));
3851 }
1b2b1e54 3852 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3853 } else if (size == 1) {
3854 if (load) {
3855 tmp = gen_ld16u(addr, IS_USER(s));
3856 tcg_gen_addi_i32(addr, addr, stride);
3857 tmp2 = gen_ld16u(addr, IS_USER(s));
3858 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3859 tcg_gen_shli_i32(tmp2, tmp2, 16);
3860 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3861 tcg_temp_free_i32(tmp2);
84496233
JR
3862 neon_store_reg(rd, pass, tmp);
3863 } else {
3864 tmp = neon_load_reg(rd, pass);
7d1b0095 3865 tmp2 = tcg_temp_new_i32();
84496233
JR
3866 tcg_gen_shri_i32(tmp2, tmp, 16);
3867 gen_st16(tmp, addr, IS_USER(s));
3868 tcg_gen_addi_i32(addr, addr, stride);
3869 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3870 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3871 }
84496233
JR
3872 } else /* size == 0 */ {
3873 if (load) {
3874 TCGV_UNUSED(tmp2);
3875 for (n = 0; n < 4; n++) {
3876 tmp = gen_ld8u(addr, IS_USER(s));
3877 tcg_gen_addi_i32(addr, addr, stride);
3878 if (n == 0) {
3879 tmp2 = tmp;
3880 } else {
41ba8341
PB
3881 tcg_gen_shli_i32(tmp, tmp, n * 8);
3882 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3883 tcg_temp_free_i32(tmp);
84496233 3884 }
9ee6e8bb 3885 }
84496233
JR
3886 neon_store_reg(rd, pass, tmp2);
3887 } else {
3888 tmp2 = neon_load_reg(rd, pass);
3889 for (n = 0; n < 4; n++) {
7d1b0095 3890 tmp = tcg_temp_new_i32();
84496233
JR
3891 if (n == 0) {
3892 tcg_gen_mov_i32(tmp, tmp2);
3893 } else {
3894 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3895 }
3896 gen_st8(tmp, addr, IS_USER(s));
3897 tcg_gen_addi_i32(addr, addr, stride);
3898 }
7d1b0095 3899 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3900 }
3901 }
3902 }
3903 }
84496233 3904 rd += spacing;
9ee6e8bb 3905 }
e318a60b 3906 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3907 stride = nregs * 8;
3908 } else {
3909 size = (insn >> 10) & 3;
3910 if (size == 3) {
3911 /* Load single element to all lanes. */
8e18cde3
PM
3912 int a = (insn >> 4) & 1;
3913 if (!load) {
9ee6e8bb 3914 return 1;
8e18cde3 3915 }
9ee6e8bb
PB
3916 size = (insn >> 6) & 3;
3917 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3918
3919 if (size == 3) {
3920 if (nregs != 4 || a == 0) {
9ee6e8bb 3921 return 1;
99c475ab 3922 }
8e18cde3
PM
3923 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3924 size = 2;
3925 }
3926 if (nregs == 1 && a == 1 && size == 0) {
3927 return 1;
3928 }
3929 if (nregs == 3 && a == 1) {
3930 return 1;
3931 }
e318a60b 3932 addr = tcg_temp_new_i32();
8e18cde3
PM
3933 load_reg_var(s, addr, rn);
3934 if (nregs == 1) {
3935 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3936 tmp = gen_load_and_replicate(s, addr, size);
3937 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3938 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3939 if (insn & (1 << 5)) {
3940 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3941 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3942 }
3943 tcg_temp_free_i32(tmp);
3944 } else {
3945 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3946 stride = (insn & (1 << 5)) ? 2 : 1;
3947 for (reg = 0; reg < nregs; reg++) {
3948 tmp = gen_load_and_replicate(s, addr, size);
3949 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3950 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3951 tcg_temp_free_i32(tmp);
3952 tcg_gen_addi_i32(addr, addr, 1 << size);
3953 rd += stride;
3954 }
9ee6e8bb 3955 }
e318a60b 3956 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3957 stride = (1 << size) * nregs;
3958 } else {
3959 /* Single element. */
93262b16 3960 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
3961 pass = (insn >> 7) & 1;
3962 switch (size) {
3963 case 0:
3964 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3965 stride = 1;
3966 break;
3967 case 1:
3968 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3969 stride = (insn & (1 << 5)) ? 2 : 1;
3970 break;
3971 case 2:
3972 shift = 0;
9ee6e8bb
PB
3973 stride = (insn & (1 << 6)) ? 2 : 1;
3974 break;
3975 default:
3976 abort();
3977 }
3978 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
3979 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3980 switch (nregs) {
3981 case 1:
3982 if (((idx & (1 << size)) != 0) ||
3983 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3984 return 1;
3985 }
3986 break;
3987 case 3:
3988 if ((idx & 1) != 0) {
3989 return 1;
3990 }
3991 /* fall through */
3992 case 2:
3993 if (size == 2 && (idx & 2) != 0) {
3994 return 1;
3995 }
3996 break;
3997 case 4:
3998 if ((size == 2) && ((idx & 3) == 3)) {
3999 return 1;
4000 }
4001 break;
4002 default:
4003 abort();
4004 }
4005 if ((rd + stride * (nregs - 1)) > 31) {
4006 /* Attempts to write off the end of the register file
4007 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4008 * the neon_load_reg() would write off the end of the array.
4009 */
4010 return 1;
4011 }
e318a60b 4012 addr = tcg_temp_new_i32();
dcc65026 4013 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4014 for (reg = 0; reg < nregs; reg++) {
4015 if (load) {
9ee6e8bb
PB
4016 switch (size) {
4017 case 0:
1b2b1e54 4018 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4019 break;
4020 case 1:
1b2b1e54 4021 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4022 break;
4023 case 2:
1b2b1e54 4024 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4025 break;
a50f5b91
PB
4026 default: /* Avoid compiler warnings. */
4027 abort();
9ee6e8bb
PB
4028 }
4029 if (size != 2) {
8f8e3aa4 4030 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
4031 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4032 shift, size ? 16 : 8);
7d1b0095 4033 tcg_temp_free_i32(tmp2);
9ee6e8bb 4034 }
8f8e3aa4 4035 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4036 } else { /* Store */
8f8e3aa4
PB
4037 tmp = neon_load_reg(rd, pass);
4038 if (shift)
4039 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4040 switch (size) {
4041 case 0:
1b2b1e54 4042 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4043 break;
4044 case 1:
1b2b1e54 4045 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4046 break;
4047 case 2:
1b2b1e54 4048 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4049 break;
99c475ab 4050 }
99c475ab 4051 }
9ee6e8bb 4052 rd += stride;
1b2b1e54 4053 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4054 }
e318a60b 4055 tcg_temp_free_i32(addr);
9ee6e8bb 4056 stride = nregs * (1 << size);
99c475ab 4057 }
9ee6e8bb
PB
4058 }
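    /* Writeback sketch for the code below: rm == 15 means no writeback,
     * rm == 13 means post-increment rn by the number of bytes transferred
     * ("stride"), and any other rm post-indexes rn by that register's value. */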
4059 if (rm != 15) {
b26eefb6
PB
4060 TCGv base;
4061
4062 base = load_reg(s, rn);
9ee6e8bb 4063 if (rm == 13) {
b26eefb6 4064 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4065 } else {
b26eefb6
PB
4066 TCGv index;
4067 index = load_reg(s, rm);
4068 tcg_gen_add_i32(base, base, index);
7d1b0095 4069 tcg_temp_free_i32(index);
9ee6e8bb 4070 }
b26eefb6 4071 store_reg(s, rn, base);
9ee6e8bb
PB
4072 }
4073 return 0;
4074}
3b46e624 4075
8f8e3aa4
PB
4076/* Bitwise select. dest = c ? t : f. Clobbers t and f. */
4077static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4078{
4079 tcg_gen_and_i32(t, t, c);
f669df27 4080 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4081 tcg_gen_or_i32(dest, t, f);
4082}
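/* The helper above computes dest = (t & c) | (f & ~c): bits of t where the
 * mask c is set, bits of f elsewhere. The VBSL, VBIT and VBIF cases later in
 * this file appear to reuse it by permuting the operands, with VBSL using the
 * old rd value as the mask c and VBIT/VBIF using rm as the mask while
 * swapping which of rn/rd supplies the selected bits. */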
4083
a7812ae4 4084static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4085{
4086 switch (size) {
4087 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4088 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4089 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4090 default: abort();
4091 }
4092}
4093
a7812ae4 4094static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4095{
4096 switch (size) {
02da0b2d
PM
4097 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4098 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4099 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4100 default: abort();
4101 }
4102}
4103
a7812ae4 4104static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4105{
4106 switch (size) {
02da0b2d
PM
4107 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4108 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4109 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4110 default: abort();
4111 }
4112}
4113
af1bbf30
JR
4114static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4115{
4116 switch (size) {
02da0b2d
PM
4117 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4118 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4119 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4120 default: abort();
4121 }
4122}
4123
ad69471c
PB
4124static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4125 int q, int u)
4126{
4127 if (q) {
4128 if (u) {
4129 switch (size) {
4130 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4131 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4132 default: abort();
4133 }
4134 } else {
4135 switch (size) {
4136 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4137 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4138 default: abort();
4139 }
4140 }
4141 } else {
4142 if (u) {
4143 switch (size) {
b408a9b0
CL
4144 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4145 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4146 default: abort();
4147 }
4148 } else {
4149 switch (size) {
4150 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4151 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4152 default: abort();
4153 }
4154 }
4155 }
4156}
4157
a7812ae4 4158static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4159{
4160 if (u) {
4161 switch (size) {
4162 case 0: gen_helper_neon_widen_u8(dest, src); break;
4163 case 1: gen_helper_neon_widen_u16(dest, src); break;
4164 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4165 default: abort();
4166 }
4167 } else {
4168 switch (size) {
4169 case 0: gen_helper_neon_widen_s8(dest, src); break;
4170 case 1: gen_helper_neon_widen_s16(dest, src); break;
4171 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4172 default: abort();
4173 }
4174 }
7d1b0095 4175 tcg_temp_free_i32(src);
ad69471c
PB
4176}
4177
4178static inline void gen_neon_addl(int size)
4179{
4180 switch (size) {
4181 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4182 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4183 case 2: tcg_gen_add_i64(CPU_V001); break;
4184 default: abort();
4185 }
4186}
4187
4188static inline void gen_neon_subl(int size)
4189{
4190 switch (size) {
4191 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4192 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4193 case 2: tcg_gen_sub_i64(CPU_V001); break;
4194 default: abort();
4195 }
4196}
4197
a7812ae4 4198static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4199{
4200 switch (size) {
4201 case 0: gen_helper_neon_negl_u16(var, var); break;
4202 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4203 case 2:
4204 tcg_gen_neg_i64(var, var);
4205 break;
ad69471c
PB
4206 default: abort();
4207 }
4208}
4209
a7812ae4 4210static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4211{
4212 switch (size) {
02da0b2d
PM
4213 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4214 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4215 default: abort();
4216 }
4217}
4218
a7812ae4 4219static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4220{
a7812ae4 4221 TCGv_i64 tmp;
ad69471c
PB
4222
4223 switch ((size << 1) | u) {
4224 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4225 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4226 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4227 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4228 case 4:
4229 tmp = gen_muls_i64_i32(a, b);
4230 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4231 tcg_temp_free_i64(tmp);
ad69471c
PB
4232 break;
4233 case 5:
4234 tmp = gen_mulu_i64_i32(a, b);
4235 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4236 tcg_temp_free_i64(tmp);
ad69471c
PB
4237 break;
4238 default: abort();
4239 }
c6067f04
CL
4240
4241 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4242 Don't forget to clean them now. */
4243 if (size < 2) {
7d1b0095
PM
4244 tcg_temp_free_i32(a);
4245 tcg_temp_free_i32(b);
c6067f04 4246 }
ad69471c
PB
4247}
4248
c33171c7
PM
4249static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4250{
4251 if (op) {
4252 if (u) {
4253 gen_neon_unarrow_sats(size, dest, src);
4254 } else {
4255 gen_neon_narrow(size, dest, src);
4256 }
4257 } else {
4258 if (u) {
4259 gen_neon_narrow_satu(size, dest, src);
4260 } else {
4261 gen_neon_narrow_sats(size, dest, src);
4262 }
4263 }
4264}
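/* Rough summary of the dispatch above: op selects between the saturating pair
 * and the truncate/signed-to-unsigned pair, and u picks within each:
 * op=0,u=0 signed saturating narrow; op=0,u=1 unsigned saturating narrow;
 * op=1,u=0 plain truncating narrow; op=1,u=1 signed-to-unsigned saturating
 * narrow. */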
4265
62698be3
PM
4266/* Symbolic constants for op fields for Neon 3-register same-length.
4267 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4268 * table A7-9.
4269 */
4270#define NEON_3R_VHADD 0
4271#define NEON_3R_VQADD 1
4272#define NEON_3R_VRHADD 2
4273#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4274#define NEON_3R_VHSUB 4
4275#define NEON_3R_VQSUB 5
4276#define NEON_3R_VCGT 6
4277#define NEON_3R_VCGE 7
4278#define NEON_3R_VSHL 8
4279#define NEON_3R_VQSHL 9
4280#define NEON_3R_VRSHL 10
4281#define NEON_3R_VQRSHL 11
4282#define NEON_3R_VMAX 12
4283#define NEON_3R_VMIN 13
4284#define NEON_3R_VABD 14
4285#define NEON_3R_VABA 15
4286#define NEON_3R_VADD_VSUB 16
4287#define NEON_3R_VTST_VCEQ 17
4288#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4289#define NEON_3R_VMUL 19
4290#define NEON_3R_VPMAX 20
4291#define NEON_3R_VPMIN 21
4292#define NEON_3R_VQDMULH_VQRDMULH 22
4293#define NEON_3R_VPADD 23
da97f52c 4294#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4295#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4296#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4297#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4298#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4299#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4300#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4301
4302static const uint8_t neon_3r_sizes[] = {
4303 [NEON_3R_VHADD] = 0x7,
4304 [NEON_3R_VQADD] = 0xf,
4305 [NEON_3R_VRHADD] = 0x7,
4306 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4307 [NEON_3R_VHSUB] = 0x7,
4308 [NEON_3R_VQSUB] = 0xf,
4309 [NEON_3R_VCGT] = 0x7,
4310 [NEON_3R_VCGE] = 0x7,
4311 [NEON_3R_VSHL] = 0xf,
4312 [NEON_3R_VQSHL] = 0xf,
4313 [NEON_3R_VRSHL] = 0xf,
4314 [NEON_3R_VQRSHL] = 0xf,
4315 [NEON_3R_VMAX] = 0x7,
4316 [NEON_3R_VMIN] = 0x7,
4317 [NEON_3R_VABD] = 0x7,
4318 [NEON_3R_VABA] = 0x7,
4319 [NEON_3R_VADD_VSUB] = 0xf,
4320 [NEON_3R_VTST_VCEQ] = 0x7,
4321 [NEON_3R_VML] = 0x7,
4322 [NEON_3R_VMUL] = 0x7,
4323 [NEON_3R_VPMAX] = 0x7,
4324 [NEON_3R_VPMIN] = 0x7,
4325 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4326 [NEON_3R_VPADD] = 0x7,
da97f52c 4327 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4328 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4329 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4330 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4331 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4332 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4333 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4334};
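/* Each entry above is a bitmask of the element sizes the op accepts; the
 * decoder checks (neon_3r_sizes[op] & (1 << size)) and UNDEFs on zero.
 * For example 0x7 allows sizes 0-2 (8/16/32-bit elements), while 0x6 for
 * VQDMULH/VQRDMULH restricts them to 16- and 32-bit only. */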
4335
600b828c
PM
4336/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4337 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4338 * table A7-13.
4339 */
4340#define NEON_2RM_VREV64 0
4341#define NEON_2RM_VREV32 1
4342#define NEON_2RM_VREV16 2
4343#define NEON_2RM_VPADDL 4
4344#define NEON_2RM_VPADDL_U 5
4345#define NEON_2RM_VCLS 8
4346#define NEON_2RM_VCLZ 9
4347#define NEON_2RM_VCNT 10
4348#define NEON_2RM_VMVN 11
4349#define NEON_2RM_VPADAL 12
4350#define NEON_2RM_VPADAL_U 13
4351#define NEON_2RM_VQABS 14
4352#define NEON_2RM_VQNEG 15
4353#define NEON_2RM_VCGT0 16
4354#define NEON_2RM_VCGE0 17
4355#define NEON_2RM_VCEQ0 18
4356#define NEON_2RM_VCLE0 19
4357#define NEON_2RM_VCLT0 20
4358#define NEON_2RM_VABS 22
4359#define NEON_2RM_VNEG 23
4360#define NEON_2RM_VCGT0_F 24
4361#define NEON_2RM_VCGE0_F 25
4362#define NEON_2RM_VCEQ0_F 26
4363#define NEON_2RM_VCLE0_F 27
4364#define NEON_2RM_VCLT0_F 28
4365#define NEON_2RM_VABS_F 30
4366#define NEON_2RM_VNEG_F 31
4367#define NEON_2RM_VSWP 32
4368#define NEON_2RM_VTRN 33
4369#define NEON_2RM_VUZP 34
4370#define NEON_2RM_VZIP 35
4371#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4372#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4373#define NEON_2RM_VSHLL 38
4374#define NEON_2RM_VCVT_F16_F32 44
4375#define NEON_2RM_VCVT_F32_F16 46
4376#define NEON_2RM_VRECPE 56
4377#define NEON_2RM_VRSQRTE 57
4378#define NEON_2RM_VRECPE_F 58
4379#define NEON_2RM_VRSQRTE_F 59
4380#define NEON_2RM_VCVT_FS 60
4381#define NEON_2RM_VCVT_FU 61
4382#define NEON_2RM_VCVT_SF 62
4383#define NEON_2RM_VCVT_UF 63
4384
4385static int neon_2rm_is_float_op(int op)
4386{
4387 /* Return true if this neon 2reg-misc op is float-to-float */
4388 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4389 op >= NEON_2RM_VRECPE_F);
4390}
4391
4392/* Each entry in this array has bit n set if the insn allows
4393 * size value n (otherwise it will UNDEF). Since unallocated
4394 * op values will have no bits set they always UNDEF.
4395 */
4396static const uint8_t neon_2rm_sizes[] = {
4397 [NEON_2RM_VREV64] = 0x7,
4398 [NEON_2RM_VREV32] = 0x3,
4399 [NEON_2RM_VREV16] = 0x1,
4400 [NEON_2RM_VPADDL] = 0x7,
4401 [NEON_2RM_VPADDL_U] = 0x7,
4402 [NEON_2RM_VCLS] = 0x7,
4403 [NEON_2RM_VCLZ] = 0x7,
4404 [NEON_2RM_VCNT] = 0x1,
4405 [NEON_2RM_VMVN] = 0x1,
4406 [NEON_2RM_VPADAL] = 0x7,
4407 [NEON_2RM_VPADAL_U] = 0x7,
4408 [NEON_2RM_VQABS] = 0x7,
4409 [NEON_2RM_VQNEG] = 0x7,
4410 [NEON_2RM_VCGT0] = 0x7,
4411 [NEON_2RM_VCGE0] = 0x7,
4412 [NEON_2RM_VCEQ0] = 0x7,
4413 [NEON_2RM_VCLE0] = 0x7,
4414 [NEON_2RM_VCLT0] = 0x7,
4415 [NEON_2RM_VABS] = 0x7,
4416 [NEON_2RM_VNEG] = 0x7,
4417 [NEON_2RM_VCGT0_F] = 0x4,
4418 [NEON_2RM_VCGE0_F] = 0x4,
4419 [NEON_2RM_VCEQ0_F] = 0x4,
4420 [NEON_2RM_VCLE0_F] = 0x4,
4421 [NEON_2RM_VCLT0_F] = 0x4,
4422 [NEON_2RM_VABS_F] = 0x4,
4423 [NEON_2RM_VNEG_F] = 0x4,
4424 [NEON_2RM_VSWP] = 0x1,
4425 [NEON_2RM_VTRN] = 0x7,
4426 [NEON_2RM_VUZP] = 0x7,
4427 [NEON_2RM_VZIP] = 0x7,
4428 [NEON_2RM_VMOVN] = 0x7,
4429 [NEON_2RM_VQMOVN] = 0x7,
4430 [NEON_2RM_VSHLL] = 0x7,
4431 [NEON_2RM_VCVT_F16_F32] = 0x2,
4432 [NEON_2RM_VCVT_F32_F16] = 0x2,
4433 [NEON_2RM_VRECPE] = 0x4,
4434 [NEON_2RM_VRSQRTE] = 0x4,
4435 [NEON_2RM_VRECPE_F] = 0x4,
4436 [NEON_2RM_VRSQRTE_F] = 0x4,
4437 [NEON_2RM_VCVT_FS] = 0x4,
4438 [NEON_2RM_VCVT_FU] = 0x4,
4439 [NEON_2RM_VCVT_SF] = 0x4,
4440 [NEON_2RM_VCVT_UF] = 0x4,
4441};
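/* Same idea as neon_3r_sizes, but for the 2-register miscellaneous group:
 * e.g. 0x1 restricts an op to 8-bit elements (VCNT, VMVN, VSWP) and 0x2 to
 * 16-bit only (the F16<->F32 conversions). Ops not listed stay zero, so they
 * UNDEF for every size. */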
4442
9ee6e8bb
PB
4443/* Translate a NEON data processing instruction. Return nonzero if the
4444 instruction is invalid.
ad69471c
PB
4445 We process data in a mixture of 32-bit and 64-bit chunks.
4446 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
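/* In practice the "32-bit chunks" are the per-pass loops below: a D-register
   operation runs two passes and a Q-register operation four
   (pass < (q ? 4 : 2)), with the 64-bit temporaries reserved for genuine
   64-bit elements and widened intermediates. */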
2c0262af 4447
0ecb72a5 4448static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4449{
4450 int op;
4451 int q;
4452 int rd, rn, rm;
4453 int size;
4454 int shift;
4455 int pass;
4456 int count;
4457 int pairwise;
4458 int u;
ca9a32e4 4459 uint32_t imm, mask;
b75263d6 4460 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4461 TCGv_i64 tmp64;
9ee6e8bb 4462
5df8bac1 4463 if (!s->vfp_enabled)
9ee6e8bb
PB
4464 return 1;
4465 q = (insn & (1 << 6)) != 0;
4466 u = (insn >> 24) & 1;
4467 VFP_DREG_D(rd, insn);
4468 VFP_DREG_N(rn, insn);
4469 VFP_DREG_M(rm, insn);
4470 size = (insn >> 20) & 3;
4471 if ((insn & (1 << 23)) == 0) {
4472 /* Three register same length. */
4473 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4474 /* Catch invalid op and bad size combinations: UNDEF */
4475 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4476 return 1;
4477 }
25f84f79
PM
4478 /* All insns of this form UNDEF for either this condition or the
4479 * superset of cases "Q==1"; we catch the latter later.
4480 */
4481 if (q && ((rd | rn | rm) & 1)) {
4482 return 1;
4483 }
62698be3
PM
4484 if (size == 3 && op != NEON_3R_LOGIC) {
4485 /* 64-bit element instructions. */
9ee6e8bb 4486 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4487 neon_load_reg64(cpu_V0, rn + pass);
4488 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4489 switch (op) {
62698be3 4490 case NEON_3R_VQADD:
9ee6e8bb 4491 if (u) {
02da0b2d
PM
4492 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4493 cpu_V0, cpu_V1);
2c0262af 4494 } else {
02da0b2d
PM
4495 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4496 cpu_V0, cpu_V1);
2c0262af 4497 }
9ee6e8bb 4498 break;
62698be3 4499 case NEON_3R_VQSUB:
9ee6e8bb 4500 if (u) {
02da0b2d
PM
4501 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4502 cpu_V0, cpu_V1);
ad69471c 4503 } else {
02da0b2d
PM
4504 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4505 cpu_V0, cpu_V1);
ad69471c
PB
4506 }
4507 break;
62698be3 4508 case NEON_3R_VSHL:
ad69471c
PB
4509 if (u) {
4510 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4511 } else {
4512 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4513 }
4514 break;
62698be3 4515 case NEON_3R_VQSHL:
ad69471c 4516 if (u) {
02da0b2d
PM
4517 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4518 cpu_V1, cpu_V0);
ad69471c 4519 } else {
02da0b2d
PM
4520 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4521 cpu_V1, cpu_V0);
ad69471c
PB
4522 }
4523 break;
62698be3 4524 case NEON_3R_VRSHL:
ad69471c
PB
4525 if (u) {
4526 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4527 } else {
ad69471c
PB
4528 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4529 }
4530 break;
62698be3 4531 case NEON_3R_VQRSHL:
ad69471c 4532 if (u) {
02da0b2d
PM
4533 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4534 cpu_V1, cpu_V0);
ad69471c 4535 } else {
02da0b2d
PM
4536 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4537 cpu_V1, cpu_V0);
1e8d4eec 4538 }
9ee6e8bb 4539 break;
62698be3 4540 case NEON_3R_VADD_VSUB:
9ee6e8bb 4541 if (u) {
ad69471c 4542 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4543 } else {
ad69471c 4544 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4545 }
4546 break;
4547 default:
4548 abort();
2c0262af 4549 }
ad69471c 4550 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4551 }
9ee6e8bb 4552 return 0;
2c0262af 4553 }
25f84f79 4554 pairwise = 0;
9ee6e8bb 4555 switch (op) {
62698be3
PM
4556 case NEON_3R_VSHL:
4557 case NEON_3R_VQSHL:
4558 case NEON_3R_VRSHL:
4559 case NEON_3R_VQRSHL:
9ee6e8bb 4560 {
ad69471c
PB
4561 int rtmp;
4562 /* Shift instruction operands are reversed. */
4563 rtmp = rn;
9ee6e8bb 4564 rn = rm;
ad69471c 4565 rm = rtmp;
9ee6e8bb 4566 }
2c0262af 4567 break;
25f84f79
PM
4568 case NEON_3R_VPADD:
4569 if (u) {
4570 return 1;
4571 }
4572 /* Fall through */
62698be3
PM
4573 case NEON_3R_VPMAX:
4574 case NEON_3R_VPMIN:
9ee6e8bb 4575 pairwise = 1;
2c0262af 4576 break;
25f84f79
PM
4577 case NEON_3R_FLOAT_ARITH:
4578 pairwise = (u && size < 2); /* if VPADD (float) */
4579 break;
4580 case NEON_3R_FLOAT_MINMAX:
4581 pairwise = u; /* if VPMIN/VPMAX (float) */
4582 break;
4583 case NEON_3R_FLOAT_CMP:
4584 if (!u && size) {
4585 /* no encoding for U=0 C=1x */
4586 return 1;
4587 }
4588 break;
4589 case NEON_3R_FLOAT_ACMP:
4590 if (!u) {
4591 return 1;
4592 }
4593 break;
4594 case NEON_3R_VRECPS_VRSQRTS:
4595 if (u) {
4596 return 1;
4597 }
2c0262af 4598 break;
25f84f79
PM
4599 case NEON_3R_VMUL:
4600 if (u && (size != 0)) {
4601 /* UNDEF on invalid size for polynomial subcase */
4602 return 1;
4603 }
2c0262af 4604 break;
da97f52c
PM
4605 case NEON_3R_VFM:
4606 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4607 return 1;
4608 }
4609 break;
9ee6e8bb 4610 default:
2c0262af 4611 break;
9ee6e8bb 4612 }
dd8fbd78 4613
25f84f79
PM
4614 if (pairwise && q) {
4615 /* All the pairwise insns UNDEF if Q is set */
4616 return 1;
4617 }
4618
9ee6e8bb
PB
4619 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4620
4621 if (pairwise) {
4622 /* Pairwise. */
a5a14945
JR
4623 if (pass < 1) {
4624 tmp = neon_load_reg(rn, 0);
4625 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4626 } else {
a5a14945
JR
4627 tmp = neon_load_reg(rm, 0);
4628 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4629 }
4630 } else {
4631 /* Elementwise. */
dd8fbd78
FN
4632 tmp = neon_load_reg(rn, pass);
4633 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4634 }
4635 switch (op) {
62698be3 4636 case NEON_3R_VHADD:
9ee6e8bb
PB
4637 GEN_NEON_INTEGER_OP(hadd);
4638 break;
62698be3 4639 case NEON_3R_VQADD:
02da0b2d 4640 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4641 break;
62698be3 4642 case NEON_3R_VRHADD:
9ee6e8bb 4643 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4644 break;
62698be3 4645 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4646 switch ((u << 2) | size) {
4647 case 0: /* VAND */
dd8fbd78 4648 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4649 break;
4650 case 1: /* BIC */
f669df27 4651 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4652 break;
4653 case 2: /* VORR */
dd8fbd78 4654 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4655 break;
4656 case 3: /* VORN */
f669df27 4657 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4658 break;
4659 case 4: /* VEOR */
dd8fbd78 4660 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4661 break;
4662 case 5: /* VBSL */
dd8fbd78
FN
4663 tmp3 = neon_load_reg(rd, pass);
4664 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4665 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4666 break;
4667 case 6: /* VBIT */
dd8fbd78
FN
4668 tmp3 = neon_load_reg(rd, pass);
4669 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4670 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4671 break;
4672 case 7: /* VBIF */
dd8fbd78
FN
4673 tmp3 = neon_load_reg(rd, pass);
4674 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4675 tcg_temp_free_i32(tmp3);
9ee6e8bb 4676 break;
2c0262af
FB
4677 }
4678 break;
62698be3 4679 case NEON_3R_VHSUB:
9ee6e8bb
PB
4680 GEN_NEON_INTEGER_OP(hsub);
4681 break;
62698be3 4682 case NEON_3R_VQSUB:
02da0b2d 4683 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4684 break;
62698be3 4685 case NEON_3R_VCGT:
9ee6e8bb
PB
4686 GEN_NEON_INTEGER_OP(cgt);
4687 break;
62698be3 4688 case NEON_3R_VCGE:
9ee6e8bb
PB
4689 GEN_NEON_INTEGER_OP(cge);
4690 break;
62698be3 4691 case NEON_3R_VSHL:
ad69471c 4692 GEN_NEON_INTEGER_OP(shl);
2c0262af 4693 break;
62698be3 4694 case NEON_3R_VQSHL:
02da0b2d 4695 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4696 break;
62698be3 4697 case NEON_3R_VRSHL:
ad69471c 4698 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4699 break;
62698be3 4700 case NEON_3R_VQRSHL:
02da0b2d 4701 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4702 break;
62698be3 4703 case NEON_3R_VMAX:
9ee6e8bb
PB
4704 GEN_NEON_INTEGER_OP(max);
4705 break;
62698be3 4706 case NEON_3R_VMIN:
9ee6e8bb
PB
4707 GEN_NEON_INTEGER_OP(min);
4708 break;
62698be3 4709 case NEON_3R_VABD:
9ee6e8bb
PB
4710 GEN_NEON_INTEGER_OP(abd);
4711 break;
62698be3 4712 case NEON_3R_VABA:
9ee6e8bb 4713 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4714 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4715 tmp2 = neon_load_reg(rd, pass);
4716 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4717 break;
62698be3 4718 case NEON_3R_VADD_VSUB:
9ee6e8bb 4719 if (!u) { /* VADD */
62698be3 4720 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4721 } else { /* VSUB */
4722 switch (size) {
dd8fbd78
FN
4723 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4724 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4725 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4726 default: abort();
9ee6e8bb
PB
4727 }
4728 }
4729 break;
62698be3 4730 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4731 if (!u) { /* VTST */
4732 switch (size) {
dd8fbd78
FN
4733 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4734 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4735 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4736 default: abort();
9ee6e8bb
PB
4737 }
4738 } else { /* VCEQ */
4739 switch (size) {
dd8fbd78
FN
4740 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4741 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4742 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4743 default: abort();
9ee6e8bb
PB
4744 }
4745 }
4746 break;
62698be3 4747        case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4748 switch (size) {
dd8fbd78
FN
4749 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4750 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4751 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4752 default: abort();
9ee6e8bb 4753 }
7d1b0095 4754 tcg_temp_free_i32(tmp2);
dd8fbd78 4755 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4756 if (u) { /* VMLS */
dd8fbd78 4757 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4758 } else { /* VMLA */
dd8fbd78 4759 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4760 }
4761 break;
62698be3 4762 case NEON_3R_VMUL:
9ee6e8bb 4763 if (u) { /* polynomial */
dd8fbd78 4764 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4765 } else { /* Integer */
4766 switch (size) {
dd8fbd78
FN
4767 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4768 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4769 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4770 default: abort();
9ee6e8bb
PB
4771 }
4772 }
4773 break;
62698be3 4774 case NEON_3R_VPMAX:
9ee6e8bb
PB
4775 GEN_NEON_INTEGER_OP(pmax);
4776 break;
62698be3 4777 case NEON_3R_VPMIN:
9ee6e8bb
PB
4778 GEN_NEON_INTEGER_OP(pmin);
4779 break;
62698be3 4780 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4781 if (!u) { /* VQDMULH */
4782 switch (size) {
02da0b2d
PM
4783 case 1:
4784 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4785 break;
4786 case 2:
4787 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4788 break;
62698be3 4789 default: abort();
9ee6e8bb 4790 }
62698be3 4791 } else { /* VQRDMULH */
9ee6e8bb 4792 switch (size) {
02da0b2d
PM
4793 case 1:
4794 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4795 break;
4796 case 2:
4797 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4798 break;
62698be3 4799 default: abort();
9ee6e8bb
PB
4800 }
4801 }
4802 break;
62698be3 4803 case NEON_3R_VPADD:
9ee6e8bb 4804 switch (size) {
dd8fbd78
FN
4805 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4806 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4807 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4808 default: abort();
9ee6e8bb
PB
4809 }
4810 break;
62698be3 4811 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4812 {
4813 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4814 switch ((u << 2) | size) {
4815 case 0: /* VADD */
aa47cfdd
PM
4816 case 4: /* VPADD */
4817 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4818 break;
4819 case 2: /* VSUB */
aa47cfdd 4820 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4821 break;
4822 case 6: /* VABD */
aa47cfdd 4823 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4824 break;
4825 default:
62698be3 4826 abort();
9ee6e8bb 4827 }
aa47cfdd 4828 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4829 break;
aa47cfdd 4830 }
62698be3 4831 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4832 {
4833 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4834 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4835 if (!u) {
7d1b0095 4836 tcg_temp_free_i32(tmp2);
dd8fbd78 4837 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4838 if (size == 0) {
aa47cfdd 4839 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4840 } else {
aa47cfdd 4841 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4842 }
4843 }
aa47cfdd 4844 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4845 break;
aa47cfdd 4846 }
62698be3 4847 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4848 {
4849 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4850 if (!u) {
aa47cfdd 4851 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4852 } else {
aa47cfdd
PM
4853 if (size == 0) {
4854 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4855 } else {
4856 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4857 }
b5ff1b31 4858 }
aa47cfdd 4859 tcg_temp_free_ptr(fpstatus);
2c0262af 4860 break;
aa47cfdd 4861 }
62698be3 4862 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4863 {
4864 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4865 if (size == 0) {
4866 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4867 } else {
4868 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4869 }
4870 tcg_temp_free_ptr(fpstatus);
2c0262af 4871 break;
aa47cfdd 4872 }
62698be3 4873 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4874 {
4875 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4876 if (size == 0) {
4877 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4878 } else {
4879 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4880 }
4881 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4882 break;
aa47cfdd 4883 }
62698be3 4884 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4885 if (size == 0)
dd8fbd78 4886 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4887 else
dd8fbd78 4888 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4889 break;
da97f52c
PM
4890 case NEON_3R_VFM:
4891 {
4892 /* VFMA, VFMS: fused multiply-add */
4893 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4894 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4895 if (size) {
4896 /* VFMS */
4897 gen_helper_vfp_negs(tmp, tmp);
4898 }
4899 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4900 tcg_temp_free_i32(tmp3);
4901 tcg_temp_free_ptr(fpstatus);
4902 break;
4903 }
9ee6e8bb
PB
4904 default:
4905 abort();
2c0262af 4906 }
7d1b0095 4907 tcg_temp_free_i32(tmp2);
dd8fbd78 4908
9ee6e8bb
PB
4909 /* Save the result. For elementwise operations we can put it
4910 straight into the destination register. For pairwise operations
4911 we have to be careful to avoid clobbering the source operands. */
4912 if (pairwise && rd == rm) {
dd8fbd78 4913 neon_store_scratch(pass, tmp);
9ee6e8bb 4914 } else {
dd8fbd78 4915 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4916 }
4917
4918 } /* for pass */
4919 if (pairwise && rd == rm) {
4920 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4921 tmp = neon_load_scratch(pass);
4922 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4923 }
4924 }
ad69471c 4925 /* End of 3 register same size operations. */
9ee6e8bb
PB
4926 } else if (insn & (1 << 4)) {
4927 if ((insn & 0x00380080) != 0) {
4928 /* Two registers and shift. */
4929 op = (insn >> 8) & 0xf;
4930 if (insn & (1 << 7)) {
cc13115b
PM
4931 /* 64-bit shift. */
4932 if (op > 7) {
4933 return 1;
4934 }
9ee6e8bb
PB
4935 size = 3;
4936 } else {
4937 size = 2;
4938 while ((insn & (1 << (size + 19))) == 0)
4939 size--;
4940 }
4941 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4942 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4943 by immediate using the variable shift operations. */
4944 if (op < 8) {
4945 /* Shift by immediate:
4946 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4947 if (q && ((rd | rm) & 1)) {
4948 return 1;
4949 }
4950 if (!u && (op == 4 || op == 6)) {
4951 return 1;
4952 }
9ee6e8bb
PB
4953 /* Right shifts are encoded as N - shift, where N is the
4954 element size in bits. */
4955 if (op <= 4)
4956 shift = shift - (1 << (size + 3));
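                /* Worked example of the encoding: with size == 0 (8-bit
                 * elements) a right shift by 7 is encoded as an immediate
                 * field of 8 - 7 = 1, so shift becomes 1 - 8 = -7 here; the
                 * variable-shift helpers below appear to treat a negative
                 * count as a right shift. */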
9ee6e8bb
PB
4957 if (size == 3) {
4958 count = q + 1;
4959 } else {
4960 count = q ? 4: 2;
4961 }
4962 switch (size) {
4963 case 0:
4964 imm = (uint8_t) shift;
4965 imm |= imm << 8;
4966 imm |= imm << 16;
4967 break;
4968 case 1:
4969 imm = (uint16_t) shift;
4970 imm |= imm << 16;
4971 break;
4972 case 2:
4973 case 3:
4974 imm = shift;
4975 break;
4976 default:
4977 abort();
4978 }
4979
4980 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4981 if (size == 3) {
4982 neon_load_reg64(cpu_V0, rm + pass);
4983 tcg_gen_movi_i64(cpu_V1, imm);
4984 switch (op) {
4985 case 0: /* VSHR */
4986 case 1: /* VSRA */
4987 if (u)
4988 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4989 else
ad69471c 4990 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4991 break;
ad69471c
PB
4992 case 2: /* VRSHR */
4993 case 3: /* VRSRA */
4994 if (u)
4995 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4996 else
ad69471c 4997 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4998 break;
ad69471c 4999 case 4: /* VSRI */
ad69471c
PB
5000 case 5: /* VSHL, VSLI */
5001 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5002 break;
0322b26e 5003 case 6: /* VQSHLU */
02da0b2d
PM
5004 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5005 cpu_V0, cpu_V1);
ad69471c 5006 break;
0322b26e
PM
5007 case 7: /* VQSHL */
5008 if (u) {
02da0b2d 5009 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5010 cpu_V0, cpu_V1);
5011 } else {
02da0b2d 5012 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5013 cpu_V0, cpu_V1);
5014 }
9ee6e8bb 5015 break;
9ee6e8bb 5016 }
ad69471c
PB
5017 if (op == 1 || op == 3) {
5018 /* Accumulate. */
5371cb81 5019 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5020 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5021 } else if (op == 4 || (op == 5 && u)) {
5022 /* Insert */
923e6509
CL
5023 neon_load_reg64(cpu_V1, rd + pass);
5024 uint64_t mask;
5025 if (shift < -63 || shift > 63) {
5026 mask = 0;
5027 } else {
5028 if (op == 4) {
5029 mask = 0xffffffffffffffffull >> -shift;
5030 } else {
5031 mask = 0xffffffffffffffffull << shift;
5032 }
5033 }
5034 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5035 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5036 }
5037 neon_store_reg64(cpu_V0, rd + pass);
5038 } else { /* size < 3 */
5039 /* Operands in T0 and T1. */
dd8fbd78 5040 tmp = neon_load_reg(rm, pass);
7d1b0095 5041 tmp2 = tcg_temp_new_i32();
dd8fbd78 5042 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5043 switch (op) {
5044 case 0: /* VSHR */
5045 case 1: /* VSRA */
5046 GEN_NEON_INTEGER_OP(shl);
5047 break;
5048 case 2: /* VRSHR */
5049 case 3: /* VRSRA */
5050 GEN_NEON_INTEGER_OP(rshl);
5051 break;
5052 case 4: /* VSRI */
ad69471c
PB
5053 case 5: /* VSHL, VSLI */
5054 switch (size) {
dd8fbd78
FN
5055 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5056 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5057 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5058 default: abort();
ad69471c
PB
5059 }
5060 break;
0322b26e 5061 case 6: /* VQSHLU */
ad69471c 5062 switch (size) {
0322b26e 5063 case 0:
02da0b2d
PM
5064 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5065 tmp, tmp2);
0322b26e
PM
5066 break;
5067 case 1:
02da0b2d
PM
5068 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5069 tmp, tmp2);
0322b26e
PM
5070 break;
5071 case 2:
02da0b2d
PM
5072 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5073 tmp, tmp2);
0322b26e
PM
5074 break;
5075 default:
cc13115b 5076 abort();
ad69471c
PB
5077 }
5078 break;
0322b26e 5079 case 7: /* VQSHL */
02da0b2d 5080 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5081 break;
ad69471c 5082 }
7d1b0095 5083 tcg_temp_free_i32(tmp2);
ad69471c
PB
5084
5085 if (op == 1 || op == 3) {
5086 /* Accumulate. */
dd8fbd78 5087 tmp2 = neon_load_reg(rd, pass);
5371cb81 5088 gen_neon_add(size, tmp, tmp2);
7d1b0095 5089 tcg_temp_free_i32(tmp2);
ad69471c
PB
5090 } else if (op == 4 || (op == 5 && u)) {
5091 /* Insert */
5092 switch (size) {
5093 case 0:
5094 if (op == 4)
ca9a32e4 5095 mask = 0xff >> -shift;
ad69471c 5096 else
ca9a32e4
JR
5097 mask = (uint8_t)(0xff << shift);
5098 mask |= mask << 8;
5099 mask |= mask << 16;
ad69471c
PB
5100 break;
5101 case 1:
5102 if (op == 4)
ca9a32e4 5103 mask = 0xffff >> -shift;
ad69471c 5104 else
ca9a32e4
JR
5105 mask = (uint16_t)(0xffff << shift);
5106 mask |= mask << 16;
ad69471c
PB
5107 break;
5108 case 2:
ca9a32e4
JR
5109 if (shift < -31 || shift > 31) {
5110 mask = 0;
5111 } else {
5112 if (op == 4)
5113 mask = 0xffffffffu >> -shift;
5114 else
5115 mask = 0xffffffffu << shift;
5116 }
ad69471c
PB
5117 break;
5118 default:
5119 abort();
5120 }
dd8fbd78 5121 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5122 tcg_gen_andi_i32(tmp, tmp, mask);
5123 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5124 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5125 tcg_temp_free_i32(tmp2);
ad69471c 5126 }
dd8fbd78 5127 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5128 }
5129 } /* for pass */
5130 } else if (op < 10) {
ad69471c 5131 /* Shift by immediate and narrow:
9ee6e8bb 5132 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5133 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5134 if (rm & 1) {
5135 return 1;
5136 }
9ee6e8bb
PB
5137 shift = shift - (1 << (size + 3));
5138 size++;
92cdfaeb 5139 if (size == 3) {
a7812ae4 5140 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5141 neon_load_reg64(cpu_V0, rm);
5142 neon_load_reg64(cpu_V1, rm + 1);
5143 for (pass = 0; pass < 2; pass++) {
5144 TCGv_i64 in;
5145 if (pass == 0) {
5146 in = cpu_V0;
5147 } else {
5148 in = cpu_V1;
5149 }
ad69471c 5150 if (q) {
0b36f4cd 5151 if (input_unsigned) {
92cdfaeb 5152 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5153 } else {
92cdfaeb 5154 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5155 }
ad69471c 5156 } else {
0b36f4cd 5157 if (input_unsigned) {
92cdfaeb 5158 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5159 } else {
92cdfaeb 5160 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5161 }
ad69471c 5162 }
7d1b0095 5163 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5164 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5165 neon_store_reg(rd, pass, tmp);
5166 } /* for pass */
5167 tcg_temp_free_i64(tmp64);
5168 } else {
5169 if (size == 1) {
5170 imm = (uint16_t)shift;
5171 imm |= imm << 16;
2c0262af 5172 } else {
92cdfaeb
PM
5173 /* size == 2 */
5174 imm = (uint32_t)shift;
5175 }
5176 tmp2 = tcg_const_i32(imm);
5177 tmp4 = neon_load_reg(rm + 1, 0);
5178 tmp5 = neon_load_reg(rm + 1, 1);
5179 for (pass = 0; pass < 2; pass++) {
5180 if (pass == 0) {
5181 tmp = neon_load_reg(rm, 0);
5182 } else {
5183 tmp = tmp4;
5184 }
0b36f4cd
CL
5185 gen_neon_shift_narrow(size, tmp, tmp2, q,
5186 input_unsigned);
92cdfaeb
PM
5187 if (pass == 0) {
5188 tmp3 = neon_load_reg(rm, 1);
5189 } else {
5190 tmp3 = tmp5;
5191 }
0b36f4cd
CL
5192 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5193 input_unsigned);
36aa55dc 5194 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5195 tcg_temp_free_i32(tmp);
5196 tcg_temp_free_i32(tmp3);
5197 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5198 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5199 neon_store_reg(rd, pass, tmp);
5200 } /* for pass */
c6067f04 5201 tcg_temp_free_i32(tmp2);
b75263d6 5202 }
9ee6e8bb 5203 } else if (op == 10) {
cc13115b
PM
5204 /* VSHLL, VMOVL */
5205 if (q || (rd & 1)) {
9ee6e8bb 5206 return 1;
cc13115b 5207 }
ad69471c
PB
5208 tmp = neon_load_reg(rm, 0);
5209 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5210 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5211 if (pass == 1)
5212 tmp = tmp2;
5213
5214 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5215
9ee6e8bb
PB
5216 if (shift != 0) {
5217 /* The shift is less than the width of the source
ad69471c
PB
5218 type, so we can just shift the whole register. */
5219 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5220 /* Widen the result of shift: we need to clear
5221 * the potential overflow bits resulting from
5222 * left bits of the narrow input appearing as
5223                     * right bits of the left neighbour narrow
5224 * input. */
ad69471c
PB
5225 if (size < 2 || !u) {
5226 uint64_t imm64;
5227 if (size == 0) {
5228 imm = (0xffu >> (8 - shift));
5229 imm |= imm << 16;
acdf01ef 5230 } else if (size == 1) {
ad69471c 5231 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5232 } else {
5233 /* size == 2 */
5234 imm = 0xffffffff >> (32 - shift);
5235 }
5236 if (size < 2) {
5237 imm64 = imm | (((uint64_t)imm) << 32);
5238 } else {
5239 imm64 = imm;
9ee6e8bb 5240 }
acdf01ef 5241 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5242 }
5243 }
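                    /* Worked example: for VSHLL with size == 0 and shift == 3,
                     * imm is 0xff >> 5 = 0x07 replicated into each 16-bit
                     * lane, so the andi with ~imm64 clears the three low bits
                     * of every widened lane, i.e. the bits that crossed over
                     * from the byte below during the single 64-bit left
                     * shift. */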
ad69471c 5244 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5245 }
f73534a5 5246 } else if (op >= 14) {
9ee6e8bb 5247 /* VCVT fixed-point. */
cc13115b
PM
5248 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5249 return 1;
5250 }
f73534a5
PM
5251 /* We have already masked out the must-be-1 top bit of imm6,
5252 * hence this 32-shift where the ARM ARM has 64-imm6.
5253 */
5254 shift = 32 - shift;
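            /* Worked example: an imm6 field of 0b110000 (48) means
             * 64 - 48 = 16 fraction bits; only its low five bits (16) survive
             * the earlier masking, and 32 - 16 recovers the same count. */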
9ee6e8bb 5255 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5256 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5257 if (!(op & 1)) {
9ee6e8bb 5258 if (u)
5500b06c 5259 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5260 else
5500b06c 5261 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5262 } else {
5263 if (u)
5500b06c 5264 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5265 else
5500b06c 5266 gen_vfp_tosl(0, shift, 1);
2c0262af 5267 }
4373f3ce 5268 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5269 }
5270 } else {
9ee6e8bb
PB
5271 return 1;
5272 }
5273 } else { /* (insn & 0x00380080) == 0 */
5274 int invert;
7d80fee5
PM
5275 if (q && (rd & 1)) {
5276 return 1;
5277 }
9ee6e8bb
PB
5278
5279 op = (insn >> 8) & 0xf;
5280 /* One register and immediate. */
5281 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5282 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5283 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5284 * We choose to not special-case this and will behave as if a
5285 * valid constant encoding of 0 had been given.
5286 */
9ee6e8bb
PB
5287 switch (op) {
5288 case 0: case 1:
5289 /* no-op */
5290 break;
5291 case 2: case 3:
5292 imm <<= 8;
5293 break;
5294 case 4: case 5:
5295 imm <<= 16;
5296 break;
5297 case 6: case 7:
5298 imm <<= 24;
5299 break;
5300 case 8: case 9:
5301 imm |= imm << 16;
5302 break;
5303 case 10: case 11:
5304 imm = (imm << 8) | (imm << 24);
5305 break;
5306 case 12:
8e31209e 5307 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5308 break;
5309 case 13:
5310 imm = (imm << 16) | 0xffff;
5311 break;
5312 case 14:
5313 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5314 if (invert)
5315 imm = ~imm;
5316 break;
5317 case 15:
7d80fee5
PM
5318 if (invert) {
5319 return 1;
5320 }
9ee6e8bb
PB
5321 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5322 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5323 break;
5324 }
5325 if (invert)
5326 imm = ~imm;
5327
9ee6e8bb
PB
5328 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5329 if (op & 1 && op < 12) {
ad69471c 5330 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5331 if (invert) {
5332 /* The immediate value has already been inverted, so
5333 BIC becomes AND. */
ad69471c 5334 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5335 } else {
ad69471c 5336 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5337 }
9ee6e8bb 5338 } else {
ad69471c 5339 /* VMOV, VMVN. */
7d1b0095 5340 tmp = tcg_temp_new_i32();
9ee6e8bb 5341 if (op == 14 && invert) {
a5a14945 5342 int n;
ad69471c
PB
5343 uint32_t val;
5344 val = 0;
9ee6e8bb
PB
5345 for (n = 0; n < 4; n++) {
5346 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5347 val |= 0xff << (n * 8);
9ee6e8bb 5348 }
ad69471c
PB
5349 tcg_gen_movi_i32(tmp, val);
5350 } else {
5351 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5352 }
9ee6e8bb 5353 }
ad69471c 5354 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5355 }
5356 }
e4b3861d 5357 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5358 if (size != 3) {
5359 op = (insn >> 8) & 0xf;
5360 if ((insn & (1 << 6)) == 0) {
5361 /* Three registers of different lengths. */
5362 int src1_wide;
5363 int src2_wide;
5364 int prewiden;
695272dc
PM
5365 /* undefreq: bit 0 : UNDEF if size != 0
5366 * bit 1 : UNDEF if size == 0
5367 * bit 2 : UNDEF if U == 1
5368 * Note that [1:0] set implies 'always UNDEF'
5369 */
5370 int undefreq;
5371 /* prewiden, src1_wide, src2_wide, undefreq */
5372 static const int neon_3reg_wide[16][4] = {
5373 {1, 0, 0, 0}, /* VADDL */
5374 {1, 1, 0, 0}, /* VADDW */
5375 {1, 0, 0, 0}, /* VSUBL */
5376 {1, 1, 0, 0}, /* VSUBW */
5377 {0, 1, 1, 0}, /* VADDHN */
5378 {0, 0, 0, 0}, /* VABAL */
5379 {0, 1, 1, 0}, /* VSUBHN */
5380 {0, 0, 0, 0}, /* VABDL */
5381 {0, 0, 0, 0}, /* VMLAL */
5382 {0, 0, 0, 6}, /* VQDMLAL */
5383 {0, 0, 0, 0}, /* VMLSL */
5384 {0, 0, 0, 6}, /* VQDMLSL */
5385 {0, 0, 0, 0}, /* Integer VMULL */
5386 {0, 0, 0, 2}, /* VQDMULL */
5387 {0, 0, 0, 5}, /* Polynomial VMULL */
5388 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5389 };
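                /* Reading the table: VQDMULL's undefreq of 2 makes it UNDEF
                 * when size == 0, polynomial VMULL's 5 (bits 0 and 2) makes
                 * it UNDEF unless size == 0 and U == 0, and entry 15's 3
                 * marks that encoding as always UNDEF. */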
5390
5391 prewiden = neon_3reg_wide[op][0];
5392 src1_wide = neon_3reg_wide[op][1];
5393 src2_wide = neon_3reg_wide[op][2];
695272dc 5394 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5395
695272dc
PM
5396 if (((undefreq & 1) && (size != 0)) ||
5397 ((undefreq & 2) && (size == 0)) ||
5398 ((undefreq & 4) && u)) {
5399 return 1;
5400 }
5401 if ((src1_wide && (rn & 1)) ||
5402 (src2_wide && (rm & 1)) ||
5403 (!src2_wide && (rd & 1))) {
ad69471c 5404 return 1;
695272dc 5405 }
ad69471c 5406
9ee6e8bb
PB
5407 /* Avoid overlapping operands. Wide source operands are
5408 always aligned so will never overlap with wide
5409 destinations in problematic ways. */
8f8e3aa4 5410 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5411 tmp = neon_load_reg(rm, 1);
5412 neon_store_scratch(2, tmp);
8f8e3aa4 5413 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5414 tmp = neon_load_reg(rn, 1);
5415 neon_store_scratch(2, tmp);
9ee6e8bb 5416 }
a50f5b91 5417 TCGV_UNUSED(tmp3);
9ee6e8bb 5418 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5419 if (src1_wide) {
5420 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5421 TCGV_UNUSED(tmp);
9ee6e8bb 5422 } else {
ad69471c 5423 if (pass == 1 && rd == rn) {
dd8fbd78 5424 tmp = neon_load_scratch(2);
9ee6e8bb 5425 } else {
ad69471c
PB
5426 tmp = neon_load_reg(rn, pass);
5427 }
5428 if (prewiden) {
5429 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5430 }
5431 }
ad69471c
PB
5432 if (src2_wide) {
5433 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5434 TCGV_UNUSED(tmp2);
9ee6e8bb 5435 } else {
ad69471c 5436 if (pass == 1 && rd == rm) {
dd8fbd78 5437 tmp2 = neon_load_scratch(2);
9ee6e8bb 5438 } else {
ad69471c
PB
5439 tmp2 = neon_load_reg(rm, pass);
5440 }
5441 if (prewiden) {
5442 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5443 }
9ee6e8bb
PB
5444 }
5445 switch (op) {
5446 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5447 gen_neon_addl(size);
9ee6e8bb 5448 break;
79b0e534 5449 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5450 gen_neon_subl(size);
9ee6e8bb
PB
5451 break;
5452 case 5: case 7: /* VABAL, VABDL */
5453 switch ((size << 1) | u) {
ad69471c
PB
5454 case 0:
5455 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5456 break;
5457 case 1:
5458 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5459 break;
5460 case 2:
5461 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5462 break;
5463 case 3:
5464 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5465 break;
5466 case 4:
5467 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5468 break;
5469 case 5:
5470 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5471 break;
9ee6e8bb
PB
5472 default: abort();
5473 }
7d1b0095
PM
5474 tcg_temp_free_i32(tmp2);
5475 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5476 break;
5477 case 8: case 9: case 10: case 11: case 12: case 13:
5478 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5479 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5480 break;
5481 case 14: /* Polynomial VMULL */
e5ca24cb 5482 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5483 tcg_temp_free_i32(tmp2);
5484 tcg_temp_free_i32(tmp);
e5ca24cb 5485 break;
695272dc
PM
5486 default: /* 15 is RESERVED: caught earlier */
5487 abort();
9ee6e8bb 5488 }
ebcd88ce
PM
5489 if (op == 13) {
5490 /* VQDMULL */
5491 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5492 neon_store_reg64(cpu_V0, rd + pass);
5493 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5494 /* Accumulate. */
ebcd88ce 5495 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5496 switch (op) {
4dc064e6
PM
5497 case 10: /* VMLSL */
5498 gen_neon_negl(cpu_V0, size);
5499 /* Fall through */
5500 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5501 gen_neon_addl(size);
9ee6e8bb
PB
5502 break;
5503 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5504 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5505 if (op == 11) {
5506 gen_neon_negl(cpu_V0, size);
5507 }
ad69471c
PB
5508 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5509 break;
9ee6e8bb
PB
5510 default:
5511 abort();
5512 }
ad69471c 5513 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5514 } else if (op == 4 || op == 6) {
5515 /* Narrowing operation. */
7d1b0095 5516 tmp = tcg_temp_new_i32();
79b0e534 5517 if (!u) {
9ee6e8bb 5518 switch (size) {
ad69471c
PB
5519 case 0:
5520 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5521 break;
5522 case 1:
5523 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5524 break;
5525 case 2:
5526 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5527 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5528 break;
9ee6e8bb
PB
5529 default: abort();
5530 }
5531 } else {
5532 switch (size) {
ad69471c
PB
5533 case 0:
5534 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5535 break;
5536 case 1:
5537 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5538 break;
5539 case 2:
5540 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5541 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5542 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5543 break;
9ee6e8bb
PB
5544 default: abort();
5545 }
5546 }
ad69471c
PB
5547 if (pass == 0) {
5548 tmp3 = tmp;
5549 } else {
5550 neon_store_reg(rd, 0, tmp3);
5551 neon_store_reg(rd, 1, tmp);
5552 }
9ee6e8bb
PB
5553 } else {
5554 /* Write back the result. */
ad69471c 5555 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5556 }
5557 }
5558 } else {
3e3326df
PM
5559 /* Two registers and a scalar. NB that for ops of this form
5560 * the ARM ARM labels bit 24 as Q, but it is in our variable
5561 * 'u', not 'q'.
5562 */
5563 if (size == 0) {
5564 return 1;
5565 }
9ee6e8bb 5566 switch (op) {
9ee6e8bb 5567 case 1: /* Float VMLA scalar */
9ee6e8bb 5568 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5569 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5570 if (size == 1) {
5571 return 1;
5572 }
5573 /* fall through */
5574 case 0: /* Integer VMLA scalar */
5575 case 4: /* Integer VMLS scalar */
5576 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5577 case 12: /* VQDMULH scalar */
5578 case 13: /* VQRDMULH scalar */
3e3326df
PM
5579 if (u && ((rd | rn) & 1)) {
5580 return 1;
5581 }
dd8fbd78
FN
5582 tmp = neon_get_scalar(size, rm);
5583 neon_store_scratch(0, tmp);
9ee6e8bb 5584 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5585 tmp = neon_load_scratch(0);
5586 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5587 if (op == 12) {
5588 if (size == 1) {
02da0b2d 5589 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5590 } else {
02da0b2d 5591 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5592 }
5593 } else if (op == 13) {
5594 if (size == 1) {
02da0b2d 5595 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5596 } else {
02da0b2d 5597 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5598 }
5599 } else if (op & 1) {
aa47cfdd
PM
5600 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5601 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5602 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5603 } else {
5604 switch (size) {
dd8fbd78
FN
5605 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5606 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5607 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5608 default: abort();
9ee6e8bb
PB
5609 }
5610 }
7d1b0095 5611 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5612 if (op < 8) {
5613 /* Accumulate. */
dd8fbd78 5614 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5615 switch (op) {
5616 case 0:
dd8fbd78 5617 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5618 break;
5619 case 1:
aa47cfdd
PM
5620 {
5621 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5622 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5623 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5624 break;
aa47cfdd 5625 }
9ee6e8bb 5626 case 4:
dd8fbd78 5627 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5628 break;
5629 case 5:
aa47cfdd
PM
5630 {
5631 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5632 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5633 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5634 break;
aa47cfdd 5635 }
9ee6e8bb
PB
5636 default:
5637 abort();
5638 }
7d1b0095 5639 tcg_temp_free_i32(tmp2);
9ee6e8bb 5640 }
dd8fbd78 5641 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5642 }
5643 break;
9ee6e8bb 5644 case 3: /* VQDMLAL scalar */
9ee6e8bb 5645 case 7: /* VQDMLSL scalar */
9ee6e8bb 5646 case 11: /* VQDMULL scalar */
3e3326df 5647 if (u == 1) {
ad69471c 5648 return 1;
3e3326df
PM
5649 }
5650 /* fall through */
5651 case 2: /* VMLAL scalar */
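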
5652 case 6: /* VMLSL scalar */
5653 case 10: /* VMULL scalar */
5654 if (rd & 1) {
5655 return 1;
5656 }
dd8fbd78 5657 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5658 /* We need a copy of tmp2 because gen_neon_mull
5659 * deletes it during pass 0. */
7d1b0095 5660 tmp4 = tcg_temp_new_i32();
c6067f04 5661 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5662 tmp3 = neon_load_reg(rn, 1);
ad69471c 5663
9ee6e8bb 5664 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5665 if (pass == 0) {
5666 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5667 } else {
dd8fbd78 5668 tmp = tmp3;
c6067f04 5669 tmp2 = tmp4;
9ee6e8bb 5670 }
ad69471c 5671 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5672 if (op != 11) {
5673 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5674 }
9ee6e8bb 5675 switch (op) {
4dc064e6
PM
5676 case 6:
5677 gen_neon_negl(cpu_V0, size);
5678 /* Fall through */
5679 case 2:
ad69471c 5680 gen_neon_addl(size);
9ee6e8bb
PB
5681 break;
5682 case 3: case 7:
ad69471c 5683 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5684 if (op == 7) {
5685 gen_neon_negl(cpu_V0, size);
5686 }
ad69471c 5687 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5688 break;
5689 case 10:
5690 /* no-op */
5691 break;
5692 case 11:
ad69471c 5693 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5694 break;
5695 default:
5696 abort();
5697 }
ad69471c 5698 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5699 }
dd8fbd78 5700
dd8fbd78 5701
9ee6e8bb
PB
5702 break;
5703 default: /* 14 and 15 are RESERVED */
5704 return 1;
5705 }
5706 }
5707 } else { /* size == 3 */
5708 if (!u) {
5709 /* Extract. */
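/* VEXT: the result is a contiguous byte window taken from the source
   pair, starting at byte 'imm' of Vn and continuing into Vm; it is
   assembled below with 64-bit shifts and ORs on the doubleword halves. */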
9ee6e8bb 5710 imm = (insn >> 8) & 0xf;
ad69471c
PB
5711
5712 if (imm > 7 && !q)
5713 return 1;
5714
52579ea1
PM
5715 if (q && ((rd | rn | rm) & 1)) {
5716 return 1;
5717 }
5718
ad69471c
PB
5719 if (imm == 0) {
5720 neon_load_reg64(cpu_V0, rn);
5721 if (q) {
5722 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5723 }
ad69471c
PB
5724 } else if (imm == 8) {
5725 neon_load_reg64(cpu_V0, rn + 1);
5726 if (q) {
5727 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5728 }
ad69471c 5729 } else if (q) {
a7812ae4 5730 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5731 if (imm < 8) {
5732 neon_load_reg64(cpu_V0, rn);
a7812ae4 5733 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5734 } else {
5735 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5736 neon_load_reg64(tmp64, rm);
ad69471c
PB
5737 }
5738 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5739 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5740 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5741 if (imm < 8) {
5742 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5743 } else {
ad69471c
PB
5744 neon_load_reg64(cpu_V1, rm + 1);
5745 imm -= 8;
9ee6e8bb 5746 }
ad69471c 5747 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5748 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5749 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5750 tcg_temp_free_i64(tmp64);
ad69471c 5751 } else {
a7812ae4 5752 /* BUGFIX */
ad69471c 5753 neon_load_reg64(cpu_V0, rn);
a7812ae4 5754 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5755 neon_load_reg64(cpu_V1, rm);
a7812ae4 5756 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5757 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5758 }
5759 neon_store_reg64(cpu_V0, rd);
5760 if (q) {
5761 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5762 }
5763 } else if ((insn & (1 << 11)) == 0) {
5764 /* Two register misc. */
5765 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5766 size = (insn >> 18) & 3;
600b828c
PM
5767 /* UNDEF for unknown op values and bad op-size combinations */
5768 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5769 return 1;
5770 }
fc2a9b37
PM
5771 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5772 q && ((rm | rd) & 1)) {
5773 return 1;
5774 }
9ee6e8bb 5775 switch (op) {
600b828c 5776 case NEON_2RM_VREV64:
9ee6e8bb 5777 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5778 tmp = neon_load_reg(rm, pass * 2);
5779 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5780 switch (size) {
dd8fbd78
FN
5781 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5782 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5783 case 2: /* no-op */ break;
5784 default: abort();
5785 }
dd8fbd78 5786 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5787 if (size == 2) {
dd8fbd78 5788 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5789 } else {
9ee6e8bb 5790 switch (size) {
dd8fbd78
FN
5791 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5792 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5793 default: abort();
5794 }
dd8fbd78 5795 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5796 }
5797 }
5798 break;
600b828c
PM
5799 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5800 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5801 for (pass = 0; pass < q + 1; pass++) {
5802 tmp = neon_load_reg(rm, pass * 2);
5803 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5804 tmp = neon_load_reg(rm, pass * 2 + 1);
5805 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5806 switch (size) {
5807 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5808 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5809 case 2: tcg_gen_add_i64(CPU_V001); break;
5810 default: abort();
5811 }
600b828c 5812 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5813 /* Accumulate. */
ad69471c
PB
5814 neon_load_reg64(cpu_V1, rd + pass);
5815 gen_neon_addl(size);
9ee6e8bb 5816 }
ad69471c 5817 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5818 }
5819 break;
600b828c 5820 case NEON_2RM_VTRN:
9ee6e8bb 5821 if (size == 2) {
a5a14945 5822 int n;
9ee6e8bb 5823 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5824 tmp = neon_load_reg(rm, n);
5825 tmp2 = neon_load_reg(rd, n + 1);
5826 neon_store_reg(rm, n, tmp2);
5827 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5828 }
5829 } else {
5830 goto elementwise;
5831 }
5832 break;
600b828c 5833 case NEON_2RM_VUZP:
02acedf9 5834 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5835 return 1;
9ee6e8bb
PB
5836 }
5837 break;
600b828c 5838 case NEON_2RM_VZIP:
d68a6f3a 5839 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5840 return 1;
9ee6e8bb
PB
5841 }
5842 break;
600b828c
PM
5843 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5844 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5845 if (rm & 1) {
5846 return 1;
5847 }
a50f5b91 5848 TCGV_UNUSED(tmp2);
9ee6e8bb 5849 for (pass = 0; pass < 2; pass++) {
ad69471c 5850 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5851 tmp = tcg_temp_new_i32();
600b828c
PM
5852 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5853 tmp, cpu_V0);
ad69471c
PB
5854 if (pass == 0) {
5855 tmp2 = tmp;
5856 } else {
5857 neon_store_reg(rd, 0, tmp2);
5858 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5859 }
9ee6e8bb
PB
5860 }
5861 break;
600b828c 5862 case NEON_2RM_VSHLL:
fc2a9b37 5863 if (q || (rd & 1)) {
9ee6e8bb 5864 return 1;
600b828c 5865 }
ad69471c
PB
5866 tmp = neon_load_reg(rm, 0);
5867 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5868 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5869 if (pass == 1)
5870 tmp = tmp2;
5871 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5872 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5873 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5874 }
5875 break;
600b828c 5876 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5877 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5878 q || (rm & 1)) {
5879 return 1;
5880 }
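/* Convert the four single-precision words of Qm to half precision and
   pack them pairwise (low element in the low half) into the two words
   of Dd. */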
7d1b0095
PM
5881 tmp = tcg_temp_new_i32();
5882 tmp2 = tcg_temp_new_i32();
60011498 5883 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5884 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5885 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5886 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5887 tcg_gen_shli_i32(tmp2, tmp2, 16);
5888 tcg_gen_or_i32(tmp2, tmp2, tmp);
5889 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5890 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5891 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5892 neon_store_reg(rd, 0, tmp2);
7d1b0095 5893 tmp2 = tcg_temp_new_i32();
2d981da7 5894 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5895 tcg_gen_shli_i32(tmp2, tmp2, 16);
5896 tcg_gen_or_i32(tmp2, tmp2, tmp);
5897 neon_store_reg(rd, 1, tmp2);
7d1b0095 5898 tcg_temp_free_i32(tmp);
60011498 5899 break;
600b828c 5900 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5901 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5902 q || (rd & 1)) {
5903 return 1;
5904 }
7d1b0095 5905 tmp3 = tcg_temp_new_i32();
60011498
PB
5906 tmp = neon_load_reg(rm, 0);
5907 tmp2 = neon_load_reg(rm, 1);
5908 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5909 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5910 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5911 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5912 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5913 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5914 tcg_temp_free_i32(tmp);
60011498 5915 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5916 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5917 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5918 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5919 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5920 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5921 tcg_temp_free_i32(tmp2);
5922 tcg_temp_free_i32(tmp3);
60011498 5923 break;
9ee6e8bb
PB
5924 default:
5925 elementwise:
5926 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5927 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5928 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5929 neon_reg_offset(rm, pass));
dd8fbd78 5930 TCGV_UNUSED(tmp);
9ee6e8bb 5931 } else {
dd8fbd78 5932 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5933 }
5934 switch (op) {
600b828c 5935 case NEON_2RM_VREV32:
9ee6e8bb 5936 switch (size) {
dd8fbd78
FN
5937 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5938 case 1: gen_swap_half(tmp); break;
600b828c 5939 default: abort();
9ee6e8bb
PB
5940 }
5941 break;
600b828c 5942 case NEON_2RM_VREV16:
dd8fbd78 5943 gen_rev16(tmp);
9ee6e8bb 5944 break;
600b828c 5945 case NEON_2RM_VCLS:
9ee6e8bb 5946 switch (size) {
dd8fbd78
FN
5947 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5948 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5949 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5950 default: abort();
9ee6e8bb
PB
5951 }
5952 break;
600b828c 5953 case NEON_2RM_VCLZ:
9ee6e8bb 5954 switch (size) {
dd8fbd78
FN
5955 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5956 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5957 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5958 default: abort();
9ee6e8bb
PB
5959 }
5960 break;
600b828c 5961 case NEON_2RM_VCNT:
dd8fbd78 5962 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5963 break;
600b828c 5964 case NEON_2RM_VMVN:
dd8fbd78 5965 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5966 break;
600b828c 5967 case NEON_2RM_VQABS:
9ee6e8bb 5968 switch (size) {
02da0b2d
PM
5969 case 0:
5970 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5971 break;
5972 case 1:
5973 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5974 break;
5975 case 2:
5976 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5977 break;
600b828c 5978 default: abort();
9ee6e8bb
PB
5979 }
5980 break;
600b828c 5981 case NEON_2RM_VQNEG:
9ee6e8bb 5982 switch (size) {
02da0b2d
PM
5983 case 0:
5984 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5985 break;
5986 case 1:
5987 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5988 break;
5989 case 2:
5990 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5991 break;
600b828c 5992 default: abort();
9ee6e8bb
PB
5993 }
5994 break;
600b828c 5995 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5996 tmp2 = tcg_const_i32(0);
9ee6e8bb 5997 switch(size) {
dd8fbd78
FN
5998 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5999 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6000 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6001 default: abort();
9ee6e8bb 6002 }
dd8fbd78 6003 tcg_temp_free(tmp2);
600b828c 6004 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6005 tcg_gen_not_i32(tmp, tmp);
600b828c 6006 }
9ee6e8bb 6007 break;
600b828c 6008 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6009 tmp2 = tcg_const_i32(0);
9ee6e8bb 6010 switch(size) {
dd8fbd78
FN
6011 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6012 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6013 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6014 default: abort();
9ee6e8bb 6015 }
dd8fbd78 6016 tcg_temp_free(tmp2);
600b828c 6017 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6018 tcg_gen_not_i32(tmp, tmp);
600b828c 6019 }
9ee6e8bb 6020 break;
600b828c 6021 case NEON_2RM_VCEQ0:
dd8fbd78 6022 tmp2 = tcg_const_i32(0);
9ee6e8bb 6023 switch(size) {
dd8fbd78
FN
6024 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6025 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6026 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6027 default: abort();
9ee6e8bb 6028 }
dd8fbd78 6029 tcg_temp_free(tmp2);
9ee6e8bb 6030 break;
600b828c 6031 case NEON_2RM_VABS:
9ee6e8bb 6032 switch(size) {
dd8fbd78
FN
6033 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6034 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6035 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6036 default: abort();
9ee6e8bb
PB
6037 }
6038 break;
600b828c 6039 case NEON_2RM_VNEG:
dd8fbd78
FN
6040 tmp2 = tcg_const_i32(0);
6041 gen_neon_rsb(size, tmp, tmp2);
6042 tcg_temp_free(tmp2);
9ee6e8bb 6043 break;
600b828c 6044 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6045 {
6046 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6047 tmp2 = tcg_const_i32(0);
aa47cfdd 6048 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6049 tcg_temp_free(tmp2);
aa47cfdd 6050 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6051 break;
aa47cfdd 6052 }
600b828c 6053 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6054 {
6055 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6056 tmp2 = tcg_const_i32(0);
aa47cfdd 6057 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6058 tcg_temp_free(tmp2);
aa47cfdd 6059 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6060 break;
aa47cfdd 6061 }
600b828c 6062 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6063 {
6064 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6065 tmp2 = tcg_const_i32(0);
aa47cfdd 6066 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6067 tcg_temp_free(tmp2);
aa47cfdd 6068 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6069 break;
aa47cfdd 6070 }
600b828c 6071 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6072 {
6073 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6074 tmp2 = tcg_const_i32(0);
aa47cfdd 6075 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6076 tcg_temp_free(tmp2);
aa47cfdd 6077 tcg_temp_free_ptr(fpstatus);
0e326109 6078 break;
aa47cfdd 6079 }
600b828c 6080 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6081 {
6082 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6083 tmp2 = tcg_const_i32(0);
aa47cfdd 6084 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6085 tcg_temp_free(tmp2);
aa47cfdd 6086 tcg_temp_free_ptr(fpstatus);
0e326109 6087 break;
aa47cfdd 6088 }
600b828c 6089 case NEON_2RM_VABS_F:
4373f3ce 6090 gen_vfp_abs(0);
9ee6e8bb 6091 break;
600b828c 6092 case NEON_2RM_VNEG_F:
4373f3ce 6093 gen_vfp_neg(0);
9ee6e8bb 6094 break;
600b828c 6095 case NEON_2RM_VSWP:
dd8fbd78
FN
6096 tmp2 = neon_load_reg(rd, pass);
6097 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6098 break;
600b828c 6099 case NEON_2RM_VTRN:
dd8fbd78 6100 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6101 switch (size) {
dd8fbd78
FN
6102 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6103 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6104 default: abort();
9ee6e8bb 6105 }
dd8fbd78 6106 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6107 break;
600b828c 6108 case NEON_2RM_VRECPE:
dd8fbd78 6109 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6110 break;
600b828c 6111 case NEON_2RM_VRSQRTE:
dd8fbd78 6112 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6113 break;
600b828c 6114 case NEON_2RM_VRECPE_F:
4373f3ce 6115 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6116 break;
600b828c 6117 case NEON_2RM_VRSQRTE_F:
4373f3ce 6118 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6119 break;
600b828c 6120 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6121 gen_vfp_sito(0, 1);
9ee6e8bb 6122 break;
600b828c 6123 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6124 gen_vfp_uito(0, 1);
9ee6e8bb 6125 break;
600b828c 6126 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6127 gen_vfp_tosiz(0, 1);
9ee6e8bb 6128 break;
600b828c 6129 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6130 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6131 break;
6132 default:
600b828c
PM
6133 /* Reserved op values were caught by the
6134 * neon_2rm_sizes[] check earlier.
6135 */
6136 abort();
9ee6e8bb 6137 }
600b828c 6138 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6139 tcg_gen_st_f32(cpu_F0s, cpu_env,
6140 neon_reg_offset(rd, pass));
9ee6e8bb 6141 } else {
dd8fbd78 6142 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6143 }
6144 }
6145 break;
6146 }
6147 } else if ((insn & (1 << 10)) == 0) {
6148 /* VTBL, VTBX. */
56907d77
PM
6149 int n = ((insn >> 8) & 3) + 1;
6150 if ((rn + n) > 32) {
6151 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6152 * helper function running off the end of the register file.
6153 */
6154 return 1;
6155 }
6156 n <<= 3;
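/* n is now the table length in byte elements (8 per D register).  The
   tbl helper handles one destination doubleword per call; for VTBX
   (bit 6 set) out-of-range indices keep the old destination byte, for
   VTBL they yield zero, which is why tmp is preloaded accordingly. */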
9ee6e8bb 6157 if (insn & (1 << 6)) {
8f8e3aa4 6158 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6159 } else {
7d1b0095 6160 tmp = tcg_temp_new_i32();
8f8e3aa4 6161 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6162 }
8f8e3aa4 6163 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6164 tmp4 = tcg_const_i32(rn);
6165 tmp5 = tcg_const_i32(n);
9ef39277 6166 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6167 tcg_temp_free_i32(tmp);
9ee6e8bb 6168 if (insn & (1 << 6)) {
8f8e3aa4 6169 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6170 } else {
7d1b0095 6171 tmp = tcg_temp_new_i32();
8f8e3aa4 6172 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6173 }
8f8e3aa4 6174 tmp3 = neon_load_reg(rm, 1);
9ef39277 6175 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6176 tcg_temp_free_i32(tmp5);
6177 tcg_temp_free_i32(tmp4);
8f8e3aa4 6178 neon_store_reg(rd, 0, tmp2);
3018f259 6179 neon_store_reg(rd, 1, tmp3);
7d1b0095 6180 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6181 } else if ((insn & 0x380) == 0) {
6182 /* VDUP */
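/* Duplicate one scalar element of Dm across all elements of the
   destination; the element size and index are encoded in bits [19:16]. */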
133da6aa
JR
6183 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6184 return 1;
6185 }
9ee6e8bb 6186 if (insn & (1 << 19)) {
dd8fbd78 6187 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6188 } else {
dd8fbd78 6189 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6190 }
6191 if (insn & (1 << 16)) {
dd8fbd78 6192 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6193 } else if (insn & (1 << 17)) {
6194 if ((insn >> 18) & 1)
dd8fbd78 6195 gen_neon_dup_high16(tmp);
9ee6e8bb 6196 else
dd8fbd78 6197 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6198 }
6199 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6200 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6201 tcg_gen_mov_i32(tmp2, tmp);
6202 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6203 }
7d1b0095 6204 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6205 } else {
6206 return 1;
6207 }
6208 }
6209 }
6210 return 0;
6211}
6212
0ecb72a5 6213static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6214{
4b6a83fb
PM
6215 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6216 const ARMCPRegInfo *ri;
6217 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6218
6219 cpnum = (insn >> 8) & 0xf;
6220 if (arm_feature(env, ARM_FEATURE_XSCALE)
6221 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6222 return 1;
6223
4b6a83fb 6224 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6225 switch (cpnum) {
6226 case 0:
6227 case 1:
6228 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6229 return disas_iwmmxt_insn(env, s, insn);
6230 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6231 return disas_dsp_insn(env, s, insn);
6232 }
6233 return 1;
6234 case 10:
6235 case 11:
6236 return disas_vfp_insn (env, s, insn);
4b6a83fb
PM
6237 default:
6238 break;
6239 }
6240
6241 /* Otherwise treat as a generic register access */
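/* Bit 25 clear selects the 64-bit (MCRR/MRRC) form with two core
   registers rt/rt2; otherwise this is a 32-bit MCR/MRC.  The coprocessor
   number and crn/crm/opc1/opc2 fields are packed into a key and looked
   up with get_arm_cp_reginfo() below. */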
6242 is64 = (insn & (1 << 25)) == 0;
6243 if (!is64 && ((insn & (1 << 4)) == 0)) {
6244 /* cdp */
6245 return 1;
6246 }
6247
6248 crm = insn & 0xf;
6249 if (is64) {
6250 crn = 0;
6251 opc1 = (insn >> 4) & 0xf;
6252 opc2 = 0;
6253 rt2 = (insn >> 16) & 0xf;
6254 } else {
6255 crn = (insn >> 16) & 0xf;
6256 opc1 = (insn >> 21) & 7;
6257 opc2 = (insn >> 5) & 7;
6258 rt2 = 0;
6259 }
6260 isread = (insn >> 20) & 1;
6261 rt = (insn >> 12) & 0xf;
6262
6263 ri = get_arm_cp_reginfo(cpu,
6264 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6265 if (ri) {
6266 /* Check access permissions */
6267 if (!cp_access_ok(env, ri, isread)) {
6268 return 1;
6269 }
6270
6271 /* Handle special cases first */
6272 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6273 case ARM_CP_NOP:
6274 return 0;
6275 case ARM_CP_WFI:
6276 if (isread) {
6277 return 1;
6278 }
6279 gen_set_pc_im(s->pc);
6280 s->is_jmp = DISAS_WFI;
2bee5105 6281 return 0;
4b6a83fb
PM
6282 default:
6283 break;
6284 }
6285
6286 if (isread) {
6287 /* Read */
6288 if (is64) {
6289 TCGv_i64 tmp64;
6290 TCGv_i32 tmp;
6291 if (ri->type & ARM_CP_CONST) {
6292 tmp64 = tcg_const_i64(ri->resetvalue);
6293 } else if (ri->readfn) {
6294 TCGv_ptr tmpptr;
6295 gen_set_pc_im(s->pc);
6296 tmp64 = tcg_temp_new_i64();
6297 tmpptr = tcg_const_ptr(ri);
6298 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6299 tcg_temp_free_ptr(tmpptr);
6300 } else {
6301 tmp64 = tcg_temp_new_i64();
6302 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6303 }
6304 tmp = tcg_temp_new_i32();
6305 tcg_gen_trunc_i64_i32(tmp, tmp64);
6306 store_reg(s, rt, tmp);
6307 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6308 tmp = tcg_temp_new_i32();
4b6a83fb 6309 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6310 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6311 store_reg(s, rt2, tmp);
6312 } else {
6313 TCGv tmp;
6314 if (ri->type & ARM_CP_CONST) {
6315 tmp = tcg_const_i32(ri->resetvalue);
6316 } else if (ri->readfn) {
6317 TCGv_ptr tmpptr;
6318 gen_set_pc_im(s->pc);
6319 tmp = tcg_temp_new_i32();
6320 tmpptr = tcg_const_ptr(ri);
6321 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6322 tcg_temp_free_ptr(tmpptr);
6323 } else {
6324 tmp = load_cpu_offset(ri->fieldoffset);
6325 }
6326 if (rt == 15) {
6327 /* Destination register of r15 for 32 bit loads sets
6328 * the condition codes from the high 4 bits of the value
6329 */
6330 gen_set_nzcv(tmp);
6331 tcg_temp_free_i32(tmp);
6332 } else {
6333 store_reg(s, rt, tmp);
6334 }
6335 }
6336 } else {
6337 /* Write */
6338 if (ri->type & ARM_CP_CONST) {
6339 /* If not forbidden by access permissions, treat as WI */
6340 return 0;
6341 }
6342
6343 if (is64) {
6344 TCGv tmplo, tmphi;
6345 TCGv_i64 tmp64 = tcg_temp_new_i64();
6346 tmplo = load_reg(s, rt);
6347 tmphi = load_reg(s, rt2);
6348 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6349 tcg_temp_free_i32(tmplo);
6350 tcg_temp_free_i32(tmphi);
6351 if (ri->writefn) {
6352 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6353 gen_set_pc_im(s->pc);
6354 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6355 tcg_temp_free_ptr(tmpptr);
6356 } else {
6357 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6358 }
6359 tcg_temp_free_i64(tmp64);
6360 } else {
6361 if (ri->writefn) {
6362 TCGv tmp;
6363 TCGv_ptr tmpptr;
6364 gen_set_pc_im(s->pc);
6365 tmp = load_reg(s, rt);
6366 tmpptr = tcg_const_ptr(ri);
6367 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6368 tcg_temp_free_ptr(tmpptr);
6369 tcg_temp_free_i32(tmp);
6370 } else {
6371 TCGv tmp = load_reg(s, rt);
6372 store_cpu_offset(tmp, ri->fieldoffset);
6373 }
6374 }
6375 /* We default to ending the TB on a coprocessor register write,
6376 * but allow this to be suppressed by the register definition
6377 * (usually only necessary to work around guest bugs).
6378 */
6379 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6380 gen_lookup_tb(s);
6381 }
6382 }
6383 return 0;
6384 }
6385
4a9a539f 6386 return 1;
9ee6e8bb
PB
6387}
6388
5e3f878a
PB
6389
6390/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6391static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6392{
6393 TCGv tmp;
7d1b0095 6394 tmp = tcg_temp_new_i32();
5e3f878a
PB
6395 tcg_gen_trunc_i64_i32(tmp, val);
6396 store_reg(s, rlow, tmp);
7d1b0095 6397 tmp = tcg_temp_new_i32();
5e3f878a
PB
6398 tcg_gen_shri_i64(val, val, 32);
6399 tcg_gen_trunc_i64_i32(tmp, val);
6400 store_reg(s, rhigh, tmp);
6401}
6402
6403/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6404static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6405{
a7812ae4 6406 TCGv_i64 tmp;
5e3f878a
PB
6407 TCGv tmp2;
6408
36aa55dc 6409 /* Load value and extend to 64 bits. */
a7812ae4 6410 tmp = tcg_temp_new_i64();
5e3f878a
PB
6411 tmp2 = load_reg(s, rlow);
6412 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6413 tcg_temp_free_i32(tmp2);
5e3f878a 6414 tcg_gen_add_i64(val, val, tmp);
b75263d6 6415 tcg_temp_free_i64(tmp);
5e3f878a
PB
6416}
6417
6418/* load and add a 64-bit value from a register pair. */
a7812ae4 6419static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6420{
a7812ae4 6421 TCGv_i64 tmp;
36aa55dc
PB
6422 TCGv tmpl;
6423 TCGv tmph;
5e3f878a
PB
6424
6425 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6426 tmpl = load_reg(s, rlow);
6427 tmph = load_reg(s, rhigh);
a7812ae4 6428 tmp = tcg_temp_new_i64();
36aa55dc 6429 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6430 tcg_temp_free_i32(tmpl);
6431 tcg_temp_free_i32(tmph);
5e3f878a 6432 tcg_gen_add_i64(val, val, tmp);
b75263d6 6433 tcg_temp_free_i64(tmp);
5e3f878a
PB
6434}
6435
c9f10124
RH
6436/* Set N and Z flags from hi|lo. */
6437static void gen_logicq_cc(TCGv lo, TCGv hi)
5e3f878a 6438{
c9f10124
RH
6439 tcg_gen_mov_i32(cpu_NF, hi);
6440 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6441}
6442
426f5abc
PB
6443/* Load/Store exclusive instructions are implemented by remembering
6444 the value/address loaded, and seeing if these are the same
b90372ad 6445 when the store is performed. This should be sufficient to implement
426f5abc
PB
6446 the architecturally mandated semantics, and avoids having to monitor
6447 regular stores.
6448
6449 In system emulation mode only one CPU will be running at once, so
6450 this sequence is effectively atomic. In user emulation mode we
6451 throw an exception and handle the atomic operation elsewhere. */
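/* cpu_exclusive_addr/val/high hold the monitored address and the data
   observed by the exclusive load; an address of -1 (set by gen_clrex and
   by the store sequence) means no exclusive access is outstanding. */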
6452static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6453 TCGv addr, int size)
6454{
6455 TCGv tmp;
6456
6457 switch (size) {
6458 case 0:
6459 tmp = gen_ld8u(addr, IS_USER(s));
6460 break;
6461 case 1:
6462 tmp = gen_ld16u(addr, IS_USER(s));
6463 break;
6464 case 2:
6465 case 3:
6466 tmp = gen_ld32(addr, IS_USER(s));
6467 break;
6468 default:
6469 abort();
6470 }
6471 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6472 store_reg(s, rt, tmp);
6473 if (size == 3) {
7d1b0095 6474 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6475 tcg_gen_addi_i32(tmp2, addr, 4);
6476 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6477 tcg_temp_free_i32(tmp2);
426f5abc
PB
6478 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6479 store_reg(s, rt2, tmp);
6480 }
6481 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6482}
6483
6484static void gen_clrex(DisasContext *s)
6485{
6486 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6487}
6488
6489#ifdef CONFIG_USER_ONLY
6490static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6491 TCGv addr, int size)
6492{
6493 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6494 tcg_gen_movi_i32(cpu_exclusive_info,
6495 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6496 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6497}
6498#else
6499static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6500 TCGv addr, int size)
6501{
6502 TCGv tmp;
6503 int done_label;
6504 int fail_label;
6505
6506 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6507 [addr] = {Rt};
6508 {Rd} = 0;
6509 } else {
6510 {Rd} = 1;
6511 } */
6512 fail_label = gen_new_label();
6513 done_label = gen_new_label();
6514 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6515 switch (size) {
6516 case 0:
6517 tmp = gen_ld8u(addr, IS_USER(s));
6518 break;
6519 case 1:
6520 tmp = gen_ld16u(addr, IS_USER(s));
6521 break;
6522 case 2:
6523 case 3:
6524 tmp = gen_ld32(addr, IS_USER(s));
6525 break;
6526 default:
6527 abort();
6528 }
6529 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6530 tcg_temp_free_i32(tmp);
426f5abc 6531 if (size == 3) {
7d1b0095 6532 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6533 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6534 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6535 tcg_temp_free_i32(tmp2);
426f5abc 6536 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6537 tcg_temp_free_i32(tmp);
426f5abc
PB
6538 }
6539 tmp = load_reg(s, rt);
6540 switch (size) {
6541 case 0:
6542 gen_st8(tmp, addr, IS_USER(s));
6543 break;
6544 case 1:
6545 gen_st16(tmp, addr, IS_USER(s));
6546 break;
6547 case 2:
6548 case 3:
6549 gen_st32(tmp, addr, IS_USER(s));
6550 break;
6551 default:
6552 abort();
6553 }
6554 if (size == 3) {
6555 tcg_gen_addi_i32(addr, addr, 4);
6556 tmp = load_reg(s, rt2);
6557 gen_st32(tmp, addr, IS_USER(s));
6558 }
6559 tcg_gen_movi_i32(cpu_R[rd], 0);
6560 tcg_gen_br(done_label);
6561 gen_set_label(fail_label);
6562 tcg_gen_movi_i32(cpu_R[rd], 1);
6563 gen_set_label(done_label);
6564 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6565}
6566#endif
6567
0ecb72a5 6568static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6569{
6570 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6571 TCGv tmp;
3670669c 6572 TCGv tmp2;
6ddbc6e4 6573 TCGv tmp3;
b0109805 6574 TCGv addr;
a7812ae4 6575 TCGv_i64 tmp64;
9ee6e8bb 6576
d31dd73e 6577 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6578 s->pc += 4;
6579
6580 /* M variants do not implement ARM mode. */
6581 if (IS_M(env))
6582 goto illegal_op;
6583 cond = insn >> 28;
6584 if (cond == 0xf){
be5e7a76
DES
6585 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6586 * choose to UNDEF. In ARMv5 and above the space is used
6587 * for miscellaneous unconditional instructions.
6588 */
6589 ARCH(5);
6590
9ee6e8bb
PB
6591 /* Unconditional instructions. */
6592 if (((insn >> 25) & 7) == 1) {
6593 /* NEON Data processing. */
6594 if (!arm_feature(env, ARM_FEATURE_NEON))
6595 goto illegal_op;
6596
6597 if (disas_neon_data_insn(env, s, insn))
6598 goto illegal_op;
6599 return;
6600 }
6601 if ((insn & 0x0f100000) == 0x04000000) {
6602 /* NEON load/store. */
6603 if (!arm_feature(env, ARM_FEATURE_NEON))
6604 goto illegal_op;
6605
6606 if (disas_neon_ls_insn(env, s, insn))
6607 goto illegal_op;
6608 return;
6609 }
3d185e5d
PM
6610 if (((insn & 0x0f30f000) == 0x0510f000) ||
6611 ((insn & 0x0f30f010) == 0x0710f000)) {
6612 if ((insn & (1 << 22)) == 0) {
6613 /* PLDW; v7MP */
6614 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6615 goto illegal_op;
6616 }
6617 }
6618 /* Otherwise PLD; v5TE+ */
be5e7a76 6619 ARCH(5TE);
3d185e5d
PM
6620 return;
6621 }
6622 if (((insn & 0x0f70f000) == 0x0450f000) ||
6623 ((insn & 0x0f70f010) == 0x0650f000)) {
6624 ARCH(7);
6625 return; /* PLI; V7 */
6626 }
6627 if (((insn & 0x0f700000) == 0x04100000) ||
6628 ((insn & 0x0f700010) == 0x06100000)) {
6629 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6630 goto illegal_op;
6631 }
6632 return; /* v7MP: Unallocated memory hint: must NOP */
6633 }
6634
6635 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6636 ARCH(6);
6637 /* setend */
10962fd5
PM
6638 if (((insn >> 9) & 1) != s->bswap_code) {
6639 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6640 goto illegal_op;
6641 }
6642 return;
6643 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6644 switch ((insn >> 4) & 0xf) {
6645 case 1: /* clrex */
6646 ARCH(6K);
426f5abc 6647 gen_clrex(s);
9ee6e8bb
PB
6648 return;
6649 case 4: /* dsb */
6650 case 5: /* dmb */
6651 case 6: /* isb */
6652 ARCH(7);
6653 /* We don't emulate caches so these are a no-op. */
6654 return;
6655 default:
6656 goto illegal_op;
6657 }
6658 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6659 /* srs */
c67b6b71 6660 int32_t offset;
9ee6e8bb
PB
6661 if (IS_USER(s))
6662 goto illegal_op;
6663 ARCH(6);
6664 op1 = (insn & 0x1f);
7d1b0095 6665 addr = tcg_temp_new_i32();
39ea3d4e
PM
6666 tmp = tcg_const_i32(op1);
6667 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6668 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6669 i = (insn >> 23) & 3;
6670 switch (i) {
6671 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6672 case 1: offset = 0; break; /* IA */
6673 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6674 case 3: offset = 4; break; /* IB */
6675 default: abort();
6676 }
6677 if (offset)
b0109805
PB
6678 tcg_gen_addi_i32(addr, addr, offset);
6679 tmp = load_reg(s, 14);
6680 gen_st32(tmp, addr, 0);
c67b6b71 6681 tmp = load_cpu_field(spsr);
b0109805
PB
6682 tcg_gen_addi_i32(addr, addr, 4);
6683 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6684 if (insn & (1 << 21)) {
6685 /* Base writeback. */
6686 switch (i) {
6687 case 0: offset = -8; break;
c67b6b71
FN
6688 case 1: offset = 4; break;
6689 case 2: offset = -4; break;
9ee6e8bb
PB
6690 case 3: offset = 0; break;
6691 default: abort();
6692 }
6693 if (offset)
c67b6b71 6694 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6695 tmp = tcg_const_i32(op1);
6696 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6697 tcg_temp_free_i32(tmp);
7d1b0095 6698 tcg_temp_free_i32(addr);
b0109805 6699 } else {
7d1b0095 6700 tcg_temp_free_i32(addr);
9ee6e8bb 6701 }
a990f58f 6702 return;
ea825eee 6703 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6704 /* rfe */
c67b6b71 6705 int32_t offset;
9ee6e8bb
PB
6706 if (IS_USER(s))
6707 goto illegal_op;
6708 ARCH(6);
6709 rn = (insn >> 16) & 0xf;
b0109805 6710 addr = load_reg(s, rn);
9ee6e8bb
PB
6711 i = (insn >> 23) & 3;
6712 switch (i) {
b0109805 6713 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6714 case 1: offset = 0; break; /* IA */
6715 case 2: offset = -8; break; /* DB */
b0109805 6716 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6717 default: abort();
6718 }
6719 if (offset)
b0109805
PB
6720 tcg_gen_addi_i32(addr, addr, offset);
6721 /* Load PC into tmp and CPSR into tmp2. */
6722 tmp = gen_ld32(addr, 0);
6723 tcg_gen_addi_i32(addr, addr, 4);
6724 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6725 if (insn & (1 << 21)) {
6726 /* Base writeback. */
6727 switch (i) {
b0109805 6728 case 0: offset = -8; break;
c67b6b71
FN
6729 case 1: offset = 4; break;
6730 case 2: offset = -4; break;
b0109805 6731 case 3: offset = 0; break;
9ee6e8bb
PB
6732 default: abort();
6733 }
6734 if (offset)
b0109805
PB
6735 tcg_gen_addi_i32(addr, addr, offset);
6736 store_reg(s, rn, addr);
6737 } else {
7d1b0095 6738 tcg_temp_free_i32(addr);
9ee6e8bb 6739 }
b0109805 6740 gen_rfe(s, tmp, tmp2);
c67b6b71 6741 return;
9ee6e8bb
PB
6742 } else if ((insn & 0x0e000000) == 0x0a000000) {
6743 /* branch link and change to thumb (blx <offset>) */
6744 int32_t offset;
6745
6746 val = (uint32_t)s->pc;
7d1b0095 6747 tmp = tcg_temp_new_i32();
d9ba4830
PB
6748 tcg_gen_movi_i32(tmp, val);
6749 store_reg(s, 14, tmp);
9ee6e8bb
PB
6750 /* Sign-extend the 24-bit offset */
6751 offset = (((int32_t)insn) << 8) >> 8;
6752 /* offset * 4 + bit24 * 2 + (thumb bit) */
6753 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6754 /* pipeline offset */
6755 val += 4;
be5e7a76 6756 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6757 gen_bx_im(s, val);
9ee6e8bb
PB
6758 return;
6759 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6760 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6761 /* iWMMXt register transfer. */
6762 if (env->cp15.c15_cpar & (1 << 1))
6763 if (!disas_iwmmxt_insn(env, s, insn))
6764 return;
6765 }
6766 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6767 /* Coprocessor double register transfer. */
be5e7a76 6768 ARCH(5TE);
9ee6e8bb
PB
6769 } else if ((insn & 0x0f000010) == 0x0e000010) {
6770 /* Additional coprocessor register transfer. */
7997d92f 6771 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6772 uint32_t mask;
6773 uint32_t val;
6774 /* cps (privileged) */
6775 if (IS_USER(s))
6776 return;
6777 mask = val = 0;
6778 if (insn & (1 << 19)) {
6779 if (insn & (1 << 8))
6780 mask |= CPSR_A;
6781 if (insn & (1 << 7))
6782 mask |= CPSR_I;
6783 if (insn & (1 << 6))
6784 mask |= CPSR_F;
6785 if (insn & (1 << 18))
6786 val |= mask;
6787 }
7997d92f 6788 if (insn & (1 << 17)) {
9ee6e8bb
PB
6789 mask |= CPSR_M;
6790 val |= (insn & 0x1f);
6791 }
6792 if (mask) {
2fbac54b 6793 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6794 }
6795 return;
6796 }
6797 goto illegal_op;
6798 }
6799 if (cond != 0xe) {
6800 /* if not always execute, we generate a conditional jump to
6801 next instruction */
6802 s->condlabel = gen_new_label();
d9ba4830 6803 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6804 s->condjmp = 1;
6805 }
6806 if ((insn & 0x0f900000) == 0x03000000) {
6807 if ((insn & (1 << 21)) == 0) {
6808 ARCH(6T2);
6809 rd = (insn >> 12) & 0xf;
6810 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6811 if ((insn & (1 << 22)) == 0) {
6812 /* MOVW */
7d1b0095 6813 tmp = tcg_temp_new_i32();
5e3f878a 6814 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6815 } else {
6816 /* MOVT */
5e3f878a 6817 tmp = load_reg(s, rd);
86831435 6818 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6819 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6820 }
5e3f878a 6821 store_reg(s, rd, tmp);
9ee6e8bb
PB
6822 } else {
6823 if (((insn >> 12) & 0xf) != 0xf)
6824 goto illegal_op;
6825 if (((insn >> 16) & 0xf) == 0) {
6826 gen_nop_hint(s, insn & 0xff);
6827 } else {
6828 /* CPSR = immediate */
6829 val = insn & 0xff;
6830 shift = ((insn >> 8) & 0xf) * 2;
6831 if (shift)
6832 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6833 i = ((insn & (1 << 22)) != 0);
2fbac54b 6834 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6835 goto illegal_op;
6836 }
6837 }
6838 } else if ((insn & 0x0f900000) == 0x01000000
6839 && (insn & 0x00000090) != 0x00000090) {
6840 /* miscellaneous instructions */
6841 op1 = (insn >> 21) & 3;
6842 sh = (insn >> 4) & 0xf;
6843 rm = insn & 0xf;
6844 switch (sh) {
6845 case 0x0: /* move program status register */
6846 if (op1 & 1) {
6847 /* PSR = reg */
2fbac54b 6848 tmp = load_reg(s, rm);
9ee6e8bb 6849 i = ((op1 & 2) != 0);
2fbac54b 6850 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6851 goto illegal_op;
6852 } else {
6853 /* reg = PSR */
6854 rd = (insn >> 12) & 0xf;
6855 if (op1 & 2) {
6856 if (IS_USER(s))
6857 goto illegal_op;
d9ba4830 6858 tmp = load_cpu_field(spsr);
9ee6e8bb 6859 } else {
7d1b0095 6860 tmp = tcg_temp_new_i32();
9ef39277 6861 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6862 }
d9ba4830 6863 store_reg(s, rd, tmp);
9ee6e8bb
PB
6864 }
6865 break;
6866 case 0x1:
6867 if (op1 == 1) {
6868 /* branch/exchange thumb (bx). */
be5e7a76 6869 ARCH(4T);
d9ba4830
PB
6870 tmp = load_reg(s, rm);
6871 gen_bx(s, tmp);
9ee6e8bb
PB
6872 } else if (op1 == 3) {
6873 /* clz */
be5e7a76 6874 ARCH(5);
9ee6e8bb 6875 rd = (insn >> 12) & 0xf;
1497c961
PB
6876 tmp = load_reg(s, rm);
6877 gen_helper_clz(tmp, tmp);
6878 store_reg(s, rd, tmp);
9ee6e8bb
PB
6879 } else {
6880 goto illegal_op;
6881 }
6882 break;
6883 case 0x2:
6884 if (op1 == 1) {
6885 ARCH(5J); /* bxj */
6886 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6887 tmp = load_reg(s, rm);
6888 gen_bx(s, tmp);
9ee6e8bb
PB
6889 } else {
6890 goto illegal_op;
6891 }
6892 break;
6893 case 0x3:
6894 if (op1 != 1)
6895 goto illegal_op;
6896
be5e7a76 6897 ARCH(5);
9ee6e8bb 6898 /* branch link/exchange thumb (blx) */
d9ba4830 6899 tmp = load_reg(s, rm);
7d1b0095 6900 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6901 tcg_gen_movi_i32(tmp2, s->pc);
6902 store_reg(s, 14, tmp2);
6903 gen_bx(s, tmp);
9ee6e8bb
PB
6904 break;
6905 case 0x5: /* saturating add/subtract */
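/* QADD/QSUB/QDADD/QDSUB: op1 bit 1 selects the doubling forms, which
   first saturate Rn to twice its value; op1 bit 0 selects subtract. */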
be5e7a76 6906 ARCH(5TE);
9ee6e8bb
PB
6907 rd = (insn >> 12) & 0xf;
6908 rn = (insn >> 16) & 0xf;
b40d0353 6909 tmp = load_reg(s, rm);
5e3f878a 6910 tmp2 = load_reg(s, rn);
9ee6e8bb 6911 if (op1 & 2)
9ef39277 6912 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6913 if (op1 & 1)
9ef39277 6914 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6915 else
9ef39277 6916 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6917 tcg_temp_free_i32(tmp2);
5e3f878a 6918 store_reg(s, rd, tmp);
9ee6e8bb 6919 break;
49e14940
AL
6920 case 7:
6921 /* SMC instruction (op1 == 3)
6922 and undefined instructions (op1 == 0 || op1 == 2)
6923 will trap */
6924 if (op1 != 1) {
6925 goto illegal_op;
6926 }
6927 /* bkpt */
be5e7a76 6928 ARCH(5);
bc4a0de0 6929 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6930 break;
6931 case 0x8: /* signed multiply */
6932 case 0xa:
6933 case 0xc:
6934 case 0xe:
be5e7a76 6935 ARCH(5TE);
9ee6e8bb
PB
6936 rs = (insn >> 8) & 0xf;
6937 rn = (insn >> 12) & 0xf;
6938 rd = (insn >> 16) & 0xf;
6939 if (op1 == 1) {
6940 /* (32 * 16) >> 16 */
5e3f878a
PB
6941 tmp = load_reg(s, rm);
6942 tmp2 = load_reg(s, rs);
9ee6e8bb 6943 if (sh & 4)
5e3f878a 6944 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6945 else
5e3f878a 6946 gen_sxth(tmp2);
a7812ae4
PB
6947 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6948 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6949 tmp = tcg_temp_new_i32();
a7812ae4 6950 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6951 tcg_temp_free_i64(tmp64);
9ee6e8bb 6952 if ((sh & 2) == 0) {
5e3f878a 6953 tmp2 = load_reg(s, rn);
9ef39277 6954 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6955 tcg_temp_free_i32(tmp2);
9ee6e8bb 6956 }
5e3f878a 6957 store_reg(s, rd, tmp);
9ee6e8bb
PB
6958 } else {
6959 /* 16 * 16 */
5e3f878a
PB
6960 tmp = load_reg(s, rm);
6961 tmp2 = load_reg(s, rs);
6962 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6963 tcg_temp_free_i32(tmp2);
9ee6e8bb 6964 if (op1 == 2) {
a7812ae4
PB
6965 tmp64 = tcg_temp_new_i64();
6966 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6967 tcg_temp_free_i32(tmp);
a7812ae4
PB
6968 gen_addq(s, tmp64, rn, rd);
6969 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6970 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6971 } else {
6972 if (op1 == 0) {
5e3f878a 6973 tmp2 = load_reg(s, rn);
9ef39277 6974 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6975 tcg_temp_free_i32(tmp2);
9ee6e8bb 6976 }
5e3f878a 6977 store_reg(s, rd, tmp);
9ee6e8bb
PB
6978 }
6979 }
6980 break;
6981 default:
6982 goto illegal_op;
6983 }
6984 } else if (((insn & 0x0e000000) == 0 &&
6985 (insn & 0x00000090) != 0x90) ||
6986 ((insn & 0x0e000000) == (1 << 25))) {
6987 int set_cc, logic_cc, shiftop;
6988
6989 op1 = (insn >> 21) & 0xf;
6990 set_cc = (insn >> 20) & 1;
6991 logic_cc = table_logic_cc[op1] & set_cc;
6992
6993 /* data processing instruction */
6994 if (insn & (1 << 25)) {
6995 /* immediate operand */
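/* The 8-bit immediate is rotated right by twice the 4-bit rotate field;
   for flag-setting logical ops a non-zero rotation also supplies the
   carry flag from bit 31 of the rotated value (gen_set_CF_bit31 below). */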
6996 val = insn & 0xff;
6997 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6998 if (shift) {
9ee6e8bb 6999 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7000 }
7d1b0095 7001 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7002 tcg_gen_movi_i32(tmp2, val);
7003 if (logic_cc && shift) {
7004 gen_set_CF_bit31(tmp2);
7005 }
9ee6e8bb
PB
7006 } else {
7007 /* register */
7008 rm = (insn) & 0xf;
e9bb4aa9 7009 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7010 shiftop = (insn >> 5) & 3;
7011 if (!(insn & (1 << 4))) {
7012 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7013 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7014 } else {
7015 rs = (insn >> 8) & 0xf;
8984bd2e 7016 tmp = load_reg(s, rs);
e9bb4aa9 7017 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7018 }
7019 }
7020 if (op1 != 0x0f && op1 != 0x0d) {
7021 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7022 tmp = load_reg(s, rn);
7023 } else {
7024 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7025 }
7026 rd = (insn >> 12) & 0xf;
7027 switch(op1) {
7028 case 0x00:
e9bb4aa9
JR
7029 tcg_gen_and_i32(tmp, tmp, tmp2);
7030 if (logic_cc) {
7031 gen_logic_CC(tmp);
7032 }
21aeb343 7033 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7034 break;
7035 case 0x01:
e9bb4aa9
JR
7036 tcg_gen_xor_i32(tmp, tmp, tmp2);
7037 if (logic_cc) {
7038 gen_logic_CC(tmp);
7039 }
21aeb343 7040 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7041 break;
7042 case 0x02:
7043 if (set_cc && rd == 15) {
7044 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7045 if (IS_USER(s)) {
9ee6e8bb 7046 goto illegal_op;
e9bb4aa9 7047 }
72485ec4 7048 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7049 gen_exception_return(s, tmp);
9ee6e8bb 7050 } else {
e9bb4aa9 7051 if (set_cc) {
72485ec4 7052 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7053 } else {
7054 tcg_gen_sub_i32(tmp, tmp, tmp2);
7055 }
21aeb343 7056 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7057 }
7058 break;
7059 case 0x03:
e9bb4aa9 7060 if (set_cc) {
72485ec4 7061 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7062 } else {
7063 tcg_gen_sub_i32(tmp, tmp2, tmp);
7064 }
21aeb343 7065 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7066 break;
7067 case 0x04:
e9bb4aa9 7068 if (set_cc) {
72485ec4 7069 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7070 } else {
7071 tcg_gen_add_i32(tmp, tmp, tmp2);
7072 }
21aeb343 7073 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7074 break;
7075 case 0x05:
e9bb4aa9 7076 if (set_cc) {
9ef39277 7077 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
e9bb4aa9
JR
7078 } else {
7079 gen_add_carry(tmp, tmp, tmp2);
7080 }
21aeb343 7081 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7082 break;
7083 case 0x06:
e9bb4aa9 7084 if (set_cc) {
9ef39277 7085 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
e9bb4aa9
JR
7086 } else {
7087 gen_sub_carry(tmp, tmp, tmp2);
7088 }
21aeb343 7089 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7090 break;
7091 case 0x07:
e9bb4aa9 7092 if (set_cc) {
9ef39277 7093 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
e9bb4aa9
JR
7094 } else {
7095 gen_sub_carry(tmp, tmp2, tmp);
7096 }
21aeb343 7097 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7098 break;
7099 case 0x08:
7100 if (set_cc) {
e9bb4aa9
JR
7101 tcg_gen_and_i32(tmp, tmp, tmp2);
7102 gen_logic_CC(tmp);
9ee6e8bb 7103 }
7d1b0095 7104 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7105 break;
7106 case 0x09:
7107 if (set_cc) {
e9bb4aa9
JR
7108 tcg_gen_xor_i32(tmp, tmp, tmp2);
7109 gen_logic_CC(tmp);
9ee6e8bb 7110 }
7d1b0095 7111 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7112 break;
7113 case 0x0a:
7114 if (set_cc) {
72485ec4 7115 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7116 }
7d1b0095 7117 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7118 break;
7119 case 0x0b:
7120 if (set_cc) {
72485ec4 7121 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7122 }
7d1b0095 7123 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7124 break;
7125 case 0x0c:
e9bb4aa9
JR
7126 tcg_gen_or_i32(tmp, tmp, tmp2);
7127 if (logic_cc) {
7128 gen_logic_CC(tmp);
7129 }
21aeb343 7130 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7131 break;
7132 case 0x0d:
7133 if (logic_cc && rd == 15) {
7134 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7135 if (IS_USER(s)) {
9ee6e8bb 7136 goto illegal_op;
e9bb4aa9
JR
7137 }
7138 gen_exception_return(s, tmp2);
9ee6e8bb 7139 } else {
e9bb4aa9
JR
7140 if (logic_cc) {
7141 gen_logic_CC(tmp2);
7142 }
21aeb343 7143 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7144 }
7145 break;
7146 case 0x0e:
f669df27 7147 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7148 if (logic_cc) {
7149 gen_logic_CC(tmp);
7150 }
21aeb343 7151 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7152 break;
7153 default:
7154 case 0x0f:
e9bb4aa9
JR
7155 tcg_gen_not_i32(tmp2, tmp2);
7156 if (logic_cc) {
7157 gen_logic_CC(tmp2);
7158 }
21aeb343 7159 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7160 break;
7161 }
e9bb4aa9 7162 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7163 tcg_temp_free_i32(tmp2);
e9bb4aa9 7164 }
9ee6e8bb
PB
7165 } else {
7166 /* other instructions */
7167 op1 = (insn >> 24) & 0xf;
7168 switch(op1) {
7169 case 0x0:
7170 case 0x1:
7171 /* multiplies, extra load/stores */
7172 sh = (insn >> 5) & 3;
7173 if (sh == 0) {
7174 if (op1 == 0x0) {
7175 rd = (insn >> 16) & 0xf;
7176 rn = (insn >> 12) & 0xf;
7177 rs = (insn >> 8) & 0xf;
7178 rm = (insn) & 0xf;
7179 op1 = (insn >> 20) & 0xf;
7180 switch (op1) {
7181 case 0: case 1: case 2: case 3: case 6:
7182 /* 32 bit mul */
5e3f878a
PB
7183 tmp = load_reg(s, rs);
7184 tmp2 = load_reg(s, rm);
7185 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7186 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7187 if (insn & (1 << 22)) {
7188 /* Subtract (mls) */
7189 ARCH(6T2);
5e3f878a
PB
7190 tmp2 = load_reg(s, rn);
7191 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7192 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7193 } else if (insn & (1 << 21)) {
7194 /* Add */
5e3f878a
PB
7195 tmp2 = load_reg(s, rn);
7196 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7197 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7198 }
7199 if (insn & (1 << 20))
5e3f878a
PB
7200 gen_logic_CC(tmp);
7201 store_reg(s, rd, tmp);
9ee6e8bb 7202 break;
8aac08b1
AJ
7203 case 4:
7204 /* 64 bit mul double accumulate (UMAAL) */
7205 ARCH(6);
7206 tmp = load_reg(s, rs);
7207 tmp2 = load_reg(s, rm);
7208 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7209 gen_addq_lo(s, tmp64, rn);
7210 gen_addq_lo(s, tmp64, rd);
7211 gen_storeq_reg(s, rn, rd, tmp64);
7212 tcg_temp_free_i64(tmp64);
7213 break;
7214 case 8: case 9: case 10: case 11:
7215 case 12: case 13: case 14: case 15:
7216 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
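/* muls2/mulu2 produce the full 64-bit product directly as a lo:hi pair
   of 32-bit values (tmp:tmp2); for the accumulating forms add2 then adds
   RdLo:RdHi into that pair with carry, so no 64-bit temporary is needed. */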
5e3f878a
PB
7217 tmp = load_reg(s, rs);
7218 tmp2 = load_reg(s, rm);
8aac08b1 7219 if (insn & (1 << 22)) {
c9f10124 7220 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7221 } else {
c9f10124 7222 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7223 }
7224 if (insn & (1 << 21)) { /* mult accumulate */
c9f10124
RH
7225 TCGv al = load_reg(s, rn);
7226 TCGv ah = load_reg(s, rd);
7227 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
7228 tcg_temp_free(al);
7229 tcg_temp_free(ah);
9ee6e8bb 7230 }
8aac08b1 7231 if (insn & (1 << 20)) {
c9f10124 7232 gen_logicq_cc(tmp, tmp2);
8aac08b1 7233 }
c9f10124
RH
7234 store_reg(s, rn, tmp);
7235 store_reg(s, rd, tmp2);
9ee6e8bb 7236 break;
8aac08b1
AJ
7237 default:
7238 goto illegal_op;
9ee6e8bb
PB
7239 }
7240 } else {
7241 rn = (insn >> 16) & 0xf;
7242 rd = (insn >> 12) & 0xf;
7243 if (insn & (1 << 23)) {
7244 /* load/store exclusive */
86753403
PB
7245 op1 = (insn >> 21) & 0x3;
7246 if (op1)
a47f43d2 7247 ARCH(6K);
86753403
PB
7248 else
7249 ARCH(6);
3174f8e9 7250 addr = tcg_temp_local_new_i32();
98a46317 7251 load_reg_var(s, addr, rn);
9ee6e8bb 7252 if (insn & (1 << 20)) {
86753403
PB
7253 switch (op1) {
7254 case 0: /* ldrex */
426f5abc 7255 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7256 break;
7257 case 1: /* ldrexd */
426f5abc 7258 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7259 break;
7260 case 2: /* ldrexb */
426f5abc 7261 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7262 break;
7263 case 3: /* ldrexh */
426f5abc 7264 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7265 break;
7266 default:
7267 abort();
7268 }
9ee6e8bb
PB
7269 } else {
7270 rm = insn & 0xf;
86753403
PB
7271 switch (op1) {
7272 case 0: /* strex */
426f5abc 7273 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7274 break;
7275 case 1: /* strexd */
502e64fe 7276 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7277 break;
7278 case 2: /* strexb */
426f5abc 7279 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7280 break;
7281 case 3: /* strexh */
426f5abc 7282 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7283 break;
7284 default:
7285 abort();
7286 }
9ee6e8bb 7287 }
3174f8e9 7288 tcg_temp_free(addr);
9ee6e8bb
PB
7289 } else {
7290 /* SWP instruction */
7291 rm = (insn) & 0xf;
7292
8984bd2e
PB
7293 /* ??? This is not really atomic. However we know
7294 we never have multiple CPUs running in parallel,
7295 so it is good enough. */
7296 addr = load_reg(s, rn);
7297 tmp = load_reg(s, rm);
9ee6e8bb 7298 if (insn & (1 << 22)) {
8984bd2e
PB
7299 tmp2 = gen_ld8u(addr, IS_USER(s));
7300 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7301 } else {
8984bd2e
PB
7302 tmp2 = gen_ld32(addr, IS_USER(s));
7303 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7304 }
7d1b0095 7305 tcg_temp_free_i32(addr);
8984bd2e 7306 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7307 }
7308 }
7309 } else {
7310 int address_offset;
7311 int load;
7312 /* Misc load/store */
7313 rn = (insn >> 16) & 0xf;
7314 rd = (insn >> 12) & 0xf;
b0109805 7315 addr = load_reg(s, rn);
9ee6e8bb 7316 if (insn & (1 << 24))
b0109805 7317 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7318 address_offset = 0;
7319 if (insn & (1 << 20)) {
7320 /* load */
7321 switch(sh) {
7322 case 1:
b0109805 7323 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7324 break;
7325 case 2:
b0109805 7326 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7327 break;
7328 default:
7329 case 3:
b0109805 7330 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7331 break;
7332 }
7333 load = 1;
7334 } else if (sh & 2) {
be5e7a76 7335 ARCH(5TE);
9ee6e8bb
PB
7336 /* doubleword */
7337 if (sh & 1) {
7338 /* store */
b0109805
PB
7339 tmp = load_reg(s, rd);
7340 gen_st32(tmp, addr, IS_USER(s));
7341 tcg_gen_addi_i32(addr, addr, 4);
7342 tmp = load_reg(s, rd + 1);
7343 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7344 load = 0;
7345 } else {
7346 /* load */
b0109805
PB
7347 tmp = gen_ld32(addr, IS_USER(s));
7348 store_reg(s, rd, tmp);
7349 tcg_gen_addi_i32(addr, addr, 4);
7350 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7351 rd++;
7352 load = 1;
7353 }
7354 address_offset = -4;
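/* addr was advanced by 4 to reach the second word, so compensate here
   and let the writeback code below compute the final base address. */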
7355 } else {
7356 /* store */
b0109805
PB
7357 tmp = load_reg(s, rd);
7358 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7359 load = 0;
7360 }
7361 /* Perform base writeback before the loaded value to
7362 ensure correct behavior with overlapping index registers.
 7363 ldrd with base writeback is undefined if the
7364 destination and index registers overlap. */
7365 if (!(insn & (1 << 24))) {
b0109805
PB
7366 gen_add_datah_offset(s, insn, address_offset, addr);
7367 store_reg(s, rn, addr);
9ee6e8bb
PB
7368 } else if (insn & (1 << 21)) {
7369 if (address_offset)
b0109805
PB
7370 tcg_gen_addi_i32(addr, addr, address_offset);
7371 store_reg(s, rn, addr);
7372 } else {
7d1b0095 7373 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7374 }
7375 if (load) {
7376 /* Complete the load. */
b0109805 7377 store_reg(s, rd, tmp);
9ee6e8bb
PB
7378 }
7379 }
7380 break;
7381 case 0x4:
7382 case 0x5:
7383 goto do_ldst;
7384 case 0x6:
7385 case 0x7:
7386 if (insn & (1 << 4)) {
7387 ARCH(6);
 7388 /* ARMv6 Media instructions. */
7389 rm = insn & 0xf;
7390 rn = (insn >> 16) & 0xf;
2c0262af 7391 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7392 rs = (insn >> 8) & 0xf;
7393 switch ((insn >> 23) & 3) {
7394 case 0: /* Parallel add/subtract. */
7395 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7396 tmp = load_reg(s, rn);
7397 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7398 sh = (insn >> 5) & 7;
7399 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7400 goto illegal_op;
6ddbc6e4 7401 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7402 tcg_temp_free_i32(tmp2);
6ddbc6e4 7403 store_reg(s, rd, tmp);
9ee6e8bb
PB
7404 break;
7405 case 1:
7406 if ((insn & 0x00700020) == 0) {
6c95676b 7407 /* Halfword pack. */
3670669c
PB
7408 tmp = load_reg(s, rn);
7409 tmp2 = load_reg(s, rm);
9ee6e8bb 7410 shift = (insn >> 7) & 0x1f;
3670669c
PB
7411 if (insn & (1 << 6)) {
7412 /* pkhtb */
22478e79
AZ
7413 if (shift == 0)
7414 shift = 31;
7415 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7416 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7417 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7418 } else {
7419 /* pkhbt */
22478e79
AZ
7420 if (shift)
7421 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7422 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7423 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7424 }
7425 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7426 tcg_temp_free_i32(tmp2);
3670669c 7427 store_reg(s, rd, tmp);
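/* For reference (ARM ARM semantics): PKHBT packs Rn[15:0] into the low
   halfword of Rd and (Rm LSL #imm)[31:16] into the high halfword, while
   PKHTB packs Rn[31:16] into the high halfword and (Rm ASR #imm)[15:0]
   into the low halfword.  E.g. PKHBT r0, r1, r2 with r1 = 0x1111AAAA and
   r2 = 0xBBBB2222 yields r0 = 0xBBBBAAAA. */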
9ee6e8bb
PB
7428 } else if ((insn & 0x00200020) == 0x00200000) {
7429 /* [us]sat */
6ddbc6e4 7430 tmp = load_reg(s, rm);
9ee6e8bb
PB
7431 shift = (insn >> 7) & 0x1f;
7432 if (insn & (1 << 6)) {
7433 if (shift == 0)
7434 shift = 31;
6ddbc6e4 7435 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7436 } else {
6ddbc6e4 7437 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7438 }
7439 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7440 tmp2 = tcg_const_i32(sh);
7441 if (insn & (1 << 22))
9ef39277 7442 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7443 else
9ef39277 7444 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7445 tcg_temp_free_i32(tmp2);
6ddbc6e4 7446 store_reg(s, rd, tmp);
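/* For reference: SSAT #n clamps the (shifted) value to the n-bit signed
   range [-2^(n-1), 2^(n-1)-1] and USAT #n to the unsigned range
   [0, 2^n - 1]; the Q flag is set whenever clamping actually occurs.
   E.g. SSAT r0, #8, r1 with r1 = 300 gives r0 = 127 and sets Q. */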
9ee6e8bb
PB
7447 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7448 /* [us]sat16 */
6ddbc6e4 7449 tmp = load_reg(s, rm);
9ee6e8bb 7450 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7451 tmp2 = tcg_const_i32(sh);
7452 if (insn & (1 << 22))
9ef39277 7453 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7454 else
9ef39277 7455 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7456 tcg_temp_free_i32(tmp2);
6ddbc6e4 7457 store_reg(s, rd, tmp);
9ee6e8bb
PB
7458 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7459 /* Select bytes. */
6ddbc6e4
PB
7460 tmp = load_reg(s, rn);
7461 tmp2 = load_reg(s, rm);
7d1b0095 7462 tmp3 = tcg_temp_new_i32();
0ecb72a5 7463 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7464 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7465 tcg_temp_free_i32(tmp3);
7466 tcg_temp_free_i32(tmp2);
6ddbc6e4 7467 store_reg(s, rd, tmp);
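/* For reference: SEL picks each result byte from the first operand when the
   corresponding GE[3:0] flag (set by an earlier parallel add/subtract such
   as SADD8 or USUB8) is 1, and from the second operand otherwise. */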
9ee6e8bb 7468 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7469 tmp = load_reg(s, rm);
9ee6e8bb 7470 shift = (insn >> 10) & 3;
1301f322 7471 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7472 rotate, a shift is sufficient. */
7473 if (shift != 0)
f669df27 7474 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7475 op1 = (insn >> 20) & 7;
7476 switch (op1) {
5e3f878a
PB
7477 case 0: gen_sxtb16(tmp); break;
7478 case 2: gen_sxtb(tmp); break;
7479 case 3: gen_sxth(tmp); break;
7480 case 4: gen_uxtb16(tmp); break;
7481 case 6: gen_uxtb(tmp); break;
7482 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7483 default: goto illegal_op;
7484 }
7485 if (rn != 15) {
5e3f878a 7486 tmp2 = load_reg(s, rn);
9ee6e8bb 7487 if ((op1 & 3) == 0) {
5e3f878a 7488 gen_add16(tmp, tmp2);
9ee6e8bb 7489 } else {
5e3f878a 7490 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7491 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7492 }
7493 }
6c95676b 7494 store_reg(s, rd, tmp);
9ee6e8bb
PB
7495 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7496 /* rev */
b0109805 7497 tmp = load_reg(s, rm);
9ee6e8bb
PB
7498 if (insn & (1 << 22)) {
7499 if (insn & (1 << 7)) {
b0109805 7500 gen_revsh(tmp);
9ee6e8bb
PB
7501 } else {
7502 ARCH(6T2);
b0109805 7503 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7504 }
7505 } else {
7506 if (insn & (1 << 7))
b0109805 7507 gen_rev16(tmp);
9ee6e8bb 7508 else
66896cb8 7509 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7510 }
b0109805 7511 store_reg(s, rd, tmp);
9ee6e8bb
PB
7512 } else {
7513 goto illegal_op;
7514 }
7515 break;
7516 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7517 switch ((insn >> 20) & 0x7) {
7518 case 5:
7519 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7520 /* op2 not 00x or 11x : UNDEF */
7521 goto illegal_op;
7522 }
838fa72d
AJ
7523 /* Signed multiply most significant [accumulate].
7524 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7525 tmp = load_reg(s, rm);
7526 tmp2 = load_reg(s, rs);
a7812ae4 7527 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7528
955a7dd5 7529 if (rd != 15) {
838fa72d 7530 tmp = load_reg(s, rd);
9ee6e8bb 7531 if (insn & (1 << 6)) {
838fa72d 7532 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7533 } else {
838fa72d 7534 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7535 }
7536 }
838fa72d
AJ
7537 if (insn & (1 << 5)) {
7538 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7539 }
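/* Adding 0x80000000 before discarding the low word rounds the product
   to nearest; this is the R variant (SMMULR/SMMLAR/SMMLSR) selected by
   bit 5.  E.g. a 64-bit value of 0x00000001_80000000 yields 2 here
   rather than the truncated 1. */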
7540 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7541 tmp = tcg_temp_new_i32();
838fa72d
AJ
7542 tcg_gen_trunc_i64_i32(tmp, tmp64);
7543 tcg_temp_free_i64(tmp64);
955a7dd5 7544 store_reg(s, rn, tmp);
41e9564d
PM
7545 break;
7546 case 0:
7547 case 4:
7548 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7549 if (insn & (1 << 7)) {
7550 goto illegal_op;
7551 }
7552 tmp = load_reg(s, rm);
7553 tmp2 = load_reg(s, rs);
9ee6e8bb 7554 if (insn & (1 << 5))
5e3f878a
PB
7555 gen_swap_half(tmp2);
7556 gen_smul_dual(tmp, tmp2);
5e3f878a 7557 if (insn & (1 << 6)) {
e1d177b9 7558 /* This subtraction cannot overflow. */
5e3f878a
PB
7559 tcg_gen_sub_i32(tmp, tmp, tmp2);
7560 } else {
e1d177b9
PM
7561 /* This addition cannot overflow 32 bits;
7562 * however it may overflow considered as a signed
7563 * operation, in which case we must set the Q flag.
7564 */
9ef39277 7565 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7566 }
7d1b0095 7567 tcg_temp_free_i32(tmp2);
9ee6e8bb 7568 if (insn & (1 << 22)) {
5e3f878a 7569 /* smlald, smlsld */
a7812ae4
PB
7570 tmp64 = tcg_temp_new_i64();
7571 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7572 tcg_temp_free_i32(tmp);
a7812ae4
PB
7573 gen_addq(s, tmp64, rd, rn);
7574 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7575 tcg_temp_free_i64(tmp64);
9ee6e8bb 7576 } else {
5e3f878a 7577 /* smuad, smusd, smlad, smlsd */
22478e79 7578 if (rd != 15)
9ee6e8bb 7579 {
22478e79 7580 tmp2 = load_reg(s, rd);
9ef39277 7581 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7582 tcg_temp_free_i32(tmp2);
9ee6e8bb 7583 }
22478e79 7584 store_reg(s, rn, tmp);
9ee6e8bb 7585 }
41e9564d 7586 break;
b8b8ea05
PM
7587 case 1:
7588 case 3:
7589 /* SDIV, UDIV */
7590 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7591 goto illegal_op;
7592 }
7593 if (((insn >> 5) & 7) || (rd != 15)) {
7594 goto illegal_op;
7595 }
7596 tmp = load_reg(s, rm);
7597 tmp2 = load_reg(s, rs);
7598 if (insn & (1 << 21)) {
7599 gen_helper_udiv(tmp, tmp, tmp2);
7600 } else {
7601 gen_helper_sdiv(tmp, tmp, tmp2);
7602 }
7603 tcg_temp_free_i32(tmp2);
7604 store_reg(s, rn, tmp);
7605 break;
41e9564d
PM
7606 default:
7607 goto illegal_op;
9ee6e8bb
PB
7608 }
7609 break;
7610 case 3:
7611 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7612 switch (op1) {
7613 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7614 ARCH(6);
7615 tmp = load_reg(s, rm);
7616 tmp2 = load_reg(s, rs);
7617 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7618 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7619 if (rd != 15) {
7620 tmp2 = load_reg(s, rd);
6ddbc6e4 7621 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7622 tcg_temp_free_i32(tmp2);
9ee6e8bb 7623 }
ded9d295 7624 store_reg(s, rn, tmp);
9ee6e8bb
PB
7625 break;
7626 case 0x20: case 0x24: case 0x28: case 0x2c:
7627 /* Bitfield insert/clear. */
7628 ARCH(6T2);
7629 shift = (insn >> 7) & 0x1f;
7630 i = (insn >> 16) & 0x1f;
7631 i = i + 1 - shift;
7632 if (rm == 15) {
7d1b0095 7633 tmp = tcg_temp_new_i32();
5e3f878a 7634 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7635 } else {
5e3f878a 7636 tmp = load_reg(s, rm);
9ee6e8bb
PB
7637 }
7638 if (i != 32) {
5e3f878a 7639 tmp2 = load_reg(s, rd);
d593c48e 7640 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7641 tcg_temp_free_i32(tmp2);
9ee6e8bb 7642 }
5e3f878a 7643 store_reg(s, rd, tmp);
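/* For reference: BFI Rd, Rm, #lsb, #width copies Rm[width-1:0] into
   Rd[lsb+width-1:lsb] and leaves the remaining bits of Rd unchanged;
   encoding Rm as r15 gives BFC, which simply clears the field.
   E.g. BFI r0, r1, #8, #4 with r0 = 0xFFFFFFFF and r1 = 0x0000000A
   leaves r0 = 0xFFFFFAFF. */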
9ee6e8bb
PB
7644 break;
7645 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7646 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7647 ARCH(6T2);
5e3f878a 7648 tmp = load_reg(s, rm);
9ee6e8bb
PB
7649 shift = (insn >> 7) & 0x1f;
7650 i = ((insn >> 16) & 0x1f) + 1;
7651 if (shift + i > 32)
7652 goto illegal_op;
7653 if (i < 32) {
7654 if (op1 & 0x20) {
5e3f878a 7655 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7656 } else {
5e3f878a 7657 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7658 }
7659 }
5e3f878a 7660 store_reg(s, rd, tmp);
9ee6e8bb
PB
7661 break;
7662 default:
7663 goto illegal_op;
7664 }
7665 break;
7666 }
7667 break;
7668 }
7669 do_ldst:
7670 /* Check for undefined extension instructions
 7671 * per the ARM Bible, i.e.:
7672 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7673 */
7674 sh = (0xf << 20) | (0xf << 4);
7675 if (op1 == 0x7 && ((insn & sh) == sh))
7676 {
7677 goto illegal_op;
7678 }
7679 /* load/store byte/word */
7680 rn = (insn >> 16) & 0xf;
7681 rd = (insn >> 12) & 0xf;
b0109805 7682 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7683 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7684 if (insn & (1 << 24))
b0109805 7685 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7686 if (insn & (1 << 20)) {
7687 /* load */
9ee6e8bb 7688 if (insn & (1 << 22)) {
b0109805 7689 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7690 } else {
b0109805 7691 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7692 }
9ee6e8bb
PB
7693 } else {
7694 /* store */
b0109805 7695 tmp = load_reg(s, rd);
9ee6e8bb 7696 if (insn & (1 << 22))
b0109805 7697 gen_st8(tmp, tmp2, i);
9ee6e8bb 7698 else
b0109805 7699 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7700 }
7701 if (!(insn & (1 << 24))) {
b0109805
PB
7702 gen_add_data_offset(s, insn, tmp2);
7703 store_reg(s, rn, tmp2);
7704 } else if (insn & (1 << 21)) {
7705 store_reg(s, rn, tmp2);
7706 } else {
7d1b0095 7707 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7708 }
7709 if (insn & (1 << 20)) {
7710 /* Complete the load. */
be5e7a76 7711 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7712 }
7713 break;
7714 case 0x08:
7715 case 0x09:
7716 {
7717 int j, n, user, loaded_base;
b0109805 7718 TCGv loaded_var;
9ee6e8bb
PB
7719 /* load/store multiple words */
7720 /* XXX: store correct base if write back */
7721 user = 0;
7722 if (insn & (1 << 22)) {
7723 if (IS_USER(s))
7724 goto illegal_op; /* only usable in supervisor mode */
7725
7726 if ((insn & (1 << 15)) == 0)
7727 user = 1;
7728 }
7729 rn = (insn >> 16) & 0xf;
b0109805 7730 addr = load_reg(s, rn);
9ee6e8bb
PB
7731
7732 /* compute total size */
7733 loaded_base = 0;
a50f5b91 7734 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7735 n = 0;
7736 for(i=0;i<16;i++) {
7737 if (insn & (1 << i))
7738 n++;
7739 }
7740 /* XXX: test invalid n == 0 case ? */
7741 if (insn & (1 << 23)) {
7742 if (insn & (1 << 24)) {
7743 /* pre increment */
b0109805 7744 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7745 } else {
7746 /* post increment */
7747 }
7748 } else {
7749 if (insn & (1 << 24)) {
7750 /* pre decrement */
b0109805 7751 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7752 } else {
7753 /* post decrement */
7754 if (n != 1)
b0109805 7755 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7756 }
7757 }
7758 j = 0;
7759 for(i=0;i<16;i++) {
7760 if (insn & (1 << i)) {
7761 if (insn & (1 << 20)) {
7762 /* load */
b0109805 7763 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7764 if (user) {
b75263d6 7765 tmp2 = tcg_const_i32(i);
1ce94f81 7766 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7767 tcg_temp_free_i32(tmp2);
7d1b0095 7768 tcg_temp_free_i32(tmp);
9ee6e8bb 7769 } else if (i == rn) {
b0109805 7770 loaded_var = tmp;
9ee6e8bb
PB
7771 loaded_base = 1;
7772 } else {
be5e7a76 7773 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7774 }
7775 } else {
7776 /* store */
7777 if (i == 15) {
7778 /* special case: r15 = PC + 8 */
7779 val = (long)s->pc + 4;
7d1b0095 7780 tmp = tcg_temp_new_i32();
b0109805 7781 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7782 } else if (user) {
7d1b0095 7783 tmp = tcg_temp_new_i32();
b75263d6 7784 tmp2 = tcg_const_i32(i);
9ef39277 7785 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7786 tcg_temp_free_i32(tmp2);
9ee6e8bb 7787 } else {
b0109805 7788 tmp = load_reg(s, i);
9ee6e8bb 7789 }
b0109805 7790 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7791 }
7792 j++;
7793 /* no need to add after the last transfer */
7794 if (j != n)
b0109805 7795 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7796 }
7797 }
7798 if (insn & (1 << 21)) {
7799 /* write back */
7800 if (insn & (1 << 23)) {
7801 if (insn & (1 << 24)) {
7802 /* pre increment */
7803 } else {
7804 /* post increment */
b0109805 7805 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7806 }
7807 } else {
7808 if (insn & (1 << 24)) {
7809 /* pre decrement */
7810 if (n != 1)
b0109805 7811 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7812 } else {
7813 /* post decrement */
b0109805 7814 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7815 }
7816 }
b0109805
PB
7817 store_reg(s, rn, addr);
7818 } else {
7d1b0095 7819 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7820 }
7821 if (loaded_base) {
b0109805 7822 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7823 }
7824 if ((insn & (1 << 22)) && !user) {
7825 /* Restore CPSR from SPSR. */
d9ba4830
PB
7826 tmp = load_cpu_field(spsr);
7827 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7828 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7829 s->is_jmp = DISAS_UPDATE;
7830 }
7831 }
7832 break;
7833 case 0xa:
7834 case 0xb:
7835 {
7836 int32_t offset;
7837
7838 /* branch (and link) */
7839 val = (int32_t)s->pc;
7840 if (insn & (1 << 24)) {
7d1b0095 7841 tmp = tcg_temp_new_i32();
5e3f878a
PB
7842 tcg_gen_movi_i32(tmp, val);
7843 store_reg(s, 14, tmp);
9ee6e8bb
PB
7844 }
7845 offset = (((int32_t)insn << 8) >> 8);
7846 val += (offset << 2) + 4;
7847 gen_jmp(s, val);
7848 }
7849 break;
7850 case 0xc:
7851 case 0xd:
7852 case 0xe:
7853 /* Coprocessor. */
7854 if (disas_coproc_insn(env, s, insn))
7855 goto illegal_op;
7856 break;
7857 case 0xf:
7858 /* swi */
5e3f878a 7859 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7860 s->is_jmp = DISAS_SWI;
7861 break;
7862 default:
7863 illegal_op:
bc4a0de0 7864 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7865 break;
7866 }
7867 }
7868}
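
/* Illustrative sketch, not part of the translator: the lowest address
 * accessed and the written-back base value for an LDM/STM of n registers,
 * mirroring the P (bit 24) / U (bit 23) handling in disas_arm_insn above.
 * The helper name and signature are invented for this example.
 */
static inline void ldm_stm_addresses(uint32_t base, int n, int pre, int up,
                                     uint32_t *start, uint32_t *writeback)
{
    if (up) {
        *start = pre ? base + 4 : base;                     /* IB : IA */
        *writeback = base + 4 * n;
    } else {
        *start = pre ? base - 4 * n : base - 4 * (n - 1);   /* DB : DA */
        *writeback = base - 4 * n;
    }
}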
7869
7870/* Return true if this is a Thumb-2 logical op. */
7871static int
7872thumb2_logic_op(int op)
7873{
7874 return (op < 8);
7875}
7876
7877/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7878 then set condition code flags based on the result of the operation.
7879 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7880 to the high bit of T1.
7881 Returns zero if the opcode is valid. */
7882
7883static int
396e467c 7884gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7885{
7886 int logic_cc;
7887
7888 logic_cc = 0;
7889 switch (op) {
7890 case 0: /* and */
396e467c 7891 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7892 logic_cc = conds;
7893 break;
7894 case 1: /* bic */
f669df27 7895 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7896 logic_cc = conds;
7897 break;
7898 case 2: /* orr */
396e467c 7899 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7900 logic_cc = conds;
7901 break;
7902 case 3: /* orn */
29501f1b 7903 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7904 logic_cc = conds;
7905 break;
7906 case 4: /* eor */
396e467c 7907 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7908 logic_cc = conds;
7909 break;
7910 case 8: /* add */
7911 if (conds)
72485ec4 7912 gen_add_CC(t0, t0, t1);
9ee6e8bb 7913 else
396e467c 7914 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7915 break;
7916 case 10: /* adc */
7917 if (conds)
9ef39277 7918 gen_helper_adc_cc(t0, cpu_env, t0, t1);
9ee6e8bb 7919 else
396e467c 7920 gen_adc(t0, t1);
9ee6e8bb
PB
7921 break;
7922 case 11: /* sbc */
7923 if (conds)
9ef39277 7924 gen_helper_sbc_cc(t0, cpu_env, t0, t1);
9ee6e8bb 7925 else
396e467c 7926 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7927 break;
7928 case 13: /* sub */
7929 if (conds)
72485ec4 7930 gen_sub_CC(t0, t0, t1);
9ee6e8bb 7931 else
396e467c 7932 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7933 break;
7934 case 14: /* rsb */
7935 if (conds)
72485ec4 7936 gen_sub_CC(t0, t1, t0);
9ee6e8bb 7937 else
396e467c 7938 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7939 break;
7940 default: /* 5, 6, 7, 9, 12, 15. */
7941 return 1;
7942 }
7943 if (logic_cc) {
396e467c 7944 gen_logic_CC(t0);
9ee6e8bb 7945 if (shifter_out)
396e467c 7946 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7947 }
7948 return 0;
7949}
7950
7951/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7952 is not legal. */
0ecb72a5 7953static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 7954{
b0109805 7955 uint32_t insn, imm, shift, offset;
9ee6e8bb 7956 uint32_t rd, rn, rm, rs;
b26eefb6 7957 TCGv tmp;
6ddbc6e4
PB
7958 TCGv tmp2;
7959 TCGv tmp3;
b0109805 7960 TCGv addr;
a7812ae4 7961 TCGv_i64 tmp64;
9ee6e8bb
PB
7962 int op;
7963 int shiftop;
7964 int conds;
7965 int logic_cc;
7966
7967 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7968 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7969 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7970 16-bit instructions to get correct prefetch abort behavior. */
7971 insn = insn_hw1;
7972 if ((insn & (1 << 12)) == 0) {
be5e7a76 7973 ARCH(5);
9ee6e8bb
PB
7974 /* Second half of blx. */
7975 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7976 tmp = load_reg(s, 14);
7977 tcg_gen_addi_i32(tmp, tmp, offset);
7978 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7979
7d1b0095 7980 tmp2 = tcg_temp_new_i32();
b0109805 7981 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7982 store_reg(s, 14, tmp2);
7983 gen_bx(s, tmp);
9ee6e8bb
PB
7984 return 0;
7985 }
7986 if (insn & (1 << 11)) {
7987 /* Second half of bl. */
7988 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7989 tmp = load_reg(s, 14);
6a0d8a1d 7990 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7991
7d1b0095 7992 tmp2 = tcg_temp_new_i32();
b0109805 7993 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7994 store_reg(s, 14, tmp2);
7995 gen_bx(s, tmp);
9ee6e8bb
PB
7996 return 0;
7997 }
7998 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7999 /* Instruction spans a page boundary. Implement it as two
 8000 16-bit instructions in case the second half causes a
8001 prefetch abort. */
8002 offset = ((int32_t)insn << 21) >> 9;
396e467c 8003 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8004 return 0;
8005 }
8006 /* Fall through to 32-bit decode. */
8007 }
8008
d31dd73e 8009 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8010 s->pc += 2;
8011 insn |= (uint32_t)insn_hw1 << 16;
8012
8013 if ((insn & 0xf800e800) != 0xf000e800) {
8014 ARCH(6T2);
8015 }
8016
8017 rn = (insn >> 16) & 0xf;
8018 rs = (insn >> 12) & 0xf;
8019 rd = (insn >> 8) & 0xf;
8020 rm = insn & 0xf;
8021 switch ((insn >> 25) & 0xf) {
8022 case 0: case 1: case 2: case 3:
8023 /* 16-bit instructions. Should never happen. */
8024 abort();
8025 case 4:
8026 if (insn & (1 << 22)) {
8027 /* Other load/store, table branch. */
8028 if (insn & 0x01200000) {
8029 /* Load/store doubleword. */
8030 if (rn == 15) {
7d1b0095 8031 addr = tcg_temp_new_i32();
b0109805 8032 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8033 } else {
b0109805 8034 addr = load_reg(s, rn);
9ee6e8bb
PB
8035 }
8036 offset = (insn & 0xff) * 4;
8037 if ((insn & (1 << 23)) == 0)
8038 offset = -offset;
8039 if (insn & (1 << 24)) {
b0109805 8040 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8041 offset = 0;
8042 }
8043 if (insn & (1 << 20)) {
8044 /* ldrd */
b0109805
PB
8045 tmp = gen_ld32(addr, IS_USER(s));
8046 store_reg(s, rs, tmp);
8047 tcg_gen_addi_i32(addr, addr, 4);
8048 tmp = gen_ld32(addr, IS_USER(s));
8049 store_reg(s, rd, tmp);
9ee6e8bb
PB
8050 } else {
8051 /* strd */
b0109805
PB
8052 tmp = load_reg(s, rs);
8053 gen_st32(tmp, addr, IS_USER(s));
8054 tcg_gen_addi_i32(addr, addr, 4);
8055 tmp = load_reg(s, rd);
8056 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8057 }
8058 if (insn & (1 << 21)) {
8059 /* Base writeback. */
8060 if (rn == 15)
8061 goto illegal_op;
b0109805
PB
8062 tcg_gen_addi_i32(addr, addr, offset - 4);
8063 store_reg(s, rn, addr);
8064 } else {
7d1b0095 8065 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8066 }
8067 } else if ((insn & (1 << 23)) == 0) {
8068 /* Load/store exclusive word. */
3174f8e9 8069 addr = tcg_temp_local_new();
98a46317 8070 load_reg_var(s, addr, rn);
426f5abc 8071 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8072 if (insn & (1 << 20)) {
426f5abc 8073 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8074 } else {
426f5abc 8075 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8076 }
3174f8e9 8077 tcg_temp_free(addr);
9ee6e8bb
PB
8078 } else if ((insn & (1 << 6)) == 0) {
8079 /* Table Branch. */
8080 if (rn == 15) {
7d1b0095 8081 addr = tcg_temp_new_i32();
b0109805 8082 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8083 } else {
b0109805 8084 addr = load_reg(s, rn);
9ee6e8bb 8085 }
b26eefb6 8086 tmp = load_reg(s, rm);
b0109805 8087 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8088 if (insn & (1 << 4)) {
8089 /* tbh */
b0109805 8090 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8091 tcg_temp_free_i32(tmp);
b0109805 8092 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8093 } else { /* tbb */
7d1b0095 8094 tcg_temp_free_i32(tmp);
b0109805 8095 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8096 }
7d1b0095 8097 tcg_temp_free_i32(addr);
b0109805
PB
8098 tcg_gen_shli_i32(tmp, tmp, 1);
8099 tcg_gen_addi_i32(tmp, tmp, s->pc);
8100 store_reg(s, 15, tmp);
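/* For reference: the loaded entry is a halfword count, so the branch target
   is (address of this insn + 4) + 2 * entry, with the table at Rn + Rm for
   TBB and Rn + 2 * Rm for TBH; s->pc already holds insn address + 4 here. */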
9ee6e8bb
PB
8101 } else {
8102 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8103 ARCH(7);
9ee6e8bb 8104 op = (insn >> 4) & 0x3;
426f5abc
PB
8105 if (op == 2) {
8106 goto illegal_op;
8107 }
3174f8e9 8108 addr = tcg_temp_local_new();
98a46317 8109 load_reg_var(s, addr, rn);
9ee6e8bb 8110 if (insn & (1 << 20)) {
426f5abc 8111 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8112 } else {
426f5abc 8113 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8114 }
3174f8e9 8115 tcg_temp_free(addr);
9ee6e8bb
PB
8116 }
8117 } else {
8118 /* Load/store multiple, RFE, SRS. */
8119 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8120 /* Not available in user mode. */
b0109805 8121 if (IS_USER(s))
9ee6e8bb
PB
8122 goto illegal_op;
8123 if (insn & (1 << 20)) {
8124 /* rfe */
b0109805
PB
8125 addr = load_reg(s, rn);
8126 if ((insn & (1 << 24)) == 0)
8127 tcg_gen_addi_i32(addr, addr, -8);
8128 /* Load PC into tmp and CPSR into tmp2. */
8129 tmp = gen_ld32(addr, 0);
8130 tcg_gen_addi_i32(addr, addr, 4);
8131 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8132 if (insn & (1 << 21)) {
8133 /* Base writeback. */
b0109805
PB
8134 if (insn & (1 << 24)) {
8135 tcg_gen_addi_i32(addr, addr, 4);
8136 } else {
8137 tcg_gen_addi_i32(addr, addr, -4);
8138 }
8139 store_reg(s, rn, addr);
8140 } else {
7d1b0095 8141 tcg_temp_free_i32(addr);
9ee6e8bb 8142 }
b0109805 8143 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8144 } else {
8145 /* srs */
8146 op = (insn & 0x1f);
7d1b0095 8147 addr = tcg_temp_new_i32();
39ea3d4e
PM
8148 tmp = tcg_const_i32(op);
8149 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8150 tcg_temp_free_i32(tmp);
9ee6e8bb 8151 if ((insn & (1 << 24)) == 0) {
b0109805 8152 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8153 }
b0109805
PB
8154 tmp = load_reg(s, 14);
8155 gen_st32(tmp, addr, 0);
8156 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8157 tmp = tcg_temp_new_i32();
9ef39277 8158 gen_helper_cpsr_read(tmp, cpu_env);
b0109805 8159 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8160 if (insn & (1 << 21)) {
8161 if ((insn & (1 << 24)) == 0) {
b0109805 8162 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8163 } else {
b0109805 8164 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8165 }
39ea3d4e
PM
8166 tmp = tcg_const_i32(op);
8167 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8168 tcg_temp_free_i32(tmp);
b0109805 8169 } else {
7d1b0095 8170 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8171 }
8172 }
8173 } else {
5856d44e
YO
8174 int i, loaded_base = 0;
8175 TCGv loaded_var;
9ee6e8bb 8176 /* Load/store multiple. */
b0109805 8177 addr = load_reg(s, rn);
9ee6e8bb
PB
8178 offset = 0;
8179 for (i = 0; i < 16; i++) {
8180 if (insn & (1 << i))
8181 offset += 4;
8182 }
8183 if (insn & (1 << 24)) {
b0109805 8184 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8185 }
8186
5856d44e 8187 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8188 for (i = 0; i < 16; i++) {
8189 if ((insn & (1 << i)) == 0)
8190 continue;
8191 if (insn & (1 << 20)) {
8192 /* Load. */
b0109805 8193 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8194 if (i == 15) {
b0109805 8195 gen_bx(s, tmp);
5856d44e
YO
8196 } else if (i == rn) {
8197 loaded_var = tmp;
8198 loaded_base = 1;
9ee6e8bb 8199 } else {
b0109805 8200 store_reg(s, i, tmp);
9ee6e8bb
PB
8201 }
8202 } else {
8203 /* Store. */
b0109805
PB
8204 tmp = load_reg(s, i);
8205 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8206 }
b0109805 8207 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8208 }
5856d44e
YO
8209 if (loaded_base) {
8210 store_reg(s, rn, loaded_var);
8211 }
9ee6e8bb
PB
8212 if (insn & (1 << 21)) {
8213 /* Base register writeback. */
8214 if (insn & (1 << 24)) {
b0109805 8215 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8216 }
8217 /* Fault if writeback register is in register list. */
8218 if (insn & (1 << rn))
8219 goto illegal_op;
b0109805
PB
8220 store_reg(s, rn, addr);
8221 } else {
7d1b0095 8222 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8223 }
8224 }
8225 }
8226 break;
2af9ab77
JB
8227 case 5:
8228
9ee6e8bb 8229 op = (insn >> 21) & 0xf;
2af9ab77
JB
8230 if (op == 6) {
8231 /* Halfword pack. */
8232 tmp = load_reg(s, rn);
8233 tmp2 = load_reg(s, rm);
8234 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8235 if (insn & (1 << 5)) {
8236 /* pkhtb */
8237 if (shift == 0)
8238 shift = 31;
8239 tcg_gen_sari_i32(tmp2, tmp2, shift);
8240 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8241 tcg_gen_ext16u_i32(tmp2, tmp2);
8242 } else {
8243 /* pkhbt */
8244 if (shift)
8245 tcg_gen_shli_i32(tmp2, tmp2, shift);
8246 tcg_gen_ext16u_i32(tmp, tmp);
8247 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8248 }
8249 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8250 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8251 store_reg(s, rd, tmp);
8252 } else {
2af9ab77
JB
8253 /* Data processing register constant shift. */
8254 if (rn == 15) {
7d1b0095 8255 tmp = tcg_temp_new_i32();
2af9ab77
JB
8256 tcg_gen_movi_i32(tmp, 0);
8257 } else {
8258 tmp = load_reg(s, rn);
8259 }
8260 tmp2 = load_reg(s, rm);
8261
8262 shiftop = (insn >> 4) & 3;
8263 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8264 conds = (insn & (1 << 20)) != 0;
8265 logic_cc = (conds && thumb2_logic_op(op));
8266 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8267 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8268 goto illegal_op;
7d1b0095 8269 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8270 if (rd != 15) {
8271 store_reg(s, rd, tmp);
8272 } else {
7d1b0095 8273 tcg_temp_free_i32(tmp);
2af9ab77 8274 }
3174f8e9 8275 }
9ee6e8bb
PB
8276 break;
8277 case 13: /* Misc data processing. */
8278 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8279 if (op < 4 && (insn & 0xf000) != 0xf000)
8280 goto illegal_op;
8281 switch (op) {
8282 case 0: /* Register controlled shift. */
8984bd2e
PB
8283 tmp = load_reg(s, rn);
8284 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8285 if ((insn & 0x70) != 0)
8286 goto illegal_op;
8287 op = (insn >> 21) & 3;
8984bd2e
PB
8288 logic_cc = (insn & (1 << 20)) != 0;
8289 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8290 if (logic_cc)
8291 gen_logic_CC(tmp);
21aeb343 8292 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8293 break;
8294 case 1: /* Sign/zero extend. */
5e3f878a 8295 tmp = load_reg(s, rm);
9ee6e8bb 8296 shift = (insn >> 4) & 3;
1301f322 8297 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8298 rotate, a shift is sufficient. */
8299 if (shift != 0)
f669df27 8300 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8301 op = (insn >> 20) & 7;
8302 switch (op) {
5e3f878a
PB
8303 case 0: gen_sxth(tmp); break;
8304 case 1: gen_uxth(tmp); break;
8305 case 2: gen_sxtb16(tmp); break;
8306 case 3: gen_uxtb16(tmp); break;
8307 case 4: gen_sxtb(tmp); break;
8308 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8309 default: goto illegal_op;
8310 }
8311 if (rn != 15) {
5e3f878a 8312 tmp2 = load_reg(s, rn);
9ee6e8bb 8313 if ((op >> 1) == 1) {
5e3f878a 8314 gen_add16(tmp, tmp2);
9ee6e8bb 8315 } else {
5e3f878a 8316 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8317 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8318 }
8319 }
5e3f878a 8320 store_reg(s, rd, tmp);
9ee6e8bb
PB
8321 break;
8322 case 2: /* SIMD add/subtract. */
8323 op = (insn >> 20) & 7;
8324 shift = (insn >> 4) & 7;
8325 if ((op & 3) == 3 || (shift & 3) == 3)
8326 goto illegal_op;
6ddbc6e4
PB
8327 tmp = load_reg(s, rn);
8328 tmp2 = load_reg(s, rm);
8329 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8330 tcg_temp_free_i32(tmp2);
6ddbc6e4 8331 store_reg(s, rd, tmp);
9ee6e8bb
PB
8332 break;
8333 case 3: /* Other data processing. */
8334 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8335 if (op < 4) {
8336 /* Saturating add/subtract. */
d9ba4830
PB
8337 tmp = load_reg(s, rn);
8338 tmp2 = load_reg(s, rm);
9ee6e8bb 8339 if (op & 1)
9ef39277 8340 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8341 if (op & 2)
9ef39277 8342 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8343 else
9ef39277 8344 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8345 tcg_temp_free_i32(tmp2);
9ee6e8bb 8346 } else {
d9ba4830 8347 tmp = load_reg(s, rn);
9ee6e8bb
PB
8348 switch (op) {
8349 case 0x0a: /* rbit */
d9ba4830 8350 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8351 break;
8352 case 0x08: /* rev */
66896cb8 8353 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8354 break;
8355 case 0x09: /* rev16 */
d9ba4830 8356 gen_rev16(tmp);
9ee6e8bb
PB
8357 break;
8358 case 0x0b: /* revsh */
d9ba4830 8359 gen_revsh(tmp);
9ee6e8bb
PB
8360 break;
8361 case 0x10: /* sel */
d9ba4830 8362 tmp2 = load_reg(s, rm);
7d1b0095 8363 tmp3 = tcg_temp_new_i32();
0ecb72a5 8364 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8365 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8366 tcg_temp_free_i32(tmp3);
8367 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8368 break;
8369 case 0x18: /* clz */
d9ba4830 8370 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8371 break;
8372 default:
8373 goto illegal_op;
8374 }
8375 }
d9ba4830 8376 store_reg(s, rd, tmp);
9ee6e8bb
PB
8377 break;
8378 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8379 op = (insn >> 4) & 0xf;
d9ba4830
PB
8380 tmp = load_reg(s, rn);
8381 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8382 switch ((insn >> 20) & 7) {
8383 case 0: /* 32 x 32 -> 32 */
d9ba4830 8384 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8385 tcg_temp_free_i32(tmp2);
9ee6e8bb 8386 if (rs != 15) {
d9ba4830 8387 tmp2 = load_reg(s, rs);
9ee6e8bb 8388 if (op)
d9ba4830 8389 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8390 else
d9ba4830 8391 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8392 tcg_temp_free_i32(tmp2);
9ee6e8bb 8393 }
9ee6e8bb
PB
8394 break;
8395 case 1: /* 16 x 16 -> 32 */
d9ba4830 8396 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8397 tcg_temp_free_i32(tmp2);
9ee6e8bb 8398 if (rs != 15) {
d9ba4830 8399 tmp2 = load_reg(s, rs);
9ef39277 8400 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8401 tcg_temp_free_i32(tmp2);
9ee6e8bb 8402 }
9ee6e8bb
PB
8403 break;
8404 case 2: /* Dual multiply add. */
8405 case 4: /* Dual multiply subtract. */
8406 if (op)
d9ba4830
PB
8407 gen_swap_half(tmp2);
8408 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8409 if (insn & (1 << 22)) {
e1d177b9 8410 /* This subtraction cannot overflow. */
d9ba4830 8411 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8412 } else {
e1d177b9
PM
8413 /* This addition cannot overflow 32 bits;
8414 * however it may overflow considered as a signed
8415 * operation, in which case we must set the Q flag.
8416 */
9ef39277 8417 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8418 }
7d1b0095 8419 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8420 if (rs != 15)
8421 {
d9ba4830 8422 tmp2 = load_reg(s, rs);
9ef39277 8423 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8424 tcg_temp_free_i32(tmp2);
9ee6e8bb 8425 }
9ee6e8bb
PB
8426 break;
8427 case 3: /* 32 * 16 -> 32msb */
8428 if (op)
d9ba4830 8429 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8430 else
d9ba4830 8431 gen_sxth(tmp2);
a7812ae4
PB
8432 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8433 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8434 tmp = tcg_temp_new_i32();
a7812ae4 8435 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8436 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8437 if (rs != 15)
8438 {
d9ba4830 8439 tmp2 = load_reg(s, rs);
9ef39277 8440 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8441 tcg_temp_free_i32(tmp2);
9ee6e8bb 8442 }
9ee6e8bb 8443 break;
838fa72d
AJ
8444 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8445 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8446 if (rs != 15) {
838fa72d
AJ
8447 tmp = load_reg(s, rs);
8448 if (insn & (1 << 20)) {
8449 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8450 } else {
838fa72d 8451 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8452 }
2c0262af 8453 }
838fa72d
AJ
8454 if (insn & (1 << 4)) {
8455 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8456 }
8457 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8458 tmp = tcg_temp_new_i32();
838fa72d
AJ
8459 tcg_gen_trunc_i64_i32(tmp, tmp64);
8460 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8461 break;
8462 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8463 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8464 tcg_temp_free_i32(tmp2);
9ee6e8bb 8465 if (rs != 15) {
d9ba4830
PB
8466 tmp2 = load_reg(s, rs);
8467 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8468 tcg_temp_free_i32(tmp2);
5fd46862 8469 }
9ee6e8bb 8470 break;
2c0262af 8471 }
d9ba4830 8472 store_reg(s, rd, tmp);
2c0262af 8473 break;
9ee6e8bb
PB
8474 case 6: case 7: /* 64-bit multiply, Divide. */
8475 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8476 tmp = load_reg(s, rn);
8477 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8478 if ((op & 0x50) == 0x10) {
8479 /* sdiv, udiv */
47789990 8480 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8481 goto illegal_op;
47789990 8482 }
9ee6e8bb 8483 if (op & 0x20)
5e3f878a 8484 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8485 else
5e3f878a 8486 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8487 tcg_temp_free_i32(tmp2);
5e3f878a 8488 store_reg(s, rd, tmp);
9ee6e8bb
PB
8489 } else if ((op & 0xe) == 0xc) {
8490 /* Dual multiply accumulate long. */
8491 if (op & 1)
5e3f878a
PB
8492 gen_swap_half(tmp2);
8493 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8494 if (op & 0x10) {
5e3f878a 8495 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8496 } else {
5e3f878a 8497 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8498 }
7d1b0095 8499 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8500 /* BUGFIX */
8501 tmp64 = tcg_temp_new_i64();
8502 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8503 tcg_temp_free_i32(tmp);
a7812ae4
PB
8504 gen_addq(s, tmp64, rs, rd);
8505 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8506 tcg_temp_free_i64(tmp64);
2c0262af 8507 } else {
9ee6e8bb
PB
8508 if (op & 0x20) {
8509 /* Unsigned 64-bit multiply */
a7812ae4 8510 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8511 } else {
9ee6e8bb
PB
8512 if (op & 8) {
8513 /* smlalxy */
5e3f878a 8514 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8515 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8516 tmp64 = tcg_temp_new_i64();
8517 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8518 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8519 } else {
8520 /* Signed 64-bit multiply */
a7812ae4 8521 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8522 }
b5ff1b31 8523 }
9ee6e8bb
PB
8524 if (op & 4) {
8525 /* umaal */
a7812ae4
PB
8526 gen_addq_lo(s, tmp64, rs);
8527 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8528 } else if (op & 0x40) {
8529 /* 64-bit accumulate. */
a7812ae4 8530 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8531 }
a7812ae4 8532 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8533 tcg_temp_free_i64(tmp64);
5fd46862 8534 }
2c0262af 8535 break;
9ee6e8bb
PB
8536 }
8537 break;
8538 case 6: case 7: case 14: case 15:
8539 /* Coprocessor. */
8540 if (((insn >> 24) & 3) == 3) {
8541 /* Translate into the equivalent ARM encoding. */
f06053e3 8542 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8543 if (disas_neon_data_insn(env, s, insn))
8544 goto illegal_op;
8545 } else {
8546 if (insn & (1 << 28))
8547 goto illegal_op;
8548 if (disas_coproc_insn (env, s, insn))
8549 goto illegal_op;
8550 }
8551 break;
8552 case 8: case 9: case 10: case 11:
8553 if (insn & (1 << 15)) {
8554 /* Branches, misc control. */
8555 if (insn & 0x5000) {
8556 /* Unconditional branch. */
8557 /* signextend(hw1[10:0]) -> offset[:12]. */
8558 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8559 /* hw1[10:0] -> offset[11:1]. */
8560 offset |= (insn & 0x7ff) << 1;
8561 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8562 offset[24:22] already have the same value because of the
8563 sign extension above. */
8564 offset ^= ((~insn) & (1 << 13)) << 10;
8565 offset ^= ((~insn) & (1 << 11)) << 11;
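/* Altogether, per the T1/T2 BL/BLX encoding: offset =
   SignExtend(S:I1:I2:imm10:imm11:'0') with I1 = NOT(J1 EOR S) and
   I2 = NOT(J2 EOR S), where S = hw1[10], J1 = hw2[13], J2 = hw2[11]. */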
8566
9ee6e8bb
PB
8567 if (insn & (1 << 14)) {
8568 /* Branch and link. */
3174f8e9 8569 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8570 }
3b46e624 8571
b0109805 8572 offset += s->pc;
9ee6e8bb
PB
8573 if (insn & (1 << 12)) {
8574 /* b/bl */
b0109805 8575 gen_jmp(s, offset);
9ee6e8bb
PB
8576 } else {
8577 /* blx */
b0109805 8578 offset &= ~(uint32_t)2;
be5e7a76 8579 /* thumb2 bx, no need to check */
b0109805 8580 gen_bx_im(s, offset);
2c0262af 8581 }
9ee6e8bb
PB
8582 } else if (((insn >> 23) & 7) == 7) {
8583 /* Misc control */
8584 if (insn & (1 << 13))
8585 goto illegal_op;
8586
8587 if (insn & (1 << 26)) {
8588 /* Secure monitor call (v6Z) */
8589 goto illegal_op; /* not implemented. */
2c0262af 8590 } else {
9ee6e8bb
PB
8591 op = (insn >> 20) & 7;
8592 switch (op) {
8593 case 0: /* msr cpsr. */
8594 if (IS_M(env)) {
8984bd2e
PB
8595 tmp = load_reg(s, rn);
8596 addr = tcg_const_i32(insn & 0xff);
8597 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8598 tcg_temp_free_i32(addr);
7d1b0095 8599 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8600 gen_lookup_tb(s);
8601 break;
8602 }
8603 /* fall through */
8604 case 1: /* msr spsr. */
8605 if (IS_M(env))
8606 goto illegal_op;
2fbac54b
FN
8607 tmp = load_reg(s, rn);
8608 if (gen_set_psr(s,
9ee6e8bb 8609 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8610 op == 1, tmp))
9ee6e8bb
PB
8611 goto illegal_op;
8612 break;
8613 case 2: /* cps, nop-hint. */
8614 if (((insn >> 8) & 7) == 0) {
8615 gen_nop_hint(s, insn & 0xff);
8616 }
8617 /* Implemented as NOP in user mode. */
8618 if (IS_USER(s))
8619 break;
8620 offset = 0;
8621 imm = 0;
8622 if (insn & (1 << 10)) {
8623 if (insn & (1 << 7))
8624 offset |= CPSR_A;
8625 if (insn & (1 << 6))
8626 offset |= CPSR_I;
8627 if (insn & (1 << 5))
8628 offset |= CPSR_F;
8629 if (insn & (1 << 9))
8630 imm = CPSR_A | CPSR_I | CPSR_F;
8631 }
8632 if (insn & (1 << 8)) {
8633 offset |= 0x1f;
8634 imm |= (insn & 0x1f);
8635 }
8636 if (offset) {
2fbac54b 8637 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8638 }
8639 break;
8640 case 3: /* Special control operations. */
426f5abc 8641 ARCH(7);
9ee6e8bb
PB
8642 op = (insn >> 4) & 0xf;
8643 switch (op) {
8644 case 2: /* clrex */
426f5abc 8645 gen_clrex(s);
9ee6e8bb
PB
8646 break;
8647 case 4: /* dsb */
8648 case 5: /* dmb */
8649 case 6: /* isb */
8650 /* These execute as NOPs. */
9ee6e8bb
PB
8651 break;
8652 default:
8653 goto illegal_op;
8654 }
8655 break;
8656 case 4: /* bxj */
8657 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8658 tmp = load_reg(s, rn);
8659 gen_bx(s, tmp);
9ee6e8bb
PB
8660 break;
8661 case 5: /* Exception return. */
b8b45b68
RV
8662 if (IS_USER(s)) {
8663 goto illegal_op;
8664 }
8665 if (rn != 14 || rd != 15) {
8666 goto illegal_op;
8667 }
8668 tmp = load_reg(s, rn);
8669 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8670 gen_exception_return(s, tmp);
8671 break;
9ee6e8bb 8672 case 6: /* mrs cpsr. */
7d1b0095 8673 tmp = tcg_temp_new_i32();
9ee6e8bb 8674 if (IS_M(env)) {
8984bd2e
PB
8675 addr = tcg_const_i32(insn & 0xff);
8676 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8677 tcg_temp_free_i32(addr);
9ee6e8bb 8678 } else {
9ef39277 8679 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8680 }
8984bd2e 8681 store_reg(s, rd, tmp);
9ee6e8bb
PB
8682 break;
8683 case 7: /* mrs spsr. */
8684 /* Not accessible in user mode. */
8685 if (IS_USER(s) || IS_M(env))
8686 goto illegal_op;
d9ba4830
PB
8687 tmp = load_cpu_field(spsr);
8688 store_reg(s, rd, tmp);
9ee6e8bb 8689 break;
2c0262af
FB
8690 }
8691 }
9ee6e8bb
PB
8692 } else {
8693 /* Conditional branch. */
8694 op = (insn >> 22) & 0xf;
8695 /* Generate a conditional jump to next instruction. */
8696 s->condlabel = gen_new_label();
d9ba4830 8697 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8698 s->condjmp = 1;
8699
8700 /* offset[11:1] = insn[10:0] */
8701 offset = (insn & 0x7ff) << 1;
8702 /* offset[17:12] = insn[21:16]. */
8703 offset |= (insn & 0x003f0000) >> 4;
8704 /* offset[31:20] = insn[26]. */
8705 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8706 /* offset[18] = insn[13]. */
8707 offset |= (insn & (1 << 13)) << 5;
8708 /* offset[19] = insn[11]. */
8709 offset |= (insn & (1 << 11)) << 8;
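/* Altogether this is SignExtend(S:J2:J1:imm6:imm11:'0'), the T3 conditional
   branch offset, with S = hw1[10], imm6 = hw1[5:0], J1 = hw2[13],
   J2 = hw2[11] and imm11 = hw2[10:0]. */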
8710
8711 /* jump to the offset */
b0109805 8712 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8713 }
8714 } else {
8715 /* Data processing immediate. */
8716 if (insn & (1 << 25)) {
8717 if (insn & (1 << 24)) {
8718 if (insn & (1 << 20))
8719 goto illegal_op;
8720 /* Bitfield/Saturate. */
8721 op = (insn >> 21) & 7;
8722 imm = insn & 0x1f;
8723 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8724 if (rn == 15) {
7d1b0095 8725 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8726 tcg_gen_movi_i32(tmp, 0);
8727 } else {
8728 tmp = load_reg(s, rn);
8729 }
9ee6e8bb
PB
8730 switch (op) {
8731 case 2: /* Signed bitfield extract. */
8732 imm++;
8733 if (shift + imm > 32)
8734 goto illegal_op;
8735 if (imm < 32)
6ddbc6e4 8736 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8737 break;
8738 case 6: /* Unsigned bitfield extract. */
8739 imm++;
8740 if (shift + imm > 32)
8741 goto illegal_op;
8742 if (imm < 32)
6ddbc6e4 8743 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8744 break;
8745 case 3: /* Bitfield insert/clear. */
8746 if (imm < shift)
8747 goto illegal_op;
8748 imm = imm + 1 - shift;
8749 if (imm != 32) {
6ddbc6e4 8750 tmp2 = load_reg(s, rd);
d593c48e 8751 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8752 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8753 }
8754 break;
8755 case 7:
8756 goto illegal_op;
8757 default: /* Saturate. */
9ee6e8bb
PB
8758 if (shift) {
8759 if (op & 1)
6ddbc6e4 8760 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8761 else
6ddbc6e4 8762 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8763 }
6ddbc6e4 8764 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8765 if (op & 4) {
8766 /* Unsigned. */
9ee6e8bb 8767 if ((op & 1) && shift == 0)
9ef39277 8768 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8769 else
9ef39277 8770 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8771 } else {
9ee6e8bb 8772 /* Signed. */
9ee6e8bb 8773 if ((op & 1) && shift == 0)
9ef39277 8774 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8775 else
9ef39277 8776 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8777 }
b75263d6 8778 tcg_temp_free_i32(tmp2);
9ee6e8bb 8779 break;
2c0262af 8780 }
6ddbc6e4 8781 store_reg(s, rd, tmp);
9ee6e8bb
PB
8782 } else {
8783 imm = ((insn & 0x04000000) >> 15)
8784 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8785 if (insn & (1 << 22)) {
8786 /* 16-bit immediate. */
8787 imm |= (insn >> 4) & 0xf000;
8788 if (insn & (1 << 23)) {
8789 /* movt */
5e3f878a 8790 tmp = load_reg(s, rd);
86831435 8791 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8792 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8793 } else {
9ee6e8bb 8794 /* movw */
7d1b0095 8795 tmp = tcg_temp_new_i32();
5e3f878a 8796 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8797 }
8798 } else {
9ee6e8bb
PB
8799 /* Add/sub 12-bit immediate. */
8800 if (rn == 15) {
b0109805 8801 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8802 if (insn & (1 << 23))
b0109805 8803 offset -= imm;
9ee6e8bb 8804 else
b0109805 8805 offset += imm;
7d1b0095 8806 tmp = tcg_temp_new_i32();
5e3f878a 8807 tcg_gen_movi_i32(tmp, offset);
2c0262af 8808 } else {
5e3f878a 8809 tmp = load_reg(s, rn);
9ee6e8bb 8810 if (insn & (1 << 23))
5e3f878a 8811 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8812 else
5e3f878a 8813 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8814 }
9ee6e8bb 8815 }
5e3f878a 8816 store_reg(s, rd, tmp);
191abaa2 8817 }
9ee6e8bb
PB
8818 } else {
8819 int shifter_out = 0;
8820 /* modified 12-bit immediate. */
8821 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8822 imm = (insn & 0xff);
8823 switch (shift) {
8824 case 0: /* XY */
8825 /* Nothing to do. */
8826 break;
8827 case 1: /* 00XY00XY */
8828 imm |= imm << 16;
8829 break;
8830 case 2: /* XY00XY00 */
8831 imm |= imm << 16;
8832 imm <<= 8;
8833 break;
8834 case 3: /* XYXYXYXY */
8835 imm |= imm << 16;
8836 imm |= imm << 8;
8837 break;
8838 default: /* Rotated constant. */
8839 shift = (shift << 1) | (imm >> 7);
8840 imm |= 0x80;
8841 imm = imm << (32 - shift);
8842 shifter_out = 1;
8843 break;
b5ff1b31 8844 }
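/* Example expansions: pattern 1 with imm8 = 0x42 gives 0x00420042,
   pattern 3 with imm8 = 0x55 gives 0x55555555, and a rotated constant
   with i:imm3 = 0100 and imm8 = 0x00 gives 0x80 ROR 8 = 0x80000000. */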
7d1b0095 8845 tmp2 = tcg_temp_new_i32();
3174f8e9 8846 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8847 rn = (insn >> 16) & 0xf;
3174f8e9 8848 if (rn == 15) {
7d1b0095 8849 tmp = tcg_temp_new_i32();
3174f8e9
FN
8850 tcg_gen_movi_i32(tmp, 0);
8851 } else {
8852 tmp = load_reg(s, rn);
8853 }
9ee6e8bb
PB
8854 op = (insn >> 21) & 0xf;
8855 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8856 shifter_out, tmp, tmp2))
9ee6e8bb 8857 goto illegal_op;
7d1b0095 8858 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8859 rd = (insn >> 8) & 0xf;
8860 if (rd != 15) {
3174f8e9
FN
8861 store_reg(s, rd, tmp);
8862 } else {
7d1b0095 8863 tcg_temp_free_i32(tmp);
2c0262af 8864 }
2c0262af 8865 }
9ee6e8bb
PB
8866 }
8867 break;
8868 case 12: /* Load/store single data item. */
8869 {
8870 int postinc = 0;
8871 int writeback = 0;
b0109805 8872 int user;
9ee6e8bb
PB
8873 if ((insn & 0x01100000) == 0x01000000) {
8874 if (disas_neon_ls_insn(env, s, insn))
c1713132 8875 goto illegal_op;
9ee6e8bb
PB
8876 break;
8877 }
a2fdc890
PM
8878 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8879 if (rs == 15) {
8880 if (!(insn & (1 << 20))) {
8881 goto illegal_op;
8882 }
8883 if (op != 2) {
8884 /* Byte or halfword load space with dest == r15 : memory hints.
8885 * Catch them early so we don't emit pointless addressing code.
8886 * This space is a mix of:
8887 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8888 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8889 * cores)
8890 * unallocated hints, which must be treated as NOPs
8891 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8892 * which is easiest for the decoding logic
8893 * Some space which must UNDEF
8894 */
8895 int op1 = (insn >> 23) & 3;
8896 int op2 = (insn >> 6) & 0x3f;
8897 if (op & 2) {
8898 goto illegal_op;
8899 }
8900 if (rn == 15) {
02afbf64
PM
8901 /* UNPREDICTABLE, unallocated hint or
8902 * PLD/PLDW/PLI (literal)
8903 */
a2fdc890
PM
8904 return 0;
8905 }
8906 if (op1 & 1) {
02afbf64 8907 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8908 }
8909 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 8910 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8911 }
8912 /* UNDEF space, or an UNPREDICTABLE */
8913 return 1;
8914 }
8915 }
b0109805 8916 user = IS_USER(s);
9ee6e8bb 8917 if (rn == 15) {
7d1b0095 8918 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8919 /* PC relative. */
8920 /* s->pc has already been incremented by 4. */
8921 imm = s->pc & 0xfffffffc;
8922 if (insn & (1 << 23))
8923 imm += insn & 0xfff;
8924 else
8925 imm -= insn & 0xfff;
b0109805 8926 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8927 } else {
b0109805 8928 addr = load_reg(s, rn);
9ee6e8bb
PB
8929 if (insn & (1 << 23)) {
8930 /* Positive offset. */
8931 imm = insn & 0xfff;
b0109805 8932 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8933 } else {
9ee6e8bb 8934 imm = insn & 0xff;
2a0308c5
PM
8935 switch ((insn >> 8) & 0xf) {
8936 case 0x0: /* Shifted Register. */
9ee6e8bb 8937 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8938 if (shift > 3) {
8939 tcg_temp_free_i32(addr);
18c9b560 8940 goto illegal_op;
2a0308c5 8941 }
b26eefb6 8942 tmp = load_reg(s, rm);
9ee6e8bb 8943 if (shift)
b26eefb6 8944 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8945 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8946 tcg_temp_free_i32(tmp);
9ee6e8bb 8947 break;
2a0308c5 8948 case 0xc: /* Negative offset. */
b0109805 8949 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8950 break;
2a0308c5 8951 case 0xe: /* User privilege. */
b0109805
PB
8952 tcg_gen_addi_i32(addr, addr, imm);
8953 user = 1;
9ee6e8bb 8954 break;
2a0308c5 8955 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8956 imm = -imm;
8957 /* Fall through. */
2a0308c5 8958 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8959 postinc = 1;
8960 writeback = 1;
8961 break;
2a0308c5 8962 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8963 imm = -imm;
8964 /* Fall through. */
2a0308c5 8965 case 0xf: /* Pre-increment. */
b0109805 8966 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8967 writeback = 1;
8968 break;
8969 default:
2a0308c5 8970 tcg_temp_free_i32(addr);
b7bcbe95 8971 goto illegal_op;
9ee6e8bb
PB
8972 }
8973 }
8974 }
9ee6e8bb
PB
8975 if (insn & (1 << 20)) {
8976 /* Load. */
a2fdc890
PM
8977 switch (op) {
8978 case 0: tmp = gen_ld8u(addr, user); break;
8979 case 4: tmp = gen_ld8s(addr, user); break;
8980 case 1: tmp = gen_ld16u(addr, user); break;
8981 case 5: tmp = gen_ld16s(addr, user); break;
8982 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8983 default:
8984 tcg_temp_free_i32(addr);
8985 goto illegal_op;
a2fdc890
PM
8986 }
8987 if (rs == 15) {
8988 gen_bx(s, tmp);
9ee6e8bb 8989 } else {
a2fdc890 8990 store_reg(s, rs, tmp);
9ee6e8bb
PB
8991 }
8992 } else {
8993 /* Store. */
b0109805 8994 tmp = load_reg(s, rs);
9ee6e8bb 8995 switch (op) {
b0109805
PB
8996 case 0: gen_st8(tmp, addr, user); break;
8997 case 1: gen_st16(tmp, addr, user); break;
8998 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8999 default:
9000 tcg_temp_free_i32(addr);
9001 goto illegal_op;
b7bcbe95 9002 }
2c0262af 9003 }
9ee6e8bb 9004 if (postinc)
b0109805
PB
9005 tcg_gen_addi_i32(addr, addr, imm);
9006 if (writeback) {
9007 store_reg(s, rn, addr);
9008 } else {
7d1b0095 9009 tcg_temp_free_i32(addr);
b0109805 9010 }
9ee6e8bb
PB
9011 }
9012 break;
9013 default:
9014 goto illegal_op;
2c0262af 9015 }
9ee6e8bb
PB
9016 return 0;
9017illegal_op:
9018 return 1;
2c0262af
FB
9019}
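
/* Illustrative sketch, not part of the translator: a standalone expansion of
 * a Thumb-2 modified 12-bit immediate (i:imm3:imm8), equivalent to the inline
 * decode in disas_thumb2_insn above.  The helper name is invented for this
 * example.
 */
static inline uint32_t thumb_expand_imm_sketch(uint32_t imm12)
{
    uint32_t imm8 = imm12 & 0xff;

    switch ((imm12 >> 8) & 0xf) {          /* i:imm3 */
    case 0:                                /* 000000XY */
        return imm8;
    case 1:                                /* 00XY00XY */
        return imm8 | (imm8 << 16);
    case 2:                                /* XY00XY00 */
        return (imm8 << 8) | (imm8 << 24);
    case 3:                                /* XYXYXYXY */
        return imm8 | (imm8 << 8) | (imm8 << 16) | (imm8 << 24);
    default: {
        /* Rotated constant: force a 1 into bit 7 and rotate right by
           imm12[11:7], which is always at least 8 here. */
        uint32_t unrotated = imm8 | 0x80;
        uint32_t rot = (imm12 >> 7) & 0x1f;
        return (unrotated >> rot) | (unrotated << (32 - rot));
    }
    }
}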
9020
0ecb72a5 9021static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9022{
9023 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9024 int32_t offset;
9025 int i;
b26eefb6 9026 TCGv tmp;
d9ba4830 9027 TCGv tmp2;
b0109805 9028 TCGv addr;
99c475ab 9029
9ee6e8bb
PB
9030 if (s->condexec_mask) {
9031 cond = s->condexec_cond;
bedd2912
JB
9032 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9033 s->condlabel = gen_new_label();
9034 gen_test_cc(cond ^ 1, s->condlabel);
9035 s->condjmp = 1;
9036 }
9ee6e8bb
PB
9037 }
9038
d31dd73e 9039 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9040 s->pc += 2;
b5ff1b31 9041
99c475ab
FB
9042 switch (insn >> 12) {
9043 case 0: case 1:
396e467c 9044
99c475ab
FB
9045 rd = insn & 7;
9046 op = (insn >> 11) & 3;
9047 if (op == 3) {
9048 /* add/subtract */
9049 rn = (insn >> 3) & 7;
396e467c 9050 tmp = load_reg(s, rn);
99c475ab
FB
9051 if (insn & (1 << 10)) {
9052 /* immediate */
7d1b0095 9053 tmp2 = tcg_temp_new_i32();
396e467c 9054 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9055 } else {
9056 /* reg */
9057 rm = (insn >> 6) & 7;
396e467c 9058 tmp2 = load_reg(s, rm);
99c475ab 9059 }
9ee6e8bb
PB
9060 if (insn & (1 << 9)) {
9061 if (s->condexec_mask)
396e467c 9062 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9063 else
72485ec4 9064 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9065 } else {
9066 if (s->condexec_mask)
396e467c 9067 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9068 else
72485ec4 9069 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9070 }
7d1b0095 9071 tcg_temp_free_i32(tmp2);
396e467c 9072 store_reg(s, rd, tmp);
99c475ab
FB
9073 } else {
9074 /* shift immediate */
9075 rm = (insn >> 3) & 7;
9076 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9077 tmp = load_reg(s, rm);
9078 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9079 if (!s->condexec_mask)
9080 gen_logic_CC(tmp);
9081 store_reg(s, rd, tmp);
99c475ab
FB
9082 }
9083 break;
9084 case 2: case 3:
9085 /* arithmetic large immediate */
9086 op = (insn >> 11) & 3;
9087 rd = (insn >> 8) & 0x7;
396e467c 9088 if (op == 0) { /* mov */
7d1b0095 9089 tmp = tcg_temp_new_i32();
396e467c 9090 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9091 if (!s->condexec_mask)
396e467c
FN
9092 gen_logic_CC(tmp);
9093 store_reg(s, rd, tmp);
9094 } else {
9095 tmp = load_reg(s, rd);
7d1b0095 9096 tmp2 = tcg_temp_new_i32();
396e467c
FN
9097 tcg_gen_movi_i32(tmp2, insn & 0xff);
9098 switch (op) {
9099 case 1: /* cmp */
72485ec4 9100 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9101 tcg_temp_free_i32(tmp);
9102 tcg_temp_free_i32(tmp2);
396e467c
FN
9103 break;
9104 case 2: /* add */
9105 if (s->condexec_mask)
9106 tcg_gen_add_i32(tmp, tmp, tmp2);
9107 else
72485ec4 9108 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9109 tcg_temp_free_i32(tmp2);
396e467c
FN
9110 store_reg(s, rd, tmp);
9111 break;
9112 case 3: /* sub */
9113 if (s->condexec_mask)
9114 tcg_gen_sub_i32(tmp, tmp, tmp2);
9115 else
72485ec4 9116 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9117 tcg_temp_free_i32(tmp2);
396e467c
FN
9118 store_reg(s, rd, tmp);
9119 break;
9120 }
99c475ab 9121 }
99c475ab
FB
9122 break;
9123 case 4:
9124 if (insn & (1 << 11)) {
9125 rd = (insn >> 8) & 7;
5899f386
FB
9126 /* load pc-relative. Bit 1 of PC is ignored. */
9127 val = s->pc + 2 + ((insn & 0xff) * 4);
9128 val &= ~(uint32_t)2;
7d1b0095 9129 addr = tcg_temp_new_i32();
b0109805
PB
9130 tcg_gen_movi_i32(addr, val);
9131 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9132 tcg_temp_free_i32(addr);
b0109805 9133 store_reg(s, rd, tmp);
99c475ab
FB
9134 break;
9135 }
9136 if (insn & (1 << 10)) {
9137 /* data processing extended or blx */
9138 rd = (insn & 7) | ((insn >> 4) & 8);
9139 rm = (insn >> 3) & 0xf;
9140 op = (insn >> 8) & 3;
9141 switch (op) {
9142 case 0: /* add */
396e467c
FN
9143 tmp = load_reg(s, rd);
9144 tmp2 = load_reg(s, rm);
9145 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9146 tcg_temp_free_i32(tmp2);
396e467c 9147 store_reg(s, rd, tmp);
99c475ab
FB
9148 break;
9149 case 1: /* cmp */
396e467c
FN
9150 tmp = load_reg(s, rd);
9151 tmp2 = load_reg(s, rm);
72485ec4 9152 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9153 tcg_temp_free_i32(tmp2);
9154 tcg_temp_free_i32(tmp);
99c475ab
FB
9155 break;
9156 case 2: /* mov/cpy */
396e467c
FN
9157 tmp = load_reg(s, rm);
9158 store_reg(s, rd, tmp);
99c475ab
FB
9159 break;
9160 case 3:/* branch [and link] exchange thumb register */
b0109805 9161 tmp = load_reg(s, rm);
99c475ab 9162 if (insn & (1 << 7)) {
be5e7a76 9163 ARCH(5);
99c475ab 9164 val = (uint32_t)s->pc | 1;
7d1b0095 9165 tmp2 = tcg_temp_new_i32();
b0109805
PB
9166 tcg_gen_movi_i32(tmp2, val);
9167 store_reg(s, 14, tmp2);
99c475ab 9168 }
be5e7a76 9169 /* already thumb, no need to check */
d9ba4830 9170 gen_bx(s, tmp);
99c475ab
FB
9171 break;
9172 }
9173 break;
9174 }
9175
9176 /* data processing register */
9177 rd = insn & 7;
9178 rm = (insn >> 3) & 7;
9179 op = (insn >> 6) & 0xf;
9180 if (op == 2 || op == 3 || op == 4 || op == 7) {
9181 /* the shift/rotate ops want the operands backwards */
9182 val = rm;
9183 rm = rd;
9184 rd = val;
9185 val = 1;
9186 } else {
9187 val = 0;
9188 }
9189
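        /* Illustrative example (not part of the original comment): for Thumb
         * "LSL Rd, Rs" (op == 2), Rd holds the value to shift and Rs the shift
         * amount.  After the swap above, tmp is loaded from the amount register
         * and tmp2 from the value register; the shift helpers write their
         * result into tmp2, and val == 1 routes the final store back to the
         * original Rd. */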
396e467c 9190 if (op == 9) { /* neg */
7d1b0095 9191 tmp = tcg_temp_new_i32();
396e467c
FN
9192 tcg_gen_movi_i32(tmp, 0);
9193 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9194 tmp = load_reg(s, rd);
9195 } else {
9196 TCGV_UNUSED(tmp);
9197 }
99c475ab 9198
396e467c 9199 tmp2 = load_reg(s, rm);
5899f386 9200 switch (op) {
99c475ab 9201 case 0x0: /* and */
396e467c 9202 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9203 if (!s->condexec_mask)
396e467c 9204 gen_logic_CC(tmp);
99c475ab
FB
9205 break;
9206 case 0x1: /* eor */
396e467c 9207 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9208 if (!s->condexec_mask)
396e467c 9209 gen_logic_CC(tmp);
99c475ab
FB
9210 break;
9211 case 0x2: /* lsl */
9ee6e8bb 9212 if (s->condexec_mask) {
365af80e 9213 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9214 } else {
9ef39277 9215 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9216 gen_logic_CC(tmp2);
9ee6e8bb 9217 }
99c475ab
FB
9218 break;
9219 case 0x3: /* lsr */
9ee6e8bb 9220 if (s->condexec_mask) {
365af80e 9221 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9222 } else {
9ef39277 9223 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9224 gen_logic_CC(tmp2);
9ee6e8bb 9225 }
99c475ab
FB
9226 break;
9227 case 0x4: /* asr */
9ee6e8bb 9228 if (s->condexec_mask) {
365af80e 9229 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9230 } else {
9ef39277 9231 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9232 gen_logic_CC(tmp2);
9ee6e8bb 9233 }
99c475ab
FB
9234 break;
9235 case 0x5: /* adc */
9ee6e8bb 9236 if (s->condexec_mask)
396e467c 9237 gen_adc(tmp, tmp2);
9ee6e8bb 9238 else
9ef39277 9239 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
99c475ab
FB
9240 break;
9241 case 0x6: /* sbc */
9ee6e8bb 9242 if (s->condexec_mask)
396e467c 9243 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9244 else
9ef39277 9245 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
99c475ab
FB
9246 break;
9247 case 0x7: /* ror */
9ee6e8bb 9248 if (s->condexec_mask) {
f669df27
AJ
9249 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9250 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9251 } else {
9ef39277 9252 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9253 gen_logic_CC(tmp2);
9ee6e8bb 9254 }
99c475ab
FB
9255 break;
9256 case 0x8: /* tst */
396e467c
FN
9257 tcg_gen_and_i32(tmp, tmp, tmp2);
9258 gen_logic_CC(tmp);
99c475ab 9259 rd = 16;
5899f386 9260 break;
99c475ab 9261 case 0x9: /* neg */
9ee6e8bb 9262 if (s->condexec_mask)
396e467c 9263 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9264 else
72485ec4 9265 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9266 break;
9267 case 0xa: /* cmp */
72485ec4 9268 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9269 rd = 16;
9270 break;
9271 case 0xb: /* cmn */
72485ec4 9272 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9273 rd = 16;
9274 break;
9275 case 0xc: /* orr */
396e467c 9276 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9277 if (!s->condexec_mask)
396e467c 9278 gen_logic_CC(tmp);
99c475ab
FB
9279 break;
9280 case 0xd: /* mul */
7b2919a0 9281 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9282 if (!s->condexec_mask)
396e467c 9283 gen_logic_CC(tmp);
99c475ab
FB
9284 break;
9285 case 0xe: /* bic */
f669df27 9286 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9287 if (!s->condexec_mask)
396e467c 9288 gen_logic_CC(tmp);
99c475ab
FB
9289 break;
9290 case 0xf: /* mvn */
396e467c 9291 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9292 if (!s->condexec_mask)
396e467c 9293 gen_logic_CC(tmp2);
99c475ab 9294 val = 1;
5899f386 9295 rm = rd;
99c475ab
FB
9296 break;
9297 }
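        /* Illustrative note: rd == 16 is used above as an out-of-range marker
         * for the flag-setting comparisons (tst, cmp, cmn) that produce no
         * result register, so the store below is skipped and both temporaries
         * are freed. */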
9298 if (rd != 16) {
396e467c
FN
9299 if (val) {
9300 store_reg(s, rm, tmp2);
9301 if (op != 0xf)
7d1b0095 9302 tcg_temp_free_i32(tmp);
396e467c
FN
9303 } else {
9304 store_reg(s, rd, tmp);
7d1b0095 9305 tcg_temp_free_i32(tmp2);
396e467c
FN
9306 }
9307 } else {
7d1b0095
PM
9308 tcg_temp_free_i32(tmp);
9309 tcg_temp_free_i32(tmp2);
99c475ab
FB
9310 }
9311 break;
9312
9313 case 5:
9314 /* load/store register offset. */
9315 rd = insn & 7;
9316 rn = (insn >> 3) & 7;
9317 rm = (insn >> 6) & 7;
9318 op = (insn >> 9) & 7;
b0109805 9319 addr = load_reg(s, rn);
b26eefb6 9320 tmp = load_reg(s, rm);
b0109805 9321 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9322 tcg_temp_free_i32(tmp);
99c475ab
FB
9323
9324 if (op < 3) /* store */
b0109805 9325 tmp = load_reg(s, rd);
99c475ab
FB
9326
9327 switch (op) {
9328 case 0: /* str */
b0109805 9329 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9330 break;
9331 case 1: /* strh */
b0109805 9332 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9333 break;
9334 case 2: /* strb */
b0109805 9335 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9336 break;
9337 case 3: /* ldrsb */
b0109805 9338 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9339 break;
9340 case 4: /* ldr */
b0109805 9341 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9342 break;
9343 case 5: /* ldrh */
b0109805 9344 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9345 break;
9346 case 6: /* ldrb */
b0109805 9347 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9348 break;
9349 case 7: /* ldrsh */
b0109805 9350 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9351 break;
9352 }
9353 if (op >= 3) /* load */
b0109805 9354 store_reg(s, rd, tmp);
7d1b0095 9355 tcg_temp_free_i32(addr);
99c475ab
FB
9356 break;
9357
9358 case 6:
9359 /* load/store word immediate offset */
9360 rd = insn & 7;
9361 rn = (insn >> 3) & 7;
b0109805 9362 addr = load_reg(s, rn);
99c475ab 9363 val = (insn >> 4) & 0x7c;
b0109805 9364 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9365
9366 if (insn & (1 << 11)) {
9367 /* load */
b0109805
PB
9368 tmp = gen_ld32(addr, IS_USER(s));
9369 store_reg(s, rd, tmp);
99c475ab
FB
9370 } else {
9371 /* store */
b0109805
PB
9372 tmp = load_reg(s, rd);
9373 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9374 }
7d1b0095 9375 tcg_temp_free_i32(addr);
99c475ab
FB
9376 break;
9377
9378 case 7:
9379 /* load/store byte immediate offset */
9380 rd = insn & 7;
9381 rn = (insn >> 3) & 7;
b0109805 9382 addr = load_reg(s, rn);
99c475ab 9383 val = (insn >> 6) & 0x1f;
b0109805 9384 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9385
9386 if (insn & (1 << 11)) {
9387 /* load */
b0109805
PB
9388 tmp = gen_ld8u(addr, IS_USER(s));
9389 store_reg(s, rd, tmp);
99c475ab
FB
9390 } else {
9391 /* store */
b0109805
PB
9392 tmp = load_reg(s, rd);
9393 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9394 }
7d1b0095 9395 tcg_temp_free_i32(addr);
99c475ab
FB
9396 break;
9397
9398 case 8:
9399 /* load/store halfword immediate offset */
9400 rd = insn & 7;
9401 rn = (insn >> 3) & 7;
b0109805 9402 addr = load_reg(s, rn);
99c475ab 9403 val = (insn >> 5) & 0x3e;
b0109805 9404 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9405
9406 if (insn & (1 << 11)) {
9407 /* load */
b0109805
PB
9408 tmp = gen_ld16u(addr, IS_USER(s));
9409 store_reg(s, rd, tmp);
99c475ab
FB
9410 } else {
9411 /* store */
b0109805
PB
9412 tmp = load_reg(s, rd);
9413 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9414 }
7d1b0095 9415 tcg_temp_free_i32(addr);
99c475ab
FB
9416 break;
9417
9418 case 9:
9419 /* load/store from stack */
9420 rd = (insn >> 8) & 7;
b0109805 9421 addr = load_reg(s, 13);
99c475ab 9422 val = (insn & 0xff) * 4;
b0109805 9423 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9424
9425 if (insn & (1 << 11)) {
9426 /* load */
b0109805
PB
9427 tmp = gen_ld32(addr, IS_USER(s));
9428 store_reg(s, rd, tmp);
99c475ab
FB
9429 } else {
9430 /* store */
b0109805
PB
9431 tmp = load_reg(s, rd);
9432 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9433 }
7d1b0095 9434 tcg_temp_free_i32(addr);
99c475ab
FB
9435 break;
9436
9437 case 10:
 9438 /* add PC or SP plus immediate to a low reg (ADR / ADD Rd, SP, #imm) */
9439 rd = (insn >> 8) & 7;
5899f386
FB
9440 if (insn & (1 << 11)) {
9441 /* SP */
5e3f878a 9442 tmp = load_reg(s, 13);
5899f386
FB
9443 } else {
9444 /* PC. bit 1 is ignored. */
7d1b0095 9445 tmp = tcg_temp_new_i32();
5e3f878a 9446 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9447 }
99c475ab 9448 val = (insn & 0xff) * 4;
5e3f878a
PB
9449 tcg_gen_addi_i32(tmp, tmp, val);
9450 store_reg(s, rd, tmp);
99c475ab
FB
9451 break;
9452
9453 case 11:
9454 /* misc */
9455 op = (insn >> 8) & 0xf;
9456 switch (op) {
9457 case 0:
9458 /* adjust stack pointer */
b26eefb6 9459 tmp = load_reg(s, 13);
99c475ab
FB
9460 val = (insn & 0x7f) * 4;
9461 if (insn & (1 << 7))
6a0d8a1d 9462 val = -(int32_t)val;
b26eefb6
PB
9463 tcg_gen_addi_i32(tmp, tmp, val);
9464 store_reg(s, 13, tmp);
99c475ab
FB
9465 break;
9466
9ee6e8bb
PB
9467 case 2: /* sign/zero extend. */
9468 ARCH(6);
9469 rd = insn & 7;
9470 rm = (insn >> 3) & 7;
b0109805 9471 tmp = load_reg(s, rm);
9ee6e8bb 9472 switch ((insn >> 6) & 3) {
b0109805
PB
9473 case 0: gen_sxth(tmp); break;
9474 case 1: gen_sxtb(tmp); break;
9475 case 2: gen_uxth(tmp); break;
9476 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9477 }
b0109805 9478 store_reg(s, rd, tmp);
9ee6e8bb 9479 break;
99c475ab
FB
9480 case 4: case 5: case 0xc: case 0xd:
9481 /* push/pop */
b0109805 9482 addr = load_reg(s, 13);
5899f386
FB
9483 if (insn & (1 << 8))
9484 offset = 4;
99c475ab 9485 else
5899f386
FB
9486 offset = 0;
9487 for (i = 0; i < 8; i++) {
9488 if (insn & (1 << i))
9489 offset += 4;
9490 }
9491 if ((insn & (1 << 11)) == 0) {
b0109805 9492 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9493 }
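                /* Illustrative note: bit 11 clear means push, so the transfer
                 * address starts at SP minus the total size computed above and
                 * the registers are then stored upwards into that space. */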
99c475ab
FB
9494 for (i = 0; i < 8; i++) {
9495 if (insn & (1 << i)) {
9496 if (insn & (1 << 11)) {
9497 /* pop */
b0109805
PB
9498 tmp = gen_ld32(addr, IS_USER(s));
9499 store_reg(s, i, tmp);
99c475ab
FB
9500 } else {
9501 /* push */
b0109805
PB
9502 tmp = load_reg(s, i);
9503 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9504 }
5899f386 9505 /* advance to the next address. */
b0109805 9506 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9507 }
9508 }
a50f5b91 9509 TCGV_UNUSED(tmp);
99c475ab
FB
9510 if (insn & (1 << 8)) {
9511 if (insn & (1 << 11)) {
9512 /* pop pc */
b0109805 9513 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9514 /* don't set the pc until the rest of the instruction
9515 has completed */
9516 } else {
9517 /* push lr */
b0109805
PB
9518 tmp = load_reg(s, 14);
9519 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9520 }
b0109805 9521 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9522 }
5899f386 9523 if ((insn & (1 << 11)) == 0) {
b0109805 9524 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9525 }
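                /* Illustrative note: for push, the loop above advanced addr
                 * back up to the original SP, so subtracting the total size
                 * again yields the new, lowered stack pointer written back
                 * below. */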
99c475ab 9526 /* write back the new stack pointer */
b0109805 9527 store_reg(s, 13, addr);
99c475ab 9528 /* set the new PC value */
be5e7a76
DES
9529 if ((insn & 0x0900) == 0x0900) {
9530 store_reg_from_load(env, s, 15, tmp);
9531 }
99c475ab
FB
9532 break;
9533
9ee6e8bb
PB
 9534 case 1: case 3: case 9: case 11: /* cbz/cbnz: compare and branch on [non]zero */
9535 rm = insn & 7;
d9ba4830 9536 tmp = load_reg(s, rm);
9ee6e8bb
PB
9537 s->condlabel = gen_new_label();
9538 s->condjmp = 1;
9539 if (insn & (1 << 11))
cb63669a 9540 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9541 else
cb63669a 9542 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9543 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9544 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9545 val = (uint32_t)s->pc + 2;
9546 val += offset;
9547 gen_jmp(s, val);
9548 break;
9549
9550 case 15: /* IT, nop-hint. */
9551 if ((insn & 0xf) == 0) {
9552 gen_nop_hint(s, (insn >> 4) & 0xf);
9553 break;
9554 }
9555 /* If Then. */
9556 s->condexec_cond = (insn >> 4) & 0xe;
9557 s->condexec_mask = insn & 0x1f;
 9558 /* No actual code generated for this insn, just set up state. */
9559 break;
9560
06c949e6 9561 case 0xe: /* bkpt */
be5e7a76 9562 ARCH(5);
bc4a0de0 9563 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9564 break;
9565
9ee6e8bb
PB
9566 case 0xa: /* rev */
9567 ARCH(6);
9568 rn = (insn >> 3) & 0x7;
9569 rd = insn & 0x7;
b0109805 9570 tmp = load_reg(s, rn);
9ee6e8bb 9571 switch ((insn >> 6) & 3) {
66896cb8 9572 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9573 case 1: gen_rev16(tmp); break;
9574 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9575 default: goto illegal_op;
9576 }
b0109805 9577 store_reg(s, rd, tmp);
9ee6e8bb
PB
9578 break;
9579
d9e028c1
PM
9580 case 6:
9581 switch ((insn >> 5) & 7) {
9582 case 2:
9583 /* setend */
9584 ARCH(6);
10962fd5
PM
9585 if (((insn >> 3) & 1) != s->bswap_code) {
9586 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9587 goto illegal_op;
9588 }
9ee6e8bb 9589 break;
d9e028c1
PM
9590 case 3:
9591 /* cps */
9592 ARCH(6);
9593 if (IS_USER(s)) {
9594 break;
8984bd2e 9595 }
d9e028c1
PM
9596 if (IS_M(env)) {
9597 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9598 /* FAULTMASK */
9599 if (insn & 1) {
9600 addr = tcg_const_i32(19);
9601 gen_helper_v7m_msr(cpu_env, addr, tmp);
9602 tcg_temp_free_i32(addr);
9603 }
9604 /* PRIMASK */
9605 if (insn & 2) {
9606 addr = tcg_const_i32(16);
9607 gen_helper_v7m_msr(cpu_env, addr, tmp);
9608 tcg_temp_free_i32(addr);
9609 }
9610 tcg_temp_free_i32(tmp);
9611 gen_lookup_tb(s);
9612 } else {
9613 if (insn & (1 << 4)) {
9614 shift = CPSR_A | CPSR_I | CPSR_F;
9615 } else {
9616 shift = 0;
9617 }
9618 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9619 }
d9e028c1
PM
9620 break;
9621 default:
9622 goto undef;
9ee6e8bb
PB
9623 }
9624 break;
9625
99c475ab
FB
9626 default:
9627 goto undef;
9628 }
9629 break;
9630
9631 case 12:
a7d3970d 9632 {
99c475ab 9633 /* load/store multiple */
a7d3970d
PM
9634 TCGv loaded_var;
9635 TCGV_UNUSED(loaded_var);
99c475ab 9636 rn = (insn >> 8) & 0x7;
b0109805 9637 addr = load_reg(s, rn);
99c475ab
FB
9638 for (i = 0; i < 8; i++) {
9639 if (insn & (1 << i)) {
99c475ab
FB
9640 if (insn & (1 << 11)) {
9641 /* load */
b0109805 9642 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9643 if (i == rn) {
9644 loaded_var = tmp;
9645 } else {
9646 store_reg(s, i, tmp);
9647 }
99c475ab
FB
9648 } else {
9649 /* store */
b0109805
PB
9650 tmp = load_reg(s, i);
9651 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9652 }
5899f386 9653 /* advance to the next address */
b0109805 9654 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9655 }
9656 }
b0109805 9657 if ((insn & (1 << rn)) == 0) {
a7d3970d 9658 /* base reg not in list: base register writeback */
b0109805
PB
9659 store_reg(s, rn, addr);
9660 } else {
a7d3970d
PM
9661 /* base reg in list: if load, complete it now */
9662 if (insn & (1 << 11)) {
9663 store_reg(s, rn, loaded_var);
9664 }
7d1b0095 9665 tcg_temp_free_i32(addr);
b0109805 9666 }
99c475ab 9667 break;
a7d3970d 9668 }
99c475ab
FB
9669 case 13:
9670 /* conditional branch or swi */
9671 cond = (insn >> 8) & 0xf;
9672 if (cond == 0xe)
9673 goto undef;
9674
9675 if (cond == 0xf) {
9676 /* swi */
422ebf69 9677 gen_set_pc_im(s->pc);
9ee6e8bb 9678 s->is_jmp = DISAS_SWI;
99c475ab
FB
9679 break;
9680 }
9681 /* generate a conditional jump to next instruction */
e50e6a20 9682 s->condlabel = gen_new_label();
d9ba4830 9683 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9684 s->condjmp = 1;
99c475ab
FB
9685
9686 /* jump to the offset */
5899f386 9687 val = (uint32_t)s->pc + 2;
99c475ab 9688 offset = ((int32_t)insn << 24) >> 24;
5899f386 9689 val += offset << 1;
8aaca4c0 9690 gen_jmp(s, val);
99c475ab
FB
9691 break;
9692
9693 case 14:
358bf29e 9694 if (insn & (1 << 11)) {
9ee6e8bb
PB
9695 if (disas_thumb2_insn(env, s, insn))
9696 goto undef32;
358bf29e
PB
9697 break;
9698 }
9ee6e8bb 9699 /* unconditional branch */
99c475ab
FB
9700 val = (uint32_t)s->pc;
9701 offset = ((int32_t)insn << 21) >> 21;
9702 val += (offset << 1) + 2;
8aaca4c0 9703 gen_jmp(s, val);
99c475ab
FB
9704 break;
9705
9706 case 15:
9ee6e8bb 9707 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9708 goto undef32;
9ee6e8bb 9709 break;
99c475ab
FB
9710 }
9711 return;
9ee6e8bb 9712undef32:
bc4a0de0 9713 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9714 return;
9715illegal_op:
99c475ab 9716undef:
bc4a0de0 9717 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9718}
9719
2c0262af
FB
9720/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9721 basic block 'tb'. If search_pc is TRUE, also generate PC
9722 information for each intermediate instruction. */
0ecb72a5 9723static inline void gen_intermediate_code_internal(CPUARMState *env,
2cfc5f17
TS
9724 TranslationBlock *tb,
9725 int search_pc)
2c0262af
FB
9726{
9727 DisasContext dc1, *dc = &dc1;
a1d1bb31 9728 CPUBreakpoint *bp;
2c0262af
FB
9729 uint16_t *gen_opc_end;
9730 int j, lj;
0fa85d43 9731 target_ulong pc_start;
b5ff1b31 9732 uint32_t next_page_start;
2e70f6ef
PB
9733 int num_insns;
9734 int max_insns;
3b46e624 9735
2c0262af 9736 /* generate intermediate code */
0fa85d43 9737 pc_start = tb->pc;
3b46e624 9738
2c0262af
FB
9739 dc->tb = tb;
9740
92414b31 9741 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9742
9743 dc->is_jmp = DISAS_NEXT;
9744 dc->pc = pc_start;
8aaca4c0 9745 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9746 dc->condjmp = 0;
7204ab88 9747 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 9748 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
9749 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9750 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9751#if !defined(CONFIG_USER_ONLY)
61f74d6a 9752 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9753#endif
5df8bac1 9754 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9755 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9756 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9757 cpu_F0s = tcg_temp_new_i32();
9758 cpu_F1s = tcg_temp_new_i32();
9759 cpu_F0d = tcg_temp_new_i64();
9760 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9761 cpu_V0 = cpu_F0d;
9762 cpu_V1 = cpu_F1d;
e677137d 9763 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9764 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9765 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9766 lj = -1;
2e70f6ef
PB
9767 num_insns = 0;
9768 max_insns = tb->cflags & CF_COUNT_MASK;
9769 if (max_insns == 0)
9770 max_insns = CF_COUNT_MASK;
9771
9772 gen_icount_start();
e12ce78d 9773
3849902c
PM
9774 tcg_clear_temp_count();
9775
e12ce78d
PM
9776 /* A note on handling of the condexec (IT) bits:
9777 *
9778 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 9779 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 9780 * (1) if the condexec bits are not already zero then we write
0ecb72a5 9781 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
9782 * to do it at the end of the block. (For example if we don't do this
9783 * it's hard to identify whether we can safely skip writing condexec
9784 * at the end of the TB, which we definitely want to do for the case
9785 * where a TB doesn't do anything with the IT state at all.)
9786 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 9787 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
9788 * This is done both for leaving the TB at the end, and for leaving
9789 * it because of an exception we know will happen, which is done in
9790 * gen_exception_insn(). The latter is necessary because we need to
9791 * leave the TB with the PC/IT state just prior to execution of the
9792 * instruction which caused the exception.
9793 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 9794 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
9795 * This is handled in the same way as restoration of the
9796 * PC in these situations: we will be called again with search_pc=1
9797 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9798 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9799 * this to restore the condexec bits.
e12ce78d
PM
9800 *
9801 * Note that there are no instructions which can read the condexec
9802 * bits, and none which can write non-static values to them, so
0ecb72a5 9803 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
9804 * middle of a TB.
9805 */
9806
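    /* A worked illustration of the packed condexec format (derived from the
     * assignments in this function, not part of the original comment):
     * gen_opc_condexec_bits[] and env->condexec_bits keep the base condition
     * in bits [7:4] and the IT mask in bits [3:0], i.e.
     * (condexec_cond << 4) | (condexec_mask >> 1).  The DisasContext copy
     * holds the mask pre-shifted left by one, so a value of zero simply means
     * "not inside an IT block". */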
9ee6e8bb
PB
9807 /* Reset the conditional execution bits immediately. This avoids
9808 complications trying to do it at the end of the block. */
98eac7ca 9809 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9810 {
7d1b0095 9811 TCGv tmp = tcg_temp_new_i32();
8f01245e 9812 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9813 store_cpu_field(tmp, condexec_bits);
8f01245e 9814 }
2c0262af 9815 do {
fbb4a2e3
PB
9816#ifdef CONFIG_USER_ONLY
9817 /* Intercept jump to the magic kernel page. */
9818 if (dc->pc >= 0xffff0000) {
9819 /* We always get here via a jump, so know we are not in a
9820 conditional execution block. */
9821 gen_exception(EXCP_KERNEL_TRAP);
9822 dc->is_jmp = DISAS_UPDATE;
9823 break;
9824 }
9825#else
9ee6e8bb
PB
9826 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9827 /* We always get here via a jump, so know we are not in a
9828 conditional execution block. */
d9ba4830 9829 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9830 dc->is_jmp = DISAS_UPDATE;
9831 break;
9ee6e8bb
PB
9832 }
9833#endif
9834
72cf2d4f
BS
9835 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9836 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9837 if (bp->pc == dc->pc) {
bc4a0de0 9838 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9839 /* Advance PC so that clearing the breakpoint will
9840 invalidate this TB. */
9841 dc->pc += 2;
9842 goto done_generating;
1fddef4b
FB
9843 break;
9844 }
9845 }
9846 }
2c0262af 9847 if (search_pc) {
92414b31 9848 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
9849 if (lj < j) {
9850 lj++;
9851 while (lj < j)
ab1103de 9852 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 9853 }
25983cad 9854 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 9855 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 9856 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 9857 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 9858 }
e50e6a20 9859
2e70f6ef
PB
9860 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9861 gen_io_start();
9862
fdefe51c 9863 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
9864 tcg_gen_debug_insn_start(dc->pc);
9865 }
9866
7204ab88 9867 if (dc->thumb) {
9ee6e8bb
PB
9868 disas_thumb_insn(env, dc);
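            /* After each Thumb insn inside an IT block the state below is
             * advanced: the next mask bit is folded into the low bit of the
             * condition and the mask is shifted up by one; once the mask is
             * exhausted the IT state is cleared (illustrative summary, not
             * part of the original source). */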
9869 if (dc->condexec_mask) {
9870 dc->condexec_cond = (dc->condexec_cond & 0xe)
9871 | ((dc->condexec_mask >> 4) & 1);
9872 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9873 if (dc->condexec_mask == 0) {
9874 dc->condexec_cond = 0;
9875 }
9876 }
9877 } else {
9878 disas_arm_insn(env, dc);
9879 }
e50e6a20
FB
9880
9881 if (dc->condjmp && !dc->is_jmp) {
9882 gen_set_label(dc->condlabel);
9883 dc->condjmp = 0;
9884 }
3849902c
PM
9885
9886 if (tcg_check_temp_count()) {
9887 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9888 }
9889
aaf2d97d 9890 /* Translation stops when a conditional branch is encountered.
e50e6a20 9891 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9892 * Also stop translation when a page boundary is reached. This
bf20dc07 9893 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9894 num_insns ++;
efd7f486 9895 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
1fddef4b 9896 !env->singlestep_enabled &&
1b530a6d 9897 !singlestep &&
2e70f6ef
PB
9898 dc->pc < next_page_start &&
9899 num_insns < max_insns);
9900
9901 if (tb->cflags & CF_LAST_IO) {
9902 if (dc->condjmp) {
9903 /* FIXME: This can theoretically happen with self-modifying
9904 code. */
9905 cpu_abort(env, "IO on conditional branch instruction");
9906 }
9907 gen_io_end();
9908 }
9ee6e8bb 9909
b5ff1b31 9910 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9911 instruction was a conditional branch or trap, and the PC has
9912 already been written. */
551bd27f 9913 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9914 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9915 if (dc->condjmp) {
9ee6e8bb
PB
9916 gen_set_condexec(dc);
9917 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9918 gen_exception(EXCP_SWI);
9ee6e8bb 9919 } else {
d9ba4830 9920 gen_exception(EXCP_DEBUG);
9ee6e8bb 9921 }
e50e6a20
FB
9922 gen_set_label(dc->condlabel);
9923 }
9924 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9925 gen_set_pc_im(dc->pc);
e50e6a20 9926 dc->condjmp = 0;
8aaca4c0 9927 }
9ee6e8bb
PB
9928 gen_set_condexec(dc);
9929 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9930 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9931 } else {
9932 /* FIXME: Single stepping a WFI insn will not halt
9933 the CPU. */
d9ba4830 9934 gen_exception(EXCP_DEBUG);
9ee6e8bb 9935 }
8aaca4c0 9936 } else {
9ee6e8bb
PB
9937 /* While branches must always occur at the end of an IT block,
9938 there are a few other things that can cause us to terminate
65626741 9939 the TB in the middle of an IT block:
9ee6e8bb
PB
9940 - Exception generating instructions (bkpt, swi, undefined).
9941 - Page boundaries.
9942 - Hardware watchpoints.
9943 Hardware breakpoints have already been handled and skip this code.
9944 */
9945 gen_set_condexec(dc);
8aaca4c0 9946 switch(dc->is_jmp) {
8aaca4c0 9947 case DISAS_NEXT:
6e256c93 9948 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9949 break;
9950 default:
9951 case DISAS_JUMP:
9952 case DISAS_UPDATE:
9953 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9954 tcg_gen_exit_tb(0);
8aaca4c0
FB
9955 break;
9956 case DISAS_TB_JUMP:
9957 /* nothing more to generate */
9958 break;
9ee6e8bb 9959 case DISAS_WFI:
1ce94f81 9960 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
9961 break;
9962 case DISAS_SWI:
d9ba4830 9963 gen_exception(EXCP_SWI);
9ee6e8bb 9964 break;
8aaca4c0 9965 }
e50e6a20
FB
9966 if (dc->condjmp) {
9967 gen_set_label(dc->condlabel);
9ee6e8bb 9968 gen_set_condexec(dc);
6e256c93 9969 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9970 dc->condjmp = 0;
9971 }
2c0262af 9972 }
2e70f6ef 9973
9ee6e8bb 9974done_generating:
2e70f6ef 9975 gen_icount_end(tb, num_insns);
efd7f486 9976 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
9977
9978#ifdef DEBUG_DISAS
8fec2b8c 9979 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9980 qemu_log("----------------\n");
9981 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 9982 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 9983 dc->thumb | (dc->bswap_code << 1));
93fcfe39 9984 qemu_log("\n");
2c0262af
FB
9985 }
9986#endif
b5ff1b31 9987 if (search_pc) {
92414b31 9988 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
9989 lj++;
9990 while (lj <= j)
ab1103de 9991 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 9992 } else {
2c0262af 9993 tb->size = dc->pc - pc_start;
2e70f6ef 9994 tb->icount = num_insns;
b5ff1b31 9995 }
2c0262af
FB
9996}
9997
0ecb72a5 9998void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 9999{
2cfc5f17 10000 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
10001}
10002
0ecb72a5 10003void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10004{
2cfc5f17 10005 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
10006}
10007
b5ff1b31
FB
10008static const char *cpu_mode_names[16] = {
10009 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10010 "???", "???", "???", "und", "???", "???", "???", "sys"
10011};
9ee6e8bb 10012
0ecb72a5 10013void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10014 int flags)
2c0262af
FB
10015{
10016 int i;
b5ff1b31 10017 uint32_t psr;
2c0262af
FB
10018
10019 for(i=0;i<16;i++) {
7fe48483 10020 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10021 if ((i % 4) == 3)
7fe48483 10022 cpu_fprintf(f, "\n");
2c0262af 10023 else
7fe48483 10024 cpu_fprintf(f, " ");
2c0262af 10025 }
b5ff1b31 10026 psr = cpsr_read(env);
687fa640
TS
10027 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10028 psr,
b5ff1b31
FB
10029 psr & (1 << 31) ? 'N' : '-',
10030 psr & (1 << 30) ? 'Z' : '-',
10031 psr & (1 << 29) ? 'C' : '-',
10032 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10033 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10034 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10035
f2617cfc
PM
10036 if (flags & CPU_DUMP_FPU) {
10037 int numvfpregs = 0;
10038 if (arm_feature(env, ARM_FEATURE_VFP)) {
10039 numvfpregs += 16;
10040 }
10041 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10042 numvfpregs += 16;
10043 }
10044 for (i = 0; i < numvfpregs; i++) {
10045 uint64_t v = float64_val(env->vfp.regs[i]);
10046 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10047 i * 2, (uint32_t)v,
10048 i * 2 + 1, (uint32_t)(v >> 32),
10049 i, v);
10050 }
10051 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10052 }
2c0262af 10053}
a6b025d3 10054
0ecb72a5 10055void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10056{
25983cad 10057 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
e12ce78d 10058 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10059}