/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}

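/* Dual 16-bit signed multiply: leaves the product of the low halfwords
   in a and the product of the high halfwords in b. */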
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

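/* Shift by a register-specified amount; when 'flags' is set the *_cc helper
   variants also compute the shifter carry-out. */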
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

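/* ARM-encoded parallel add/subtract: op1 selects the variant (signed,
   unsigned, saturating, halving), op2 selects the operation. */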
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

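/* Generate a branch to 'label' that is taken when ARM condition code 'cc'
   holds, based on the cached NF/ZF/CF/VF flag fields. */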
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}

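/* For each data-processing opcode, 1 if it sets the flags as a logical
   operation (N and Z from the result only), 0 if it is arithmetic. */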
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

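/* Memory access helpers: each load returns a freshly allocated temporary,
   and each store consumes (frees) the value temporary. */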
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

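/* Return a TCG pointer to the float_status to use: the Neon "standard FP
   status" when 'neon' is set, otherwise the ordinary VFP fp_status. */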
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

PB
908#define VFP_OP2(name) \
909static inline void gen_vfp_##name(int dp) \
910{ \
ae1857ec
PM
911 TCGv_ptr fpst = get_fpstatus_ptr(0); \
912 if (dp) { \
913 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
914 } else { \
915 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
916 } \
917 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
918}
919
4373f3ce
PB
920VFP_OP2(add)
921VFP_OP2(sub)
922VFP_OP2(mul)
923VFP_OP2(div)
924
925#undef VFP_OP2
926
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

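/* Read or write one 32-bit pass of a NEON register as a TCG temporary;
   the 64-bit variants below access a whole D register. */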
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

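/* iwMMXt register file accessors: the wRn data registers are 64-bit,
   the wCx control registers are 32-bit. */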
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

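/* Decode the addressing mode of an iwMMXt load/store, leaving the effective
   address in 'dest' and performing any base register writeback.
   Returns nonzero for an unhandled encoding. */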
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

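/* Fetch an iwMMXt shift amount, either from a wCGRn control register or from
   the low half of wRd, and mask it with 'mask'. Returns nonzero if the
   control register index is out of range. */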
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
1692 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1697 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1698 tcg_gen_andi_i32(tmp, tmp, 7);
1699 iwmmxt_load_reg(cpu_V1, rd1);
1700 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1701 tcg_temp_free_i32(tmp);
18c9b560
AZ
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 break;
1705 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1706 if (((insn >> 6) & 3) == 3)
1707 return 1;
18c9b560
AZ
1708 rd = (insn >> 12) & 0xf;
1709 wrd = (insn >> 16) & 0xf;
da6b5335 1710 tmp = load_reg(s, rd);
18c9b560
AZ
1711 gen_op_iwmmxt_movq_M0_wRn(wrd);
1712 switch ((insn >> 6) & 3) {
1713 case 0:
da6b5335
FN
1714 tmp2 = tcg_const_i32(0xff);
1715 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1716 break;
1717 case 1:
da6b5335
FN
1718 tmp2 = tcg_const_i32(0xffff);
1719 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1720 break;
1721 case 2:
da6b5335
FN
1722 tmp2 = tcg_const_i32(0xffffffff);
1723 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1724 break;
da6b5335
FN
1725 default:
1726 TCGV_UNUSED(tmp2);
1727 TCGV_UNUSED(tmp3);
18c9b560 1728 }
da6b5335
FN
1729 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1730 tcg_temp_free(tmp3);
1731 tcg_temp_free(tmp2);
7d1b0095 1732 tcg_temp_free_i32(tmp);
18c9b560
AZ
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1737 rd = (insn >> 12) & 0xf;
1738 wrd = (insn >> 16) & 0xf;
da6b5335 1739 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1740 return 1;
1741 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1742 tmp = tcg_temp_new_i32();
18c9b560
AZ
1743 switch ((insn >> 22) & 3) {
1744 case 0:
da6b5335
FN
1745 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1746 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1747 if (insn & 8) {
1748 tcg_gen_ext8s_i32(tmp, tmp);
1749 } else {
1750 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1751 }
1752 break;
1753 case 1:
da6b5335
FN
1754 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1755 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1756 if (insn & 8) {
1757 tcg_gen_ext16s_i32(tmp, tmp);
1758 } else {
1759 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1760 }
1761 break;
1762 case 2:
da6b5335
FN
1763 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1764 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1765 break;
18c9b560 1766 }
da6b5335 1767 store_reg(s, rd, tmp);
18c9b560
AZ
1768 break;
1769 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1770 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1771 return 1;
da6b5335 1772 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1773 switch ((insn >> 22) & 3) {
1774 case 0:
da6b5335 1775 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1776 break;
1777 case 1:
da6b5335 1778 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1779 break;
1780 case 2:
da6b5335 1781 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1782 break;
18c9b560 1783 }
da6b5335
FN
1784 tcg_gen_shli_i32(tmp, tmp, 28);
1785 gen_set_nzcv(tmp);
7d1b0095 1786 tcg_temp_free_i32(tmp);
18c9b560
AZ
1787 break;
1788 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1789 if (((insn >> 6) & 3) == 3)
1790 return 1;
18c9b560
AZ
1791 rd = (insn >> 12) & 0xf;
1792 wrd = (insn >> 16) & 0xf;
da6b5335 1793 tmp = load_reg(s, rd);
18c9b560
AZ
1794 switch ((insn >> 6) & 3) {
1795 case 0:
da6b5335 1796 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1797 break;
1798 case 1:
da6b5335 1799 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1800 break;
1801 case 2:
da6b5335 1802 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1803 break;
18c9b560 1804 }
7d1b0095 1805 tcg_temp_free_i32(tmp);
18c9b560
AZ
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1810 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1811 return 1;
da6b5335 1812 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1813 tmp2 = tcg_temp_new_i32();
da6b5335 1814 tcg_gen_mov_i32(tmp2, tmp);
1815 switch ((insn >> 22) & 3) {
1816 case 0:
1817 for (i = 0; i < 7; i ++) {
1818 tcg_gen_shli_i32(tmp2, tmp2, 4);
1819 tcg_gen_and_i32(tmp, tmp, tmp2);
1820 }
1821 break;
1822 case 1:
1823 for (i = 0; i < 3; i ++) {
1824 tcg_gen_shli_i32(tmp2, tmp2, 8);
1825 tcg_gen_and_i32(tmp, tmp, tmp2);
1826 }
1827 break;
1828 case 2:
1829 tcg_gen_shli_i32(tmp2, tmp2, 16);
1830 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1831 break;
18c9b560 1832 }
da6b5335 1833 gen_set_nzcv(tmp);
1834 tcg_temp_free_i32(tmp2);
1835 tcg_temp_free_i32(tmp);
1836 break;
1837 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 switch ((insn >> 22) & 3) {
1842 case 0:
e677137d 1843 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1844 break;
1845 case 1:
e677137d 1846 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1847 break;
1848 case 2:
e677137d 1849 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1850 break;
1851 case 3:
1852 return 1;
1853 }
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 break;
1857 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1858 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1859 return 1;
da6b5335 1860 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1861 tmp2 = tcg_temp_new_i32();
da6b5335 1862 tcg_gen_mov_i32(tmp2, tmp);
1863 switch ((insn >> 22) & 3) {
1864 case 0:
1865 for (i = 0; i < 7; i ++) {
1866 tcg_gen_shli_i32(tmp2, tmp2, 4);
1867 tcg_gen_or_i32(tmp, tmp, tmp2);
1868 }
1869 break;
1870 case 1:
1871 for (i = 0; i < 3; i ++) {
1872 tcg_gen_shli_i32(tmp2, tmp2, 8);
1873 tcg_gen_or_i32(tmp, tmp, tmp2);
1874 }
1875 break;
1876 case 2:
1877 tcg_gen_shli_i32(tmp2, tmp2, 16);
1878 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1879 break;
18c9b560 1880 }
da6b5335 1881 gen_set_nzcv(tmp);
1882 tcg_temp_free_i32(tmp2);
1883 tcg_temp_free_i32(tmp);
1884 break;
1885 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1886 rd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
da6b5335 1888 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1889 return 1;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1891 tmp = tcg_temp_new_i32();
1892 switch ((insn >> 22) & 3) {
1893 case 0:
da6b5335 1894 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1895 break;
1896 case 1:
da6b5335 1897 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1898 break;
1899 case 2:
da6b5335 1900 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1901 break;
18c9b560 1902 }
da6b5335 1903 store_reg(s, rd, tmp);
1904 break;
1905 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1906 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
1911 switch ((insn >> 22) & 3) {
1912 case 0:
1913 if (insn & (1 << 21))
1914 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1915 else
1916 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1917 break;
1918 case 1:
1919 if (insn & (1 << 21))
1920 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1923 break;
1924 case 2:
1925 if (insn & (1 << 21))
1926 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1927 else
1928 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1929 break;
1930 case 3:
1931 return 1;
1932 }
1933 gen_op_iwmmxt_movq_wRn_M0(wrd);
1934 gen_op_iwmmxt_set_mup();
1935 gen_op_iwmmxt_set_cup();
1936 break;
1937 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1938 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1939 wrd = (insn >> 12) & 0xf;
1940 rd0 = (insn >> 16) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_unpacklsb_M0();
1946 else
1947 gen_op_iwmmxt_unpacklub_M0();
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_unpacklsw_M0();
1952 else
1953 gen_op_iwmmxt_unpackluw_M0();
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_unpacklsl_M0();
1958 else
1959 gen_op_iwmmxt_unpacklul_M0();
1960 break;
1961 case 3:
1962 return 1;
1963 }
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1969 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpackhsb_M0();
1977 else
1978 gen_op_iwmmxt_unpackhub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpackhsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackhuw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpackhsl_M0();
1989 else
1990 gen_op_iwmmxt_unpackhul_M0();
1991 break;
1992 case 3:
1993 return 1;
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
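    /* The shift group below (WSRL/WSRA/WSLL/WROR) obtains its shift count
     * via gen_iwmmxt_shift(); a nonzero return from that helper marks an
     * invalid encoding and the instruction UNDEFs.
     */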
1999 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2000 case 0x214: case 0x614: case 0xa14: case 0xe14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2006 tmp = tcg_temp_new_i32();
da6b5335 2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2008 tcg_temp_free_i32(tmp);
18c9b560 2009 return 1;
da6b5335 2010 }
18c9b560 2011 switch ((insn >> 22) & 3) {
18c9b560 2012 case 1:
477955bd 2013 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2014 break;
2015 case 2:
477955bd 2016 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2017 break;
2018 case 3:
477955bd 2019 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 break;
2021 }
7d1b0095 2022 tcg_temp_free_i32(tmp);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2028 case 0x014: case 0x414: case 0x814: case 0xc14:
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2034 tmp = tcg_temp_new_i32();
da6b5335 2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2036 tcg_temp_free_i32(tmp);
18c9b560 2037 return 1;
da6b5335 2038 }
18c9b560 2039 switch ((insn >> 22) & 3) {
18c9b560 2040 case 1:
477955bd 2041 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2042 break;
2043 case 2:
477955bd 2044 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2045 break;
2046 case 3:
477955bd 2047 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 break;
2049 }
7d1b0095 2050 tcg_temp_free_i32(tmp);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2056 case 0x114: case 0x514: case 0x914: case 0xd14:
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2062 tmp = tcg_temp_new_i32();
da6b5335 2063 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2064 tcg_temp_free_i32(tmp);
18c9b560 2065 return 1;
da6b5335 2066 }
18c9b560 2067 switch ((insn >> 22) & 3) {
18c9b560 2068 case 1:
477955bd 2069 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 break;
2071 case 2:
477955bd 2072 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2073 break;
2074 case 3:
477955bd 2075 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2076 break;
2077 }
7d1b0095 2078 tcg_temp_free_i32(tmp);
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2084 case 0x314: case 0x714: case 0xb14: case 0xf14:
2085 if (((insn >> 22) & 3) == 0)
2086 return 1;
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2090 tmp = tcg_temp_new_i32();
18c9b560 2091 switch ((insn >> 22) & 3) {
18c9b560 2092 case 1:
da6b5335 2093 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2094 tcg_temp_free_i32(tmp);
18c9b560 2095 return 1;
da6b5335 2096 }
477955bd 2097 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2098 break;
2099 case 2:
da6b5335 2100 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2101 tcg_temp_free_i32(tmp);
18c9b560 2102 return 1;
da6b5335 2103 }
477955bd 2104 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2105 break;
2106 case 3:
da6b5335 2107 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2108 tcg_temp_free_i32(tmp);
18c9b560 2109 return 1;
da6b5335 2110 }
477955bd 2111 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2112 break;
2113 }
7d1b0095 2114 tcg_temp_free_i32(tmp);
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2120 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 rd1 = (insn >> 0) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 switch ((insn >> 22) & 3) {
2126 case 0:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_minub_M0_wRn(rd1);
2131 break;
2132 case 1:
2133 if (insn & (1 << 21))
2134 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2135 else
2136 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2137 break;
2138 case 2:
2139 if (insn & (1 << 21))
2140 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2141 else
2142 gen_op_iwmmxt_minul_M0_wRn(rd1);
2143 break;
2144 case 3:
2145 return 1;
2146 }
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2151 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2182 case 0x402: case 0x502: case 0x602: case 0x702:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
2187 tmp = tcg_const_i32((insn >> 20) & 3);
2188 iwmmxt_load_reg(cpu_V1, rd1);
2189 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2190 tcg_temp_free(tmp);
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 break;
2194 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2195 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2196 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2197 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 20) & 0xf) {
2203 case 0x0:
2204 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2205 break;
2206 case 0x1:
2207 gen_op_iwmmxt_subub_M0_wRn(rd1);
2208 break;
2209 case 0x3:
2210 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2211 break;
2212 case 0x4:
2213 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2214 break;
2215 case 0x5:
2216 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2217 break;
2218 case 0x7:
2219 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2220 break;
2221 case 0x8:
2222 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2223 break;
2224 case 0x9:
2225 gen_op_iwmmxt_subul_M0_wRn(rd1);
2226 break;
2227 case 0xb:
2228 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2229 break;
2230 default:
2231 return 1;
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2236 break;
2237 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2238 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2239 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2240 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2244 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2245 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2246 tcg_temp_free(tmp);
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2252 case 0x418: case 0x518: case 0x618: case 0x718:
2253 case 0x818: case 0x918: case 0xa18: case 0xb18:
2254 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 rd1 = (insn >> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 20) & 0xf) {
2260 case 0x0:
2261 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2262 break;
2263 case 0x1:
2264 gen_op_iwmmxt_addub_M0_wRn(rd1);
2265 break;
2266 case 0x3:
2267 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2268 break;
2269 case 0x4:
2270 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2271 break;
2272 case 0x5:
2273 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2274 break;
2275 case 0x7:
2276 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2277 break;
2278 case 0x8:
2279 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2280 break;
2281 case 0x9:
2282 gen_op_iwmmxt_addul_M0_wRn(rd1);
2283 break;
2284 case 0xb:
2285 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2286 break;
2287 default:
2288 return 1;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2295 case 0x408: case 0x508: case 0x608: case 0x708:
2296 case 0x808: case 0x908: case 0xa08: case 0xb08:
2297 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2298 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2299 return 1;
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2304 switch ((insn >> 22) & 3) {
2305 case 1:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2308 else
2309 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2310 break;
2311 case 2:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_packul_M0_wRn(rd1);
2316 break;
2317 case 3:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2322 break;
2323 }
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
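    /* TMIA/TMIAPH/TMIAxy: multiply two ARM core registers (optionally taking
     * only the top halfwords) and accumulate the product into a wMMX
     * register.
     */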
2328 case 0x201: case 0x203: case 0x205: case 0x207:
2329 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2330 case 0x211: case 0x213: case 0x215: case 0x217:
2331 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2332 wrd = (insn >> 5) & 0xf;
2333 rd0 = (insn >> 12) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 if (rd0 == 0xf || rd1 == 0xf)
2336 return 1;
2337 gen_op_iwmmxt_movq_M0_wRn(wrd);
2338 tmp = load_reg(s, rd0);
2339 tmp2 = load_reg(s, rd1);
2340 switch ((insn >> 16) & 0xf) {
2341 case 0x0: /* TMIA */
da6b5335 2342 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2343 break;
2344 case 0x8: /* TMIAPH */
da6b5335 2345 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2346 break;
2347 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2348 if (insn & (1 << 16))
da6b5335 2349 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2350 if (insn & (1 << 17))
2351 tcg_gen_shri_i32(tmp2, tmp2, 16);
2352 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2353 break;
2354 default:
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
2357 return 1;
2358 }
2359 tcg_temp_free_i32(tmp2);
2360 tcg_temp_free_i32(tmp);
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 break;
2364 default:
2365 return 1;
2366 }
2367
2368 return 0;
2369}
2370
/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
0ecb72a5 2373static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2374{
2375 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2376 TCGv tmp, tmp2;
2377
2378 if ((insn & 0x0ff00f10) == 0x0e200010) {
2379 /* Multiply with Internal Accumulate Format */
2380 rd0 = (insn >> 12) & 0xf;
2381 rd1 = insn & 0xf;
2382 acc = (insn >> 5) & 7;
2383
2384 if (acc != 0)
2385 return 1;
2386
2387 tmp = load_reg(s, rd0);
2388 tmp2 = load_reg(s, rd1);
2389 switch ((insn >> 16) & 0xf) {
2390 case 0x0: /* MIA */
3a554c0f 2391 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2392 break;
2393 case 0x8: /* MIAPH */
3a554c0f 2394 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2395 break;
2396 case 0xc: /* MIABB */
2397 case 0xd: /* MIABT */
2398 case 0xe: /* MIATB */
2399 case 0xf: /* MIATT */
18c9b560 2400 if (insn & (1 << 16))
3a554c0f 2401 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2402 if (insn & (1 << 17))
2403 tcg_gen_shri_i32(tmp2, tmp2, 16);
2404 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2405 break;
2406 default:
2407 return 1;
2408 }
2409 tcg_temp_free_i32(tmp2);
2410 tcg_temp_free_i32(tmp);
2411
2412 gen_op_iwmmxt_movq_wRn_M0(acc);
2413 return 0;
2414 }
2415
2416 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2417 /* Internal Accumulator Access Format */
2418 rdhi = (insn >> 16) & 0xf;
2419 rdlo = (insn >> 12) & 0xf;
2420 acc = insn & 7;
2421
2422 if (acc != 0)
2423 return 1;
2424
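        /* The XScale accumulator acc0 is 40 bits wide: MRA returns the low
         * word in rdlo and bits [39:32] in rdhi (hence the mask below), while
         * MAR packs rdlo/rdhi back into the accumulator.
         */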
2425 if (insn & ARM_CP_RW_BIT) { /* MRA */
2426 iwmmxt_load_reg(cpu_V0, acc);
2427 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2428 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2429 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2430 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2431 } else { /* MAR */
2432 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2433 iwmmxt_store_reg(cpu_V0, acc);
2434 }
2435 return 0;
2436 }
2437
2438 return 1;
2439}
2440
/* Disassemble a system coprocessor instruction. Returns nonzero if the
   instruction is not defined. */
0ecb72a5 2443static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
c1713132 2444{
b75263d6 2445 TCGv tmp, tmp2;
2446 uint32_t rd = (insn >> 12) & 0xf;
2447 uint32_t cp = (insn >> 8) & 0xf;
2448 if (IS_USER(s)) {
2449 return 1;
2450 }
2451
18c9b560 2452 if (insn & ARM_CP_RW_BIT) {
2453 if (!env->cp[cp].cp_read)
2454 return 1;
8984bd2e 2455 gen_set_pc_im(s->pc);
7d1b0095 2456 tmp = tcg_temp_new_i32();
2457 tmp2 = tcg_const_i32(insn);
2458 gen_helper_get_cp(tmp, cpu_env, tmp2);
2459 tcg_temp_free(tmp2);
8984bd2e 2460 store_reg(s, rd, tmp);
2461 } else {
2462 if (!env->cp[cp].cp_write)
2463 return 1;
2464 gen_set_pc_im(s->pc);
2465 tmp = load_reg(s, rd);
2466 tmp2 = tcg_const_i32(insn);
2467 gen_helper_set_cp(cpu_env, tmp2, tmp);
2468 tcg_temp_free(tmp2);
7d1b0095 2469 tcg_temp_free_i32(tmp);
2470 }
2471 return 0;
2472}
2473
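/* Returns nonzero if a cp15 access from user mode is permitted: only certain
 * v7 performance monitor registers and the user-accessible TLS registers
 * qualify.
 */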
0ecb72a5 2474static int cp15_user_ok(CPUARMState *env, uint32_t insn)
2475{
2476 int cpn = (insn >> 16) & 0xf;
2477 int cpm = insn & 0xf;
2478 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2479
2480 if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
2481 /* Performance monitor registers fall into three categories:
2482 * (a) always UNDEF in usermode
2483 * (b) UNDEF only if PMUSERENR.EN is 0
2484 * (c) always read OK and UNDEF on write (PMUSERENR only)
2485 */
2486 if ((cpm == 12 && (op < 6)) ||
2487 (cpm == 13 && (op < 3))) {
2488 return env->cp15.c9_pmuserenr;
2489 } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
2490 /* PMUSERENR, read only */
2491 return 1;
2492 }
2493 return 0;
2494 }
2495
2496 if (cpn == 13 && cpm == 0) {
2497 /* TLS register. */
2498 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2499 return 1;
2500 }
2501 return 0;
2502}
2503
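/* Handle MRC/MCR to the cp15 c13 software thread ID (TLS) registers inline;
 * returns nonzero if the access was handled here.
 */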
0ecb72a5 2504static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2505{
2506 TCGv tmp;
2507 int cpn = (insn >> 16) & 0xf;
2508 int cpm = insn & 0xf;
2509 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2510
2511 if (!arm_feature(env, ARM_FEATURE_V6K))
2512 return 0;
2513
2514 if (!(cpn == 13 && cpm == 0))
2515 return 0;
2516
2517 if (insn & ARM_CP_RW_BIT) {
2518 switch (op) {
2519 case 2:
c5883be2 2520 tmp = load_cpu_field(cp15.c13_tls1);
2521 break;
2522 case 3:
c5883be2 2523 tmp = load_cpu_field(cp15.c13_tls2);
2524 break;
2525 case 4:
c5883be2 2526 tmp = load_cpu_field(cp15.c13_tls3);
2527 break;
2528 default:
2529 return 0;
2530 }
2531 store_reg(s, rd, tmp);
2532
2533 } else {
2534 tmp = load_reg(s, rd);
2535 switch (op) {
2536 case 2:
c5883be2 2537 store_cpu_field(tmp, cp15.c13_tls1);
2538 break;
2539 case 3:
c5883be2 2540 store_cpu_field(tmp, cp15.c13_tls2);
2541 break;
2542 case 4:
c5883be2 2543 store_cpu_field(tmp, cp15.c13_tls3);
2544 break;
2545 default:
7d1b0095 2546 tcg_temp_free_i32(tmp);
2547 return 0;
2548 }
2549 }
2550 return 1;
2551}
2552
/* Disassemble a system coprocessor (cp15) instruction. Returns nonzero if the
   instruction is not defined. */
0ecb72a5 2555static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2556{
2557 uint32_t rd;
b75263d6 2558 TCGv tmp, tmp2;
b5ff1b31 2559
2560 /* M profile cores use memory mapped registers instead of cp15. */
2561 if (arm_feature(env, ARM_FEATURE_M))
2562 return 1;
2563
2564 if ((insn & (1 << 25)) == 0) {
2565 if (insn & (1 << 20)) {
2566 /* mrrc */
2567 return 1;
2568 }
2569 /* mcrr. Used for block cache operations, so implement as no-op. */
2570 return 0;
2571 }
2572 if ((insn & (1 << 4)) == 0) {
2573 /* cdp */
2574 return 1;
2575 }
2576 /* We special case a number of cp15 instructions which were used
2577 * for things which are real instructions in ARMv7. This allows
2578 * them to work in linux-user mode which doesn't provide functional
2579 * get_cp15/set_cp15 helpers, and is more efficient anyway.
cc688901 2580 */
2581 switch ((insn & 0x0fff0fff)) {
2582 case 0x0e070f90:
2583 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2584 * In v7, this must NOP.
2585 */
2586 if (IS_USER(s)) {
2587 return 1;
2588 }
2589 if (!arm_feature(env, ARM_FEATURE_V7)) {
2590 /* Wait for interrupt. */
2591 gen_set_pc_im(s->pc);
2592 s->is_jmp = DISAS_WFI;
2593 }
9332f9da 2594 return 0;
87f19eb2 2595 case 0x0e070f58:
2596 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2597 * so this is slightly over-broad.
2598 */
87f19eb2 2599 if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
2600 /* Wait for interrupt. */
2601 gen_set_pc_im(s->pc);
2602 s->is_jmp = DISAS_WFI;
2603 return 0;
2604 }
87f19eb2 2605 /* Otherwise continue to handle via helper function.
2606 * In particular, on v7 and some v6 cores this is one of
2607 * the VA-PA registers.
2608 */
2609 break;
2610 case 0x0e070f3d:
2611 /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
2612 if (arm_feature(env, ARM_FEATURE_V6)) {
2613 return IS_USER(s) ? 1 : 0;
2614 }
2615 break;
2616 case 0x0e070f95: /* 0,c7,c5,4 : ISB */
2617 case 0x0e070f9a: /* 0,c7,c10,4: DSB */
2618 case 0x0e070fba: /* 0,c7,c10,5: DMB */
2619 /* Barriers in both v6 and v7 */
2620 if (arm_feature(env, ARM_FEATURE_V6)) {
2621 return 0;
2622 }
2623 break;
2624 default:
2625 break;
2626 }
2627
2628 if (IS_USER(s) && !cp15_user_ok(env, insn)) {
2629 return 1;
2630 }
2631
b5ff1b31 2632 rd = (insn >> 12) & 0xf;
2633
2634 if (cp15_tls_load_store(env, s, insn, rd))
2635 return 0;
2636
b75263d6 2637 tmp2 = tcg_const_i32(insn);
18c9b560 2638 if (insn & ARM_CP_RW_BIT) {
7d1b0095 2639 tmp = tcg_temp_new_i32();
b75263d6 2640 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2641 /* If the destination register is r15 then sets condition codes. */
2642 if (rd != 15)
2643 store_reg(s, rd, tmp);
2644 else
7d1b0095 2645 tcg_temp_free_i32(tmp);
b5ff1b31 2646 } else {
8984bd2e 2647 tmp = load_reg(s, rd);
b75263d6 2648 gen_helper_set_cp15(cpu_env, tmp2, tmp);
7d1b0095 2649 tcg_temp_free_i32(tmp);
2650 /* Normally we would always end the TB here, but Linux
2651 * arch/arm/mach-pxa/sleep.S expects two instructions following
2652 * an MMU enable to execute from cache. Imitate this behaviour. */
2653 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2654 (insn & 0x0fff0fff) != 0x0e010f10)
2655 gen_lookup_tb(s);
b5ff1b31 2656 }
b75263d6 2657 tcg_temp_free_i32(tmp2);
2658 return 0;
2659}
2660
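/* Helpers for pulling VFP register numbers out of an instruction: an S
 * register is a 4-bit field plus one extra low bit, while a D register only
 * gets its fifth (top) bit on VFP3 and later; on earlier cores a set top bit
 * makes the encoding UNDEF.
 */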
2661#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2662#define VFP_SREG(insn, bigbit, smallbit) \
2663 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2664#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2665 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2666 reg = (((insn) >> (bigbit)) & 0x0f) \
2667 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2668 } else { \
2669 if (insn & (1 << (smallbit))) \
2670 return 1; \
2671 reg = ((insn) >> (bigbit)) & 0x0f; \
2672 }} while (0)
2673
2674#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2675#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2676#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2677#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2678#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2679#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2680
2681/* Move between integer and VFP cores. */
2682static TCGv gen_vfp_mrs(void)
2683{
7d1b0095 2684 TCGv tmp = tcg_temp_new_i32();
2685 tcg_gen_mov_i32(tmp, cpu_F0s);
2686 return tmp;
2687}
2688
2689static void gen_vfp_msr(TCGv tmp)
2690{
2691 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2692 tcg_temp_free_i32(tmp);
2693}
2694
2695static void gen_neon_dup_u8(TCGv var, int shift)
2696{
7d1b0095 2697 TCGv tmp = tcg_temp_new_i32();
2698 if (shift)
2699 tcg_gen_shri_i32(var, var, shift);
86831435 2700 tcg_gen_ext8u_i32(var, var);
2701 tcg_gen_shli_i32(tmp, var, 8);
2702 tcg_gen_or_i32(var, var, tmp);
2703 tcg_gen_shli_i32(tmp, var, 16);
2704 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2705 tcg_temp_free_i32(tmp);
2706}
2707
2708static void gen_neon_dup_low16(TCGv var)
2709{
7d1b0095 2710 TCGv tmp = tcg_temp_new_i32();
86831435 2711 tcg_gen_ext16u_i32(var, var);
2712 tcg_gen_shli_i32(tmp, var, 16);
2713 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2714 tcg_temp_free_i32(tmp);
2715}
2716
2717static void gen_neon_dup_high16(TCGv var)
2718{
7d1b0095 2719 TCGv tmp = tcg_temp_new_i32();
2720 tcg_gen_andi_i32(var, var, 0xffff0000);
2721 tcg_gen_shri_i32(tmp, var, 16);
2722 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2723 tcg_temp_free_i32(tmp);
2724}
2725
2726static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2727{
2728 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2729 TCGv tmp;
2730 switch (size) {
2731 case 0:
2732 tmp = gen_ld8u(addr, IS_USER(s));
2733 gen_neon_dup_u8(tmp, 0);
2734 break;
2735 case 1:
2736 tmp = gen_ld16u(addr, IS_USER(s));
2737 gen_neon_dup_low16(tmp);
2738 break;
2739 case 2:
2740 tmp = gen_ld32(addr, IS_USER(s));
2741 break;
2742 default: /* Avoid compiler warnings. */
2743 abort();
2744 }
2745 return tmp;
2746}
2747
/* Disassemble a VFP instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
0ecb72a5 2750static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
2751{
2752 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2753 int dp, veclen;
312eea9f 2754 TCGv addr;
4373f3ce 2755 TCGv tmp;
ad69471c 2756 TCGv tmp2;
b7bcbe95 2757
2758 if (!arm_feature(env, ARM_FEATURE_VFP))
2759 return 1;
2760
5df8bac1 2761 if (!s->vfp_enabled) {
9ee6e8bb 2762 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2763 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2764 return 1;
2765 rn = (insn >> 16) & 0xf;
2766 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2767 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2768 return 1;
2769 }
2770 dp = ((insn & 0xf00) == 0xb00);
2771 switch ((insn >> 24) & 0xf) {
2772 case 0xe:
2773 if (insn & (1 << 4)) {
2774 /* single register transfer */
2775 rd = (insn >> 12) & 0xf;
2776 if (dp) {
2777 int size;
2778 int pass;
2779
2780 VFP_DREG_N(rn, insn);
2781 if (insn & 0xf)
b7bcbe95 2782 return 1;
2783 if (insn & 0x00c00060
2784 && !arm_feature(env, ARM_FEATURE_NEON))
2785 return 1;
2786
2787 pass = (insn >> 21) & 1;
2788 if (insn & (1 << 22)) {
2789 size = 0;
2790 offset = ((insn >> 5) & 3) * 8;
2791 } else if (insn & (1 << 5)) {
2792 size = 1;
2793 offset = (insn & (1 << 6)) ? 16 : 0;
2794 } else {
2795 size = 2;
2796 offset = 0;
2797 }
18c9b560 2798 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2799 /* vfp->arm */
ad69471c 2800 tmp = neon_load_reg(rn, pass);
2801 switch (size) {
2802 case 0:
9ee6e8bb 2803 if (offset)
ad69471c 2804 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2805 if (insn & (1 << 23))
ad69471c 2806 gen_uxtb(tmp);
9ee6e8bb 2807 else
ad69471c 2808 gen_sxtb(tmp);
2809 break;
2810 case 1:
2811 if (insn & (1 << 23)) {
2812 if (offset) {
ad69471c 2813 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2814 } else {
ad69471c 2815 gen_uxth(tmp);
2816 }
2817 } else {
2818 if (offset) {
ad69471c 2819 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2820 } else {
ad69471c 2821 gen_sxth(tmp);
2822 }
2823 }
2824 break;
2825 case 2:
2826 break;
2827 }
ad69471c 2828 store_reg(s, rd, tmp);
2829 } else {
2830 /* arm->vfp */
ad69471c 2831 tmp = load_reg(s, rd);
2832 if (insn & (1 << 23)) {
2833 /* VDUP */
2834 if (size == 0) {
ad69471c 2835 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2836 } else if (size == 1) {
ad69471c 2837 gen_neon_dup_low16(tmp);
9ee6e8bb 2838 }
cbbccffc 2839 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2840 tmp2 = tcg_temp_new_i32();
2841 tcg_gen_mov_i32(tmp2, tmp);
2842 neon_store_reg(rn, n, tmp2);
2843 }
2844 neon_store_reg(rn, n, tmp);
2845 } else {
2846 /* VMOV */
2847 switch (size) {
2848 case 0:
2849 tmp2 = neon_load_reg(rn, pass);
2850 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2851 tcg_temp_free_i32(tmp2);
2852 break;
2853 case 1:
2854 tmp2 = neon_load_reg(rn, pass);
2855 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2856 tcg_temp_free_i32(tmp2);
2857 break;
2858 case 2:
2859 break;
2860 }
ad69471c 2861 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2862 }
b7bcbe95 2863 }
2864 } else { /* !dp */
2865 if ((insn & 0x6f) != 0x00)
2866 return 1;
2867 rn = VFP_SREG_N(insn);
18c9b560 2868 if (insn & ARM_CP_RW_BIT) {
2869 /* vfp->arm */
2870 if (insn & (1 << 21)) {
2871 /* system register */
40f137e1 2872 rn >>= 1;
9ee6e8bb 2873
b7bcbe95 2874 switch (rn) {
40f137e1 2875 case ARM_VFP_FPSID:
4373f3ce 2876 /* VFP2 allows access to FSID from userspace.
2877 VFP3 restricts all id registers to privileged
2878 accesses. */
2879 if (IS_USER(s)
2880 && arm_feature(env, ARM_FEATURE_VFP3))
2881 return 1;
4373f3ce 2882 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2883 break;
40f137e1 2884 case ARM_VFP_FPEXC:
2885 if (IS_USER(s))
2886 return 1;
4373f3ce 2887 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
2891 /* Not present in VFP3. */
2892 if (IS_USER(s)
2893 || arm_feature(env, ARM_FEATURE_VFP3))
2894 return 1;
4373f3ce 2895 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2896 break;
40f137e1 2897 case ARM_VFP_FPSCR:
601d70b9 2898 if (rd == 15) {
2899 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2900 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2901 } else {
7d1b0095 2902 tmp = tcg_temp_new_i32();
2903 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2904 }
b7bcbe95 2905 break;
2906 case ARM_VFP_MVFR0:
2907 case ARM_VFP_MVFR1:
2908 if (IS_USER(s)
2909 || !arm_feature(env, ARM_FEATURE_VFP3))
2910 return 1;
4373f3ce 2911 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2912 break;
2913 default:
2914 return 1;
2915 }
2916 } else {
2917 gen_mov_F0_vreg(0, rn);
4373f3ce 2918 tmp = gen_vfp_mrs();
2919 }
2920 if (rd == 15) {
b5ff1b31 2921 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2922 gen_set_nzcv(tmp);
7d1b0095 2923 tcg_temp_free_i32(tmp);
2924 } else {
2925 store_reg(s, rd, tmp);
2926 }
2927 } else {
2928 /* arm->vfp */
4373f3ce 2929 tmp = load_reg(s, rd);
b7bcbe95 2930 if (insn & (1 << 21)) {
40f137e1 2931 rn >>= 1;
2932 /* system register */
2933 switch (rn) {
40f137e1 2934 case ARM_VFP_FPSID:
2935 case ARM_VFP_MVFR0:
2936 case ARM_VFP_MVFR1:
2937 /* Writes are ignored. */
2938 break;
40f137e1 2939 case ARM_VFP_FPSCR:
4373f3ce 2940 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2941 tcg_temp_free_i32(tmp);
b5ff1b31 2942 gen_lookup_tb(s);
b7bcbe95 2943 break;
40f137e1 2944 case ARM_VFP_FPEXC:
2945 if (IS_USER(s))
2946 return 1;
2947 /* TODO: VFP subarchitecture support.
2948 * For now, keep the EN bit only */
2949 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2950 store_cpu_field(tmp, vfp.xregs[rn]);
2951 gen_lookup_tb(s);
2952 break;
2953 case ARM_VFP_FPINST:
2954 case ARM_VFP_FPINST2:
4373f3ce 2955 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2956 break;
2957 default:
2958 return 1;
2959 }
2960 } else {
4373f3ce 2961 gen_vfp_msr(tmp);
2962 gen_mov_vreg_F0(0, rn);
2963 }
2964 }
2965 }
2966 } else {
2967 /* data processing */
2968 /* The opcode is in bits 23, 21, 20 and 6. */
2969 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2970 if (dp) {
2971 if (op == 15) {
2972 /* rn is opcode */
2973 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2974 } else {
2975 /* rn is register number */
9ee6e8bb 2976 VFP_DREG_N(rn, insn);
2977 }
2978
04595bf6 2979 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2980 /* Integer or single precision destination. */
9ee6e8bb 2981 rd = VFP_SREG_D(insn);
b7bcbe95 2982 } else {
9ee6e8bb 2983 VFP_DREG_D(rd, insn);
b7bcbe95 2984 }
2985 if (op == 15 &&
2986 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2987 /* VCVT from int is always from S reg regardless of dp bit.
2988 * VCVT with immediate frac_bits has same format as SREG_M
2989 */
2990 rm = VFP_SREG_M(insn);
b7bcbe95 2991 } else {
9ee6e8bb 2992 VFP_DREG_M(rm, insn);
2993 }
2994 } else {
9ee6e8bb 2995 rn = VFP_SREG_N(insn);
2996 if (op == 15 && rn == 15) {
2997 /* Double precision destination. */
2998 VFP_DREG_D(rd, insn);
2999 } else {
3000 rd = VFP_SREG_D(insn);
3001 }
3002 /* NB that we implicitly rely on the encoding for the frac_bits
3003 * in VCVT of fixed to float being the same as that of an SREG_M
3004 */
9ee6e8bb 3005 rm = VFP_SREG_M(insn);
3006 }
3007
69d1fc22 3008 veclen = s->vec_len;
3009 if (op == 15 && rn > 3)
3010 veclen = 0;
3011
3012 /* Shut up compiler warnings. */
3013 delta_m = 0;
3014 delta_d = 0;
3015 bank_mask = 0;
3b46e624 3016
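            /* Short-vector operation: delta_d/delta_m are the register
             * strides for the destination and source banks, derived from the
             * FPSCR vector stride; delta_m == 0 means the second operand is
             * a scalar reused on every iteration.
             */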
3017 if (veclen > 0) {
3018 if (dp)
3019 bank_mask = 0xc;
3020 else
3021 bank_mask = 0x18;
3022
3023 /* Figure out what type of vector operation this is. */
3024 if ((rd & bank_mask) == 0) {
3025 /* scalar */
3026 veclen = 0;
3027 } else {
3028 if (dp)
69d1fc22 3029 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3030 else
69d1fc22 3031 delta_d = s->vec_stride + 1;
3032
3033 if ((rm & bank_mask) == 0) {
3034 /* mixed scalar/vector */
3035 delta_m = 0;
3036 } else {
3037 /* vector */
3038 delta_m = delta_d;
3039 }
3040 }
3041 }
3042
3043 /* Load the initial operands. */
3044 if (op == 15) {
3045 switch (rn) {
3046 case 16:
3047 case 17:
3048 /* Integer source */
3049 gen_mov_F0_vreg(0, rm);
3050 break;
3051 case 8:
3052 case 9:
3053 /* Compare */
3054 gen_mov_F0_vreg(dp, rd);
3055 gen_mov_F1_vreg(dp, rm);
3056 break;
3057 case 10:
3058 case 11:
3059 /* Compare with zero */
3060 gen_mov_F0_vreg(dp, rd);
3061 gen_vfp_F1_ld0(dp);
3062 break;
3063 case 20:
3064 case 21:
3065 case 22:
3066 case 23:
3067 case 28:
3068 case 29:
3069 case 30:
3070 case 31:
3071 /* Source and destination the same. */
3072 gen_mov_F0_vreg(dp, rd);
3073 break;
3074 case 4:
3075 case 5:
3076 case 6:
3077 case 7:
3078 /* VCVTB, VCVTT: only present with the halfprec extension,
3079 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3080 */
3081 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3082 return 1;
3083 }
3084 /* Otherwise fall through */
3085 default:
3086 /* One source operand. */
3087 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3088 break;
3089 }
3090 } else {
3091 /* Two source operands. */
3092 gen_mov_F0_vreg(dp, rn);
3093 gen_mov_F1_vreg(dp, rm);
3094 }
3095
3096 for (;;) {
3097 /* Perform the calculation. */
3098 switch (op) {
3099 case 0: /* VMLA: fd + (fn * fm) */
3100 /* Note that order of inputs to the add matters for NaNs */
3101 gen_vfp_F1_mul(dp);
3102 gen_mov_F0_vreg(dp, rd);
3103 gen_vfp_add(dp);
3104 break;
605a6aed 3105 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3106 gen_vfp_mul(dp);
3107 gen_vfp_F1_neg(dp);
3108 gen_mov_F0_vreg(dp, rd);
3109 gen_vfp_add(dp);
3110 break;
3111 case 2: /* VNMLS: -fd + (fn * fm) */
3112 /* Note that it isn't valid to replace (-A + B) with (B - A)
3113 * or similar plausible looking simplifications
3114 * because this will give wrong results for NaNs.
3115 */
3116 gen_vfp_F1_mul(dp);
3117 gen_mov_F0_vreg(dp, rd);
3118 gen_vfp_neg(dp);
3119 gen_vfp_add(dp);
b7bcbe95 3120 break;
605a6aed 3121 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3122 gen_vfp_mul(dp);
3123 gen_vfp_F1_neg(dp);
3124 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3125 gen_vfp_neg(dp);
605a6aed 3126 gen_vfp_add(dp);
3127 break;
3128 case 4: /* mul: fn * fm */
3129 gen_vfp_mul(dp);
3130 break;
3131 case 5: /* nmul: -(fn * fm) */
3132 gen_vfp_mul(dp);
3133 gen_vfp_neg(dp);
3134 break;
3135 case 6: /* add: fn + fm */
3136 gen_vfp_add(dp);
3137 break;
3138 case 7: /* sub: fn - fm */
3139 gen_vfp_sub(dp);
3140 break;
3141 case 8: /* div: fn / fm */
3142 gen_vfp_div(dp);
3143 break;
3144 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3145 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3146 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3147 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3148 /* These are fused multiply-add, and must be done as one
3149 * floating point operation with no rounding between the
3150 * multiplication and addition steps.
3151 * NB that doing the negations here as separate steps is
3152 * correct : an input NaN should come out with its sign bit
3153 * flipped if it is a negated-input.
3154 */
3155 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3156 return 1;
3157 }
3158 if (dp) {
3159 TCGv_ptr fpst;
3160 TCGv_i64 frd;
3161 if (op & 1) {
3162 /* VFNMS, VFMS */
3163 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3164 }
3165 frd = tcg_temp_new_i64();
3166 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3167 if (op & 2) {
3168 /* VFNMA, VFNMS */
3169 gen_helper_vfp_negd(frd, frd);
3170 }
3171 fpst = get_fpstatus_ptr(0);
3172 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3173 cpu_F1d, frd, fpst);
3174 tcg_temp_free_ptr(fpst);
3175 tcg_temp_free_i64(frd);
3176 } else {
3177 TCGv_ptr fpst;
3178 TCGv_i32 frd;
3179 if (op & 1) {
3180 /* VFNMS, VFMS */
3181 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3182 }
3183 frd = tcg_temp_new_i32();
3184 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3185 if (op & 2) {
3186 gen_helper_vfp_negs(frd, frd);
3187 }
3188 fpst = get_fpstatus_ptr(0);
3189 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3190 cpu_F1s, frd, fpst);
3191 tcg_temp_free_ptr(fpst);
3192 tcg_temp_free_i32(frd);
3193 }
3194 break;
3195 case 14: /* fconst */
3196 if (!arm_feature(env, ARM_FEATURE_VFP3))
3197 return 1;
3198
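                    /* Expand the 8-bit VFP immediate (1 sign bit, 3 exponent
                     * bits, 4 fraction bits) into a full single- or
                     * double-precision constant in F0.
                     */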
3199 n = (insn << 12) & 0x80000000;
3200 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3201 if (dp) {
3202 if (i & 0x40)
3203 i |= 0x3f80;
3204 else
3205 i |= 0x4000;
3206 n |= i << 16;
4373f3ce 3207 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3208 } else {
3209 if (i & 0x40)
3210 i |= 0x780;
3211 else
3212 i |= 0x800;
3213 n |= i << 19;
5b340b51 3214 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3215 }
9ee6e8bb 3216 break;
3217 case 15: /* extension space */
3218 switch (rn) {
3219 case 0: /* cpy */
3220 /* no-op */
3221 break;
3222 case 1: /* abs */
3223 gen_vfp_abs(dp);
3224 break;
3225 case 2: /* neg */
3226 gen_vfp_neg(dp);
3227 break;
3228 case 3: /* sqrt */
3229 gen_vfp_sqrt(dp);
3230 break;
60011498 3231 case 4: /* vcvtb.f32.f16 */
3232 tmp = gen_vfp_mrs();
3233 tcg_gen_ext16u_i32(tmp, tmp);
3234 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3235 tcg_temp_free_i32(tmp);
3236 break;
3237 case 5: /* vcvtt.f32.f16 */
3238 tmp = gen_vfp_mrs();
3239 tcg_gen_shri_i32(tmp, tmp, 16);
3240 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3241 tcg_temp_free_i32(tmp);
3242 break;
3243 case 6: /* vcvtb.f16.f32 */
7d1b0095 3244 tmp = tcg_temp_new_i32();
3245 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3246 gen_mov_F0_vreg(0, rd);
3247 tmp2 = gen_vfp_mrs();
3248 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3249 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3250 tcg_temp_free_i32(tmp2);
3251 gen_vfp_msr(tmp);
3252 break;
3253 case 7: /* vcvtt.f16.f32 */
7d1b0095 3254 tmp = tcg_temp_new_i32();
3255 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3256 tcg_gen_shli_i32(tmp, tmp, 16);
3257 gen_mov_F0_vreg(0, rd);
3258 tmp2 = gen_vfp_mrs();
3259 tcg_gen_ext16u_i32(tmp2, tmp2);
3260 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3261 tcg_temp_free_i32(tmp2);
3262 gen_vfp_msr(tmp);
3263 break;
3264 case 8: /* cmp */
3265 gen_vfp_cmp(dp);
3266 break;
3267 case 9: /* cmpe */
3268 gen_vfp_cmpe(dp);
3269 break;
3270 case 10: /* cmpz */
3271 gen_vfp_cmp(dp);
3272 break;
3273 case 11: /* cmpez */
3274 gen_vfp_F1_ld0(dp);
3275 gen_vfp_cmpe(dp);
3276 break;
3277 case 15: /* single<->double conversion */
3278 if (dp)
4373f3ce 3279 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3280 else
4373f3ce 3281 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3282 break;
3283 case 16: /* fuito */
5500b06c 3284 gen_vfp_uito(dp, 0);
3285 break;
3286 case 17: /* fsito */
5500b06c 3287 gen_vfp_sito(dp, 0);
b7bcbe95 3288 break;
3289 case 20: /* fshto */
3290 if (!arm_feature(env, ARM_FEATURE_VFP3))
3291 return 1;
5500b06c 3292 gen_vfp_shto(dp, 16 - rm, 0);
3293 break;
3294 case 21: /* fslto */
3295 if (!arm_feature(env, ARM_FEATURE_VFP3))
3296 return 1;
5500b06c 3297 gen_vfp_slto(dp, 32 - rm, 0);
3298 break;
3299 case 22: /* fuhto */
3300 if (!arm_feature(env, ARM_FEATURE_VFP3))
3301 return 1;
5500b06c 3302 gen_vfp_uhto(dp, 16 - rm, 0);
3303 break;
3304 case 23: /* fulto */
3305 if (!arm_feature(env, ARM_FEATURE_VFP3))
3306 return 1;
5500b06c 3307 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3308 break;
b7bcbe95 3309 case 24: /* ftoui */
5500b06c 3310 gen_vfp_toui(dp, 0);
3311 break;
3312 case 25: /* ftouiz */
5500b06c 3313 gen_vfp_touiz(dp, 0);
3314 break;
3315 case 26: /* ftosi */
5500b06c 3316 gen_vfp_tosi(dp, 0);
3317 break;
3318 case 27: /* ftosiz */
5500b06c 3319 gen_vfp_tosiz(dp, 0);
b7bcbe95 3320 break;
3321 case 28: /* ftosh */
3322 if (!arm_feature(env, ARM_FEATURE_VFP3))
3323 return 1;
5500b06c 3324 gen_vfp_tosh(dp, 16 - rm, 0);
3325 break;
3326 case 29: /* ftosl */
3327 if (!arm_feature(env, ARM_FEATURE_VFP3))
3328 return 1;
5500b06c 3329 gen_vfp_tosl(dp, 32 - rm, 0);
3330 break;
3331 case 30: /* ftouh */
3332 if (!arm_feature(env, ARM_FEATURE_VFP3))
3333 return 1;
5500b06c 3334 gen_vfp_touh(dp, 16 - rm, 0);
3335 break;
3336 case 31: /* ftoul */
3337 if (!arm_feature(env, ARM_FEATURE_VFP3))
3338 return 1;
5500b06c 3339 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3340 break;
b7bcbe95 3341 default: /* undefined */
3342 return 1;
3343 }
3344 break;
3345 default: /* undefined */
3346 return 1;
3347 }
3348
3349 /* Write back the result. */
3350 if (op == 15 && (rn >= 8 && rn <= 11))
3351 ; /* Comparison, do nothing. */
3352 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3353 /* VCVT double to int: always integer result. */
3354 gen_mov_vreg_F0(0, rd);
3355 else if (op == 15 && rn == 15)
3356 /* conversion */
3357 gen_mov_vreg_F0(!dp, rd);
3358 else
3359 gen_mov_vreg_F0(dp, rd);
3360
3361 /* break out of the loop if we have finished */
3362 if (veclen == 0)
3363 break;
3364
3365 if (op == 15 && delta_m == 0) {
3366 /* single source one-many */
3367 while (veclen--) {
3368 rd = ((rd + delta_d) & (bank_mask - 1))
3369 | (rd & bank_mask);
3370 gen_mov_vreg_F0(dp, rd);
3371 }
3372 break;
3373 }
3374 /* Setup the next operands. */
3375 veclen--;
3376 rd = ((rd + delta_d) & (bank_mask - 1))
3377 | (rd & bank_mask);
3378
3379 if (op == 15) {
3380 /* One source operand. */
3381 rm = ((rm + delta_m) & (bank_mask - 1))
3382 | (rm & bank_mask);
3383 gen_mov_F0_vreg(dp, rm);
3384 } else {
3385 /* Two source operands. */
3386 rn = ((rn + delta_d) & (bank_mask - 1))
3387 | (rn & bank_mask);
3388 gen_mov_F0_vreg(dp, rn);
3389 if (delta_m) {
3390 rm = ((rm + delta_m) & (bank_mask - 1))
3391 | (rm & bank_mask);
3392 gen_mov_F1_vreg(dp, rm);
3393 }
3394 }
3395 }
3396 }
3397 break;
3398 case 0xc:
3399 case 0xd:
8387da81 3400 if ((insn & 0x03e00000) == 0x00400000) {
3401 /* two-register transfer */
3402 rn = (insn >> 16) & 0xf;
3403 rd = (insn >> 12) & 0xf;
3404 if (dp) {
3405 VFP_DREG_M(rm, insn);
3406 } else {
3407 rm = VFP_SREG_M(insn);
3408 }
b7bcbe95 3409
18c9b560 3410 if (insn & ARM_CP_RW_BIT) {
3411 /* vfp->arm */
3412 if (dp) {
3413 gen_mov_F0_vreg(0, rm * 2);
3414 tmp = gen_vfp_mrs();
3415 store_reg(s, rd, tmp);
3416 gen_mov_F0_vreg(0, rm * 2 + 1);
3417 tmp = gen_vfp_mrs();
3418 store_reg(s, rn, tmp);
3419 } else {
3420 gen_mov_F0_vreg(0, rm);
4373f3ce 3421 tmp = gen_vfp_mrs();
8387da81 3422 store_reg(s, rd, tmp);
b7bcbe95 3423 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3424 tmp = gen_vfp_mrs();
8387da81 3425 store_reg(s, rn, tmp);
3426 }
3427 } else {
3428 /* arm->vfp */
3429 if (dp) {
3430 tmp = load_reg(s, rd);
3431 gen_vfp_msr(tmp);
3432 gen_mov_vreg_F0(0, rm * 2);
3433 tmp = load_reg(s, rn);
3434 gen_vfp_msr(tmp);
3435 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3436 } else {
8387da81 3437 tmp = load_reg(s, rd);
4373f3ce 3438 gen_vfp_msr(tmp);
b7bcbe95 3439 gen_mov_vreg_F0(0, rm);
8387da81 3440 tmp = load_reg(s, rn);
4373f3ce 3441 gen_vfp_msr(tmp);
3442 gen_mov_vreg_F0(0, rm + 1);
3443 }
3444 }
3445 } else {
3446 /* Load/store */
3447 rn = (insn >> 16) & 0xf;
3448 if (dp)
9ee6e8bb 3449 VFP_DREG_D(rd, insn);
b7bcbe95 3450 else
9ee6e8bb 3451 rd = VFP_SREG_D(insn);
3452 if ((insn & 0x01200000) == 0x01000000) {
3453 /* Single load/store */
3454 offset = (insn & 0xff) << 2;
3455 if ((insn & (1 << 23)) == 0)
3456 offset = -offset;
3457 if (s->thumb && rn == 15) {
3458 /* This is actually UNPREDICTABLE */
3459 addr = tcg_temp_new_i32();
3460 tcg_gen_movi_i32(addr, s->pc & ~2);
3461 } else {
3462 addr = load_reg(s, rn);
3463 }
312eea9f 3464 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3465 if (insn & (1 << 20)) {
312eea9f 3466 gen_vfp_ld(s, dp, addr);
3467 gen_mov_vreg_F0(dp, rd);
3468 } else {
3469 gen_mov_F0_vreg(dp, rd);
312eea9f 3470 gen_vfp_st(s, dp, addr);
b7bcbe95 3471 }
7d1b0095 3472 tcg_temp_free_i32(addr);
3473 } else {
3474 /* load/store multiple */
934814f1 3475 int w = insn & (1 << 21);
3476 if (dp)
3477 n = (insn >> 1) & 0x7f;
3478 else
3479 n = insn & 0xff;
3480
3481 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3482 /* P == U , W == 1 => UNDEF */
3483 return 1;
3484 }
3485 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3486 /* UNPREDICTABLE cases for bad immediates: we choose to
3487 * UNDEF to avoid generating huge numbers of TCG ops
3488 */
3489 return 1;
3490 }
3491 if (rn == 15 && w) {
3492 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3493 return 1;
3494 }
3495
3496 if (s->thumb && rn == 15) {
3497 /* This is actually UNPREDICTABLE */
3498 addr = tcg_temp_new_i32();
3499 tcg_gen_movi_i32(addr, s->pc & ~2);
3500 } else {
3501 addr = load_reg(s, rn);
3502 }
b7bcbe95 3503 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3504 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3505
3506 if (dp)
3507 offset = 8;
3508 else
3509 offset = 4;
3510 for (i = 0; i < n; i++) {
18c9b560 3511 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3512 /* load */
312eea9f 3513 gen_vfp_ld(s, dp, addr);
3514 gen_mov_vreg_F0(dp, rd + i);
3515 } else {
3516 /* store */
3517 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3518 gen_vfp_st(s, dp, addr);
b7bcbe95 3519 }
312eea9f 3520 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3521 }
934814f1 3522 if (w) {
3523 /* writeback */
3524 if (insn & (1 << 24))
3525 offset = -offset * n;
3526 else if (dp && (insn & 1))
3527 offset = 4;
3528 else
3529 offset = 0;
3530
3531 if (offset != 0)
3532 tcg_gen_addi_i32(addr, addr, offset);
3533 store_reg(s, rn, addr);
3534 } else {
7d1b0095 3535 tcg_temp_free_i32(addr);
3536 }
3537 }
3538 }
3539 break;
3540 default:
3541 /* Should never happen. */
3542 return 1;
3543 }
3544 return 0;
3545}
3546
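/* Jump to 'dest': chain directly to the next TB when it lies on the same
 * guest page as this one, otherwise just update the PC and return to the
 * main loop so the target can be looked up.
 */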
6e256c93 3547static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3548{
3549 TranslationBlock *tb;
3550
3551 tb = s->tb;
3552 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3553 tcg_gen_goto_tb(n);
8984bd2e 3554 gen_set_pc_im(dest);
4b4a72e5 3555 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3556 } else {
8984bd2e 3557 gen_set_pc_im(dest);
57fec1fe 3558 tcg_gen_exit_tb(0);
6e256c93 3559 }
3560}
3561
3562static inline void gen_jmp (DisasContext *s, uint32_t dest)
3563{
551bd27f 3564 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3565 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3566 if (s->thumb)
3567 dest |= 1;
3568 gen_bx_im(s, dest);
8aaca4c0 3569 } else {
6e256c93 3570 gen_goto_tb(s, 0, dest);
3571 s->is_jmp = DISAS_TB_JUMP;
3572 }
3573}
3574
d9ba4830 3575static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3576{
ee097184 3577 if (x)
d9ba4830 3578 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3579 else
d9ba4830 3580 gen_sxth(t0);
ee097184 3581 if (y)
d9ba4830 3582 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3583 else
3584 gen_sxth(t1);
3585 tcg_gen_mul_i32(t0, t0, t1);
3586}
3587
3588/* Return the mask of PSR bits set by a MSR instruction. */
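/* The four low 'flags' bits select the c/x/s/f bytes of the PSR
 * (the MSR field mask). */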
0ecb72a5 3589static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3590 uint32_t mask;
3591
3592 mask = 0;
3593 if (flags & (1 << 0))
3594 mask |= 0xff;
3595 if (flags & (1 << 1))
3596 mask |= 0xff00;
3597 if (flags & (1 << 2))
3598 mask |= 0xff0000;
3599 if (flags & (1 << 3))
3600 mask |= 0xff000000;
9ee6e8bb 3601
2ae23e75 3602 /* Mask out undefined bits. */
9ee6e8bb 3603 mask &= ~CPSR_RESERVED;
3604 if (!arm_feature(env, ARM_FEATURE_V4T))
3605 mask &= ~CPSR_T;
3606 if (!arm_feature(env, ARM_FEATURE_V5))
    mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3608 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3609 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3610 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3611 mask &= ~CPSR_IT;
9ee6e8bb 3612 /* Mask out execution state bits. */
2ae23e75 3613 if (!spsr)
e160c51c 3614 mask &= ~CPSR_EXEC;
3615 /* Mask out privileged bits. */
3616 if (IS_USER(s))
9ee6e8bb 3617 mask &= CPSR_USER;
3618 return mask;
3619}
3620
3621/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3622static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3623{
d9ba4830 3624 TCGv tmp;
3625 if (spsr) {
3626 /* ??? This is also undefined in system mode. */
3627 if (IS_USER(s))
3628 return 1;
3629
3630 tmp = load_cpu_field(spsr);
3631 tcg_gen_andi_i32(tmp, tmp, ~mask);
3632 tcg_gen_andi_i32(t0, t0, mask);
3633 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3634 store_cpu_field(tmp, spsr);
b5ff1b31 3635 } else {
2fbac54b 3636 gen_set_cpsr(t0, mask);
b5ff1b31 3637 }
7d1b0095 3638 tcg_temp_free_i32(t0);
3639 gen_lookup_tb(s);
3640 return 0;
3641}
3642
3643/* Returns nonzero if access to the PSR is not permitted. */
3644static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3645{
3646 TCGv tmp;
7d1b0095 3647 tmp = tcg_temp_new_i32();
3648 tcg_gen_movi_i32(tmp, val);
3649 return gen_set_psr(s, mask, spsr, tmp);
3650}
3651
3652/* Generate an old-style exception return. Marks pc as dead. */
3653static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3654{
d9ba4830 3655 TCGv tmp;
e9bb4aa9 3656 store_reg(s, 15, pc);
3657 tmp = load_cpu_field(spsr);
3658 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3659 tcg_temp_free_i32(tmp);
3660 s->is_jmp = DISAS_UPDATE;
3661}
3662
3663/* Generate a v6 exception return. Marks both values as dead. */
3664static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3665{
b0109805 3666 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3667 tcg_temp_free_i32(cpsr);
b0109805 3668 store_reg(s, 15, pc);
3669 s->is_jmp = DISAS_UPDATE;
3670}
3b46e624 3671
3672static inline void
3673gen_set_condexec (DisasContext *s)
3674{
3675 if (s->condexec_mask) {
8f01245e 3676 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3677 TCGv tmp = tcg_temp_new_i32();
8f01245e 3678 tcg_gen_movi_i32(tmp, val);
d9ba4830 3679 store_cpu_field(tmp, condexec_bits);
3680 }
3681}
3b46e624 3682
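/* Generate an exception for the instruction at (pc - offset), syncing the
 * condexec bits and the PC first so the exception is raised at the right
 * point.
 */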
3683static void gen_exception_insn(DisasContext *s, int offset, int excp)
3684{
3685 gen_set_condexec(s);
3686 gen_set_pc_im(s->pc - offset);
3687 gen_exception(excp);
3688 s->is_jmp = DISAS_JUMP;
3689}
3690
3691static void gen_nop_hint(DisasContext *s, int val)
3692{
3693 switch (val) {
3694 case 3: /* wfi */
8984bd2e 3695 gen_set_pc_im(s->pc);
3696 s->is_jmp = DISAS_WFI;
3697 break;
3698 case 2: /* wfe */
3699 case 4: /* sev */
3700 /* TODO: Implement SEV and WFE. May help SMP performance. */
3701 default: /* nop */
3702 break;
3703 }
3704}
99c475ab 3705
ad69471c 3706#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3707
62698be3 3708static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3709{
3710 switch (size) {
dd8fbd78
FN
3711 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3712 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3713 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3714 default: abort();
9ee6e8bb 3715 }
9ee6e8bb
PB
3716}
3717
dd8fbd78 3718static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3719{
3720 switch (size) {
dd8fbd78
FN
3721 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3722 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3723 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3724 default: return;
3725 }
3726}
3727
3728/* 32-bit pairwise ops end up the same as the elementwise versions. */
3729#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3730#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3731#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3732#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3733
ad69471c
PB
3734#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3735 switch ((size << 1) | u) { \
3736 case 0: \
dd8fbd78 3737 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3738 break; \
3739 case 1: \
dd8fbd78 3740 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3741 break; \
3742 case 2: \
dd8fbd78 3743 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3744 break; \
3745 case 3: \
dd8fbd78 3746 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3747 break; \
3748 case 4: \
dd8fbd78 3749 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3750 break; \
3751 case 5: \
dd8fbd78 3752 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3753 break; \
3754 default: return 1; \
3755 }} while (0)
9ee6e8bb
PB
3756
3757#define GEN_NEON_INTEGER_OP(name) do { \
3758 switch ((size << 1) | u) { \
ad69471c 3759 case 0: \
dd8fbd78 3760 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3761 break; \
3762 case 1: \
dd8fbd78 3763 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3764 break; \
3765 case 2: \
dd8fbd78 3766 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3767 break; \
3768 case 3: \
dd8fbd78 3769 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3770 break; \
3771 case 4: \
dd8fbd78 3772 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3773 break; \
3774 case 5: \
dd8fbd78 3775 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3776 break; \
9ee6e8bb
PB
3777 default: return 1; \
3778 }} while (0)
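/* Added note: the (size << 1) | u index used by both macros selects element
 * width and signedness together; e.g. size == 1 and u == 1 give case 3, so
 * GEN_NEON_INTEGER_OP(hadd) expands to a call to
 * gen_helper_neon_hadd_u16(tmp, tmp, tmp2). */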
3779
dd8fbd78 3780static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3781{
7d1b0095 3782 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3783 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3784 return tmp;
9ee6e8bb
PB
3785}
3786
dd8fbd78 3787static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3788{
dd8fbd78 3789 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3790 tcg_temp_free_i32(var);
9ee6e8bb
PB
3791}
3792
dd8fbd78 3793static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3794{
dd8fbd78 3795 TCGv tmp;
9ee6e8bb 3796 if (size == 1) {
0fad6efc
PM
3797 tmp = neon_load_reg(reg & 7, reg >> 4);
3798 if (reg & 8) {
dd8fbd78 3799 gen_neon_dup_high16(tmp);
0fad6efc
PM
3800 } else {
3801 gen_neon_dup_low16(tmp);
dd8fbd78 3802 }
0fad6efc
PM
3803 } else {
3804 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3805 }
dd8fbd78 3806 return tmp;
9ee6e8bb
PB
3807}
3808
02acedf9 3809static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3810{
02acedf9 3811 TCGv tmp, tmp2;
600b828c 3812 if (!q && size == 2) {
02acedf9
PM
3813 return 1;
3814 }
3815 tmp = tcg_const_i32(rd);
3816 tmp2 = tcg_const_i32(rm);
3817 if (q) {
3818 switch (size) {
3819 case 0:
02da0b2d 3820 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3821 break;
3822 case 1:
02da0b2d 3823 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3824 break;
3825 case 2:
02da0b2d 3826 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3827 break;
3828 default:
3829 abort();
3830 }
3831 } else {
3832 switch (size) {
3833 case 0:
02da0b2d 3834 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3835 break;
3836 case 1:
02da0b2d 3837 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3838 break;
3839 default:
3840 abort();
3841 }
3842 }
3843 tcg_temp_free_i32(tmp);
3844 tcg_temp_free_i32(tmp2);
3845 return 0;
19457615
FN
3846}
3847
d68a6f3a 3848static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3849{
3850 TCGv tmp, tmp2;
600b828c 3851 if (!q && size == 2) {
d68a6f3a
PM
3852 return 1;
3853 }
3854 tmp = tcg_const_i32(rd);
3855 tmp2 = tcg_const_i32(rm);
3856 if (q) {
3857 switch (size) {
3858 case 0:
02da0b2d 3859 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3860 break;
3861 case 1:
02da0b2d 3862 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3863 break;
3864 case 2:
02da0b2d 3865 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3866 break;
3867 default:
3868 abort();
3869 }
3870 } else {
3871 switch (size) {
3872 case 0:
02da0b2d 3873 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3874 break;
3875 case 1:
02da0b2d 3876 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3877 break;
3878 default:
3879 abort();
3880 }
3881 }
3882 tcg_temp_free_i32(tmp);
3883 tcg_temp_free_i32(tmp2);
3884 return 0;
19457615
FN
3885}
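/* Added note: architecturally VZIP interleaves corresponding elements of two
 * registers ({a0,a1,...} and {b0,b1,...} become {a0,b0,a1,b1,...}) and VUZP
 * is the inverse de-interleave; the helpers called above perform the whole
 * permutation on the CPU state, so the translator only passes the two
 * register numbers. */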
3886
19457615
FN
3887static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3888{
3889 TCGv rd, tmp;
3890
7d1b0095
PM
3891 rd = tcg_temp_new_i32();
3892 tmp = tcg_temp_new_i32();
19457615
FN
3893
3894 tcg_gen_shli_i32(rd, t0, 8);
3895 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3896 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3897 tcg_gen_or_i32(rd, rd, tmp);
3898
3899 tcg_gen_shri_i32(t1, t1, 8);
3900 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3901 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3902 tcg_gen_or_i32(t1, t1, tmp);
3903 tcg_gen_mov_i32(t0, rd);
3904
7d1b0095
PM
3905 tcg_temp_free_i32(tmp);
3906 tcg_temp_free_i32(rd);
19457615
FN
3907}
3908
3909static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3910{
3911 TCGv rd, tmp;
3912
7d1b0095
PM
3913 rd = tcg_temp_new_i32();
3914 tmp = tcg_temp_new_i32();
19457615
FN
3915
3916 tcg_gen_shli_i32(rd, t0, 16);
3917 tcg_gen_andi_i32(tmp, t1, 0xffff);
3918 tcg_gen_or_i32(rd, rd, tmp);
3919 tcg_gen_shri_i32(t1, t1, 16);
3920 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3921 tcg_gen_or_i32(t1, t1, tmp);
3922 tcg_gen_mov_i32(t0, rd);
3923
7d1b0095
PM
3924 tcg_temp_free_i32(tmp);
3925 tcg_temp_free_i32(rd);
19457615
FN
3926}
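/* Added worked example: with t0 = 0x44332211 and t1 = 0x88776655 on entry,
 * gen_neon_trn_u8 leaves t0 = 0x33771155 and t1 = 0x44882266; the halfword
 * variant does the same exchange on 16-bit lanes.  These per-word swaps are
 * the building block the translator's VTRN handling uses. */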
3927
3928
9ee6e8bb
PB
3929static struct {
3930 int nregs;
3931 int interleave;
3932 int spacing;
3933} neon_ls_element_type[11] = {
3934 {4, 4, 1},
3935 {4, 4, 2},
3936 {4, 1, 1},
3937 {4, 2, 1},
3938 {3, 3, 1},
3939 {3, 3, 2},
3940 {3, 1, 1},
3941 {1, 1, 1},
3942 {2, 2, 1},
3943 {2, 2, 2},
3944 {2, 1, 1}
3945};
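/* Added note: each row gives {nregs, interleave, spacing} for the "load/store
 * all elements" op values 0-10; e.g. op == 9 is {2, 2, 2}, a double-spaced
 * VLD2/VST2, so the per-element address stride below is
 * (1 << size) * interleave and the second register of each pair sits two
 * D registers away. */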
3946
3947/* Translate a NEON load/store element instruction. Return nonzero if the
3948 instruction is invalid. */
0ecb72a5 3949static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3950{
3951 int rd, rn, rm;
3952 int op;
3953 int nregs;
3954 int interleave;
84496233 3955 int spacing;
9ee6e8bb
PB
3956 int stride;
3957 int size;
3958 int reg;
3959 int pass;
3960 int load;
3961 int shift;
9ee6e8bb 3962 int n;
1b2b1e54 3963 TCGv addr;
b0109805 3964 TCGv tmp;
8f8e3aa4 3965 TCGv tmp2;
84496233 3966 TCGv_i64 tmp64;
9ee6e8bb 3967
5df8bac1 3968 if (!s->vfp_enabled)
9ee6e8bb
PB
3969 return 1;
3970 VFP_DREG_D(rd, insn);
3971 rn = (insn >> 16) & 0xf;
3972 rm = insn & 0xf;
3973 load = (insn & (1 << 21)) != 0;
3974 if ((insn & (1 << 23)) == 0) {
3975 /* Load store all elements. */
3976 op = (insn >> 8) & 0xf;
3977 size = (insn >> 6) & 3;
84496233 3978 if (op > 10)
9ee6e8bb 3979 return 1;
f2dd89d0
PM
3980 /* Catch UNDEF cases for bad values of align field */
3981 switch (op & 0xc) {
3982 case 4:
3983 if (((insn >> 5) & 1) == 1) {
3984 return 1;
3985 }
3986 break;
3987 case 8:
3988 if (((insn >> 4) & 3) == 3) {
3989 return 1;
3990 }
3991 break;
3992 default:
3993 break;
3994 }
9ee6e8bb
PB
3995 nregs = neon_ls_element_type[op].nregs;
3996 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3997 spacing = neon_ls_element_type[op].spacing;
3998 if (size == 3 && (interleave | spacing) != 1)
3999 return 1;
e318a60b 4000 addr = tcg_temp_new_i32();
dcc65026 4001 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4002 stride = (1 << size) * interleave;
4003 for (reg = 0; reg < nregs; reg++) {
4004 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4005 load_reg_var(s, addr, rn);
4006 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4007 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4008 load_reg_var(s, addr, rn);
4009 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4010 }
84496233
JR
4011 if (size == 3) {
4012 if (load) {
4013 tmp64 = gen_ld64(addr, IS_USER(s));
4014 neon_store_reg64(tmp64, rd);
4015 tcg_temp_free_i64(tmp64);
4016 } else {
4017 tmp64 = tcg_temp_new_i64();
4018 neon_load_reg64(tmp64, rd);
4019 gen_st64(tmp64, addr, IS_USER(s));
4020 }
4021 tcg_gen_addi_i32(addr, addr, stride);
4022 } else {
4023 for (pass = 0; pass < 2; pass++) {
4024 if (size == 2) {
4025 if (load) {
4026 tmp = gen_ld32(addr, IS_USER(s));
4027 neon_store_reg(rd, pass, tmp);
4028 } else {
4029 tmp = neon_load_reg(rd, pass);
4030 gen_st32(tmp, addr, IS_USER(s));
4031 }
1b2b1e54 4032 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4033 } else if (size == 1) {
4034 if (load) {
4035 tmp = gen_ld16u(addr, IS_USER(s));
4036 tcg_gen_addi_i32(addr, addr, stride);
4037 tmp2 = gen_ld16u(addr, IS_USER(s));
4038 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4039 tcg_gen_shli_i32(tmp2, tmp2, 16);
4040 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4041 tcg_temp_free_i32(tmp2);
84496233
JR
4042 neon_store_reg(rd, pass, tmp);
4043 } else {
4044 tmp = neon_load_reg(rd, pass);
7d1b0095 4045 tmp2 = tcg_temp_new_i32();
84496233
JR
4046 tcg_gen_shri_i32(tmp2, tmp, 16);
4047 gen_st16(tmp, addr, IS_USER(s));
4048 tcg_gen_addi_i32(addr, addr, stride);
4049 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 4050 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4051 }
84496233
JR
4052 } else /* size == 0 */ {
4053 if (load) {
4054 TCGV_UNUSED(tmp2);
4055 for (n = 0; n < 4; n++) {
4056 tmp = gen_ld8u(addr, IS_USER(s));
4057 tcg_gen_addi_i32(addr, addr, stride);
4058 if (n == 0) {
4059 tmp2 = tmp;
4060 } else {
41ba8341
PB
4061 tcg_gen_shli_i32(tmp, tmp, n * 8);
4062 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4063 tcg_temp_free_i32(tmp);
84496233 4064 }
9ee6e8bb 4065 }
84496233
JR
4066 neon_store_reg(rd, pass, tmp2);
4067 } else {
4068 tmp2 = neon_load_reg(rd, pass);
4069 for (n = 0; n < 4; n++) {
7d1b0095 4070 tmp = tcg_temp_new_i32();
84496233
JR
4071 if (n == 0) {
4072 tcg_gen_mov_i32(tmp, tmp2);
4073 } else {
4074 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4075 }
4076 gen_st8(tmp, addr, IS_USER(s));
4077 tcg_gen_addi_i32(addr, addr, stride);
4078 }
7d1b0095 4079 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4080 }
4081 }
4082 }
4083 }
84496233 4084 rd += spacing;
9ee6e8bb 4085 }
e318a60b 4086 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4087 stride = nregs * 8;
4088 } else {
4089 size = (insn >> 10) & 3;
4090 if (size == 3) {
4091 /* Load single element to all lanes. */
8e18cde3
PM
4092 int a = (insn >> 4) & 1;
4093 if (!load) {
9ee6e8bb 4094 return 1;
8e18cde3 4095 }
9ee6e8bb
PB
4096 size = (insn >> 6) & 3;
4097 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4098
4099 if (size == 3) {
4100 if (nregs != 4 || a == 0) {
9ee6e8bb 4101 return 1;
99c475ab 4102 }
8e18cde3
PM
4103 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4104 size = 2;
4105 }
4106 if (nregs == 1 && a == 1 && size == 0) {
4107 return 1;
4108 }
4109 if (nregs == 3 && a == 1) {
4110 return 1;
4111 }
e318a60b 4112 addr = tcg_temp_new_i32();
8e18cde3
PM
4113 load_reg_var(s, addr, rn);
4114 if (nregs == 1) {
4115 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4116 tmp = gen_load_and_replicate(s, addr, size);
4117 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4118 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4119 if (insn & (1 << 5)) {
4120 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4121 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4122 }
4123 tcg_temp_free_i32(tmp);
4124 } else {
4125 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4126 stride = (insn & (1 << 5)) ? 2 : 1;
4127 for (reg = 0; reg < nregs; reg++) {
4128 tmp = gen_load_and_replicate(s, addr, size);
4129 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4130 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4131 tcg_temp_free_i32(tmp);
4132 tcg_gen_addi_i32(addr, addr, 1 << size);
4133 rd += stride;
4134 }
9ee6e8bb 4135 }
e318a60b 4136 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4137 stride = (1 << size) * nregs;
4138 } else {
4139 /* Single element. */
93262b16 4140 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4141 pass = (insn >> 7) & 1;
4142 switch (size) {
4143 case 0:
4144 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4145 stride = 1;
4146 break;
4147 case 1:
4148 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4149 stride = (insn & (1 << 5)) ? 2 : 1;
4150 break;
4151 case 2:
4152 shift = 0;
9ee6e8bb
PB
4153 stride = (insn & (1 << 6)) ? 2 : 1;
4154 break;
4155 default:
4156 abort();
4157 }
4158 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4159 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4160 switch (nregs) {
4161 case 1:
4162 if (((idx & (1 << size)) != 0) ||
4163 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4164 return 1;
4165 }
4166 break;
4167 case 3:
4168 if ((idx & 1) != 0) {
4169 return 1;
4170 }
4171 /* fall through */
4172 case 2:
4173 if (size == 2 && (idx & 2) != 0) {
4174 return 1;
4175 }
4176 break;
4177 case 4:
4178 if ((size == 2) && ((idx & 3) == 3)) {
4179 return 1;
4180 }
4181 break;
4182 default:
4183 abort();
4184 }
4185 if ((rd + stride * (nregs - 1)) > 31) {
4186 /* Attempts to write off the end of the register file
4187 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4188 * the neon_load_reg() would write off the end of the array.
4189 */
4190 return 1;
4191 }
e318a60b 4192 addr = tcg_temp_new_i32();
dcc65026 4193 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4194 for (reg = 0; reg < nregs; reg++) {
4195 if (load) {
9ee6e8bb
PB
4196 switch (size) {
4197 case 0:
1b2b1e54 4198 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4199 break;
4200 case 1:
1b2b1e54 4201 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4202 break;
4203 case 2:
1b2b1e54 4204 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4205 break;
a50f5b91
PB
4206 default: /* Avoid compiler warnings. */
4207 abort();
9ee6e8bb
PB
4208 }
4209 if (size != 2) {
8f8e3aa4
PB
4210 tmp2 = neon_load_reg(rd, pass);
4211 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4212 tcg_temp_free_i32(tmp2);
9ee6e8bb 4213 }
8f8e3aa4 4214 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4215 } else { /* Store */
8f8e3aa4
PB
4216 tmp = neon_load_reg(rd, pass);
4217 if (shift)
4218 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4219 switch (size) {
4220 case 0:
1b2b1e54 4221 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4222 break;
4223 case 1:
1b2b1e54 4224 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4225 break;
4226 case 2:
1b2b1e54 4227 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4228 break;
99c475ab 4229 }
99c475ab 4230 }
9ee6e8bb 4231 rd += stride;
1b2b1e54 4232 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4233 }
e318a60b 4234 tcg_temp_free_i32(addr);
9ee6e8bb 4235 stride = nregs * (1 << size);
99c475ab 4236 }
9ee6e8bb
PB
4237 }
4238 if (rm != 15) {
b26eefb6
PB
4239 TCGv base;
4240
4241 base = load_reg(s, rn);
9ee6e8bb 4242 if (rm == 13) {
b26eefb6 4243 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4244 } else {
b26eefb6
PB
4245 TCGv index;
4246 index = load_reg(s, rm);
4247 tcg_gen_add_i32(base, base, index);
7d1b0095 4248 tcg_temp_free_i32(index);
9ee6e8bb 4249 }
b26eefb6 4250 store_reg(s, rn, base);
9ee6e8bb
PB
4251 }
4252 return 0;
4253}
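/* Added note: the writeback above follows the architectural post-indexing
 * rules for these encodings: Rm == 15 means no writeback, Rm == 13 adds the
 * transfer size (stride) to Rn, and any other Rm adds that register's value
 * to Rn. */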
3b46e624 4254
8f8e3aa4
PB
4255/* Bitwise select. dest = c ? t : f. Clobbers t and f. */
4256static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4257{
4258 tcg_gen_and_i32(t, t, c);
f669df27 4259 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4260 tcg_gen_or_i32(dest, t, f);
4261}
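/* Added worked example: with c = 0xff00ff00, t = 0x11223344, f = 0xaabbccdd
 * the select above produces 0x11bb33dd; each result bit comes from t where
 * the corresponding bit of c is set and from f where it is clear. */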
4262
a7812ae4 4263static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4264{
4265 switch (size) {
4266 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4267 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4268 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4269 default: abort();
4270 }
4271}
4272
a7812ae4 4273static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4274{
4275 switch (size) {
02da0b2d
PM
4276 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4277 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4278 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4279 default: abort();
4280 }
4281}
4282
a7812ae4 4283static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4284{
4285 switch (size) {
02da0b2d
PM
4286 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4287 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4288 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4289 default: abort();
4290 }
4291}
4292
af1bbf30
JR
4293static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4294{
4295 switch (size) {
02da0b2d
PM
4296 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4297 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4298 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4299 default: abort();
4300 }
4301}
4302
ad69471c
PB
4303static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4304 int q, int u)
4305{
4306 if (q) {
4307 if (u) {
4308 switch (size) {
4309 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4310 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4311 default: abort();
4312 }
4313 } else {
4314 switch (size) {
4315 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4316 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4317 default: abort();
4318 }
4319 }
4320 } else {
4321 if (u) {
4322 switch (size) {
b408a9b0
CL
4323 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4324 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4325 default: abort();
4326 }
4327 } else {
4328 switch (size) {
4329 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4330 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4331 default: abort();
4332 }
4333 }
4334 }
4335}
4336
a7812ae4 4337static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4338{
4339 if (u) {
4340 switch (size) {
4341 case 0: gen_helper_neon_widen_u8(dest, src); break;
4342 case 1: gen_helper_neon_widen_u16(dest, src); break;
4343 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4344 default: abort();
4345 }
4346 } else {
4347 switch (size) {
4348 case 0: gen_helper_neon_widen_s8(dest, src); break;
4349 case 1: gen_helper_neon_widen_s16(dest, src); break;
4350 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4351 default: abort();
4352 }
4353 }
7d1b0095 4354 tcg_temp_free_i32(src);
ad69471c
PB
4355}
4356
4357static inline void gen_neon_addl(int size)
4358{
4359 switch (size) {
4360 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4361 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4362 case 2: tcg_gen_add_i64(CPU_V001); break;
4363 default: abort();
4364 }
4365}
4366
4367static inline void gen_neon_subl(int size)
4368{
4369 switch (size) {
4370 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4371 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4372 case 2: tcg_gen_sub_i64(CPU_V001); break;
4373 default: abort();
4374 }
4375}
4376
a7812ae4 4377static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4378{
4379 switch (size) {
4380 case 0: gen_helper_neon_negl_u16(var, var); break;
4381 case 1: gen_helper_neon_negl_u32(var, var); break;
4382 case 2: gen_helper_neon_negl_u64(var, var); break;
4383 default: abort();
4384 }
4385}
4386
a7812ae4 4387static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4388{
4389 switch (size) {
02da0b2d
PM
4390 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4391 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4392 default: abort();
4393 }
4394}
4395
a7812ae4 4396static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4397{
a7812ae4 4398 TCGv_i64 tmp;
ad69471c
PB
4399
4400 switch ((size << 1) | u) {
4401 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4402 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4403 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4404 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4405 case 4:
4406 tmp = gen_muls_i64_i32(a, b);
4407 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4408 tcg_temp_free_i64(tmp);
ad69471c
PB
4409 break;
4410 case 5:
4411 tmp = gen_mulu_i64_i32(a, b);
4412 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4413 tcg_temp_free_i64(tmp);
ad69471c
PB
4414 break;
4415 default: abort();
4416 }
c6067f04
CL
4417
4418 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4419 Don't forget to clean them now. */
4420 if (size < 2) {
7d1b0095
PM
4421 tcg_temp_free_i32(a);
4422 tcg_temp_free_i32(b);
c6067f04 4423 }
ad69471c
PB
4424}
4425
c33171c7
PM
4426static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4427{
4428 if (op) {
4429 if (u) {
4430 gen_neon_unarrow_sats(size, dest, src);
4431 } else {
4432 gen_neon_narrow(size, dest, src);
4433 }
4434 } else {
4435 if (u) {
4436 gen_neon_narrow_satu(size, dest, src);
4437 } else {
4438 gen_neon_narrow_sats(size, dest, src);
4439 }
4440 }
4441}
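/* Added illustration: a minimal scalar sketch (not from the original file) of
 * what one lane of the unsigned saturating narrow used above does; the real
 * helpers operate on packed lanes and also set the cumulative saturation
 * (QC) flag when they clamp. */
static inline uint8_t sat_narrow_u16_to_u8(uint16_t x)
{
    return (x > 0xff) ? 0xff : (uint8_t)x;
}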
4442
62698be3
PM
4443/* Symbolic constants for op fields for Neon 3-register same-length.
4444 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4445 * table A7-9.
4446 */
4447#define NEON_3R_VHADD 0
4448#define NEON_3R_VQADD 1
4449#define NEON_3R_VRHADD 2
4450#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4451#define NEON_3R_VHSUB 4
4452#define NEON_3R_VQSUB 5
4453#define NEON_3R_VCGT 6
4454#define NEON_3R_VCGE 7
4455#define NEON_3R_VSHL 8
4456#define NEON_3R_VQSHL 9
4457#define NEON_3R_VRSHL 10
4458#define NEON_3R_VQRSHL 11
4459#define NEON_3R_VMAX 12
4460#define NEON_3R_VMIN 13
4461#define NEON_3R_VABD 14
4462#define NEON_3R_VABA 15
4463#define NEON_3R_VADD_VSUB 16
4464#define NEON_3R_VTST_VCEQ 17
4465#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4466#define NEON_3R_VMUL 19
4467#define NEON_3R_VPMAX 20
4468#define NEON_3R_VPMIN 21
4469#define NEON_3R_VQDMULH_VQRDMULH 22
4470#define NEON_3R_VPADD 23
da97f52c 4471#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4472#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4473#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4474#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4475#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4476#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4477#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4478
4479static const uint8_t neon_3r_sizes[] = {
4480 [NEON_3R_VHADD] = 0x7,
4481 [NEON_3R_VQADD] = 0xf,
4482 [NEON_3R_VRHADD] = 0x7,
4483 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4484 [NEON_3R_VHSUB] = 0x7,
4485 [NEON_3R_VQSUB] = 0xf,
4486 [NEON_3R_VCGT] = 0x7,
4487 [NEON_3R_VCGE] = 0x7,
4488 [NEON_3R_VSHL] = 0xf,
4489 [NEON_3R_VQSHL] = 0xf,
4490 [NEON_3R_VRSHL] = 0xf,
4491 [NEON_3R_VQRSHL] = 0xf,
4492 [NEON_3R_VMAX] = 0x7,
4493 [NEON_3R_VMIN] = 0x7,
4494 [NEON_3R_VABD] = 0x7,
4495 [NEON_3R_VABA] = 0x7,
4496 [NEON_3R_VADD_VSUB] = 0xf,
4497 [NEON_3R_VTST_VCEQ] = 0x7,
4498 [NEON_3R_VML] = 0x7,
4499 [NEON_3R_VMUL] = 0x7,
4500 [NEON_3R_VPMAX] = 0x7,
4501 [NEON_3R_VPMIN] = 0x7,
4502 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4503 [NEON_3R_VPADD] = 0x7,
da97f52c 4504 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4505 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4506 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4507 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4508 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4509 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4510 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4511};
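/* Added note: each entry is a bitmap of permitted size values (bit n set
 * means size == n is legal), so 0x7 allows the 8/16/32-bit forms, 0xf also
 * allows the 64-bit size == 3 forms, 0x6 restricts VQDMULH/VQRDMULH to
 * 16- and 32-bit elements, and the 0x5 float entries only accept "sizes"
 * 0 and 2 because bit 1 of the size field selects the operation there. */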
4512
600b828c
PM
4513/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4514 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4515 * table A7-13.
4516 */
4517#define NEON_2RM_VREV64 0
4518#define NEON_2RM_VREV32 1
4519#define NEON_2RM_VREV16 2
4520#define NEON_2RM_VPADDL 4
4521#define NEON_2RM_VPADDL_U 5
4522#define NEON_2RM_VCLS 8
4523#define NEON_2RM_VCLZ 9
4524#define NEON_2RM_VCNT 10
4525#define NEON_2RM_VMVN 11
4526#define NEON_2RM_VPADAL 12
4527#define NEON_2RM_VPADAL_U 13
4528#define NEON_2RM_VQABS 14
4529#define NEON_2RM_VQNEG 15
4530#define NEON_2RM_VCGT0 16
4531#define NEON_2RM_VCGE0 17
4532#define NEON_2RM_VCEQ0 18
4533#define NEON_2RM_VCLE0 19
4534#define NEON_2RM_VCLT0 20
4535#define NEON_2RM_VABS 22
4536#define NEON_2RM_VNEG 23
4537#define NEON_2RM_VCGT0_F 24
4538#define NEON_2RM_VCGE0_F 25
4539#define NEON_2RM_VCEQ0_F 26
4540#define NEON_2RM_VCLE0_F 27
4541#define NEON_2RM_VCLT0_F 28
4542#define NEON_2RM_VABS_F 30
4543#define NEON_2RM_VNEG_F 31
4544#define NEON_2RM_VSWP 32
4545#define NEON_2RM_VTRN 33
4546#define NEON_2RM_VUZP 34
4547#define NEON_2RM_VZIP 35
4548#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4549#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4550#define NEON_2RM_VSHLL 38
4551#define NEON_2RM_VCVT_F16_F32 44
4552#define NEON_2RM_VCVT_F32_F16 46
4553#define NEON_2RM_VRECPE 56
4554#define NEON_2RM_VRSQRTE 57
4555#define NEON_2RM_VRECPE_F 58
4556#define NEON_2RM_VRSQRTE_F 59
4557#define NEON_2RM_VCVT_FS 60
4558#define NEON_2RM_VCVT_FU 61
4559#define NEON_2RM_VCVT_SF 62
4560#define NEON_2RM_VCVT_UF 63
4561
4562static int neon_2rm_is_float_op(int op)
4563{
4564 /* Return true if this neon 2reg-misc op is float-to-float */
4565 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4566 op >= NEON_2RM_VRECPE_F);
4567}
4568
4569/* Each entry in this array has bit n set if the insn allows
4570 * size value n (otherwise it will UNDEF). Since unallocated
4571 * op values will have no bits set they always UNDEF.
4572 */
4573static const uint8_t neon_2rm_sizes[] = {
4574 [NEON_2RM_VREV64] = 0x7,
4575 [NEON_2RM_VREV32] = 0x3,
4576 [NEON_2RM_VREV16] = 0x1,
4577 [NEON_2RM_VPADDL] = 0x7,
4578 [NEON_2RM_VPADDL_U] = 0x7,
4579 [NEON_2RM_VCLS] = 0x7,
4580 [NEON_2RM_VCLZ] = 0x7,
4581 [NEON_2RM_VCNT] = 0x1,
4582 [NEON_2RM_VMVN] = 0x1,
4583 [NEON_2RM_VPADAL] = 0x7,
4584 [NEON_2RM_VPADAL_U] = 0x7,
4585 [NEON_2RM_VQABS] = 0x7,
4586 [NEON_2RM_VQNEG] = 0x7,
4587 [NEON_2RM_VCGT0] = 0x7,
4588 [NEON_2RM_VCGE0] = 0x7,
4589 [NEON_2RM_VCEQ0] = 0x7,
4590 [NEON_2RM_VCLE0] = 0x7,
4591 [NEON_2RM_VCLT0] = 0x7,
4592 [NEON_2RM_VABS] = 0x7,
4593 [NEON_2RM_VNEG] = 0x7,
4594 [NEON_2RM_VCGT0_F] = 0x4,
4595 [NEON_2RM_VCGE0_F] = 0x4,
4596 [NEON_2RM_VCEQ0_F] = 0x4,
4597 [NEON_2RM_VCLE0_F] = 0x4,
4598 [NEON_2RM_VCLT0_F] = 0x4,
4599 [NEON_2RM_VABS_F] = 0x4,
4600 [NEON_2RM_VNEG_F] = 0x4,
4601 [NEON_2RM_VSWP] = 0x1,
4602 [NEON_2RM_VTRN] = 0x7,
4603 [NEON_2RM_VUZP] = 0x7,
4604 [NEON_2RM_VZIP] = 0x7,
4605 [NEON_2RM_VMOVN] = 0x7,
4606 [NEON_2RM_VQMOVN] = 0x7,
4607 [NEON_2RM_VSHLL] = 0x7,
4608 [NEON_2RM_VCVT_F16_F32] = 0x2,
4609 [NEON_2RM_VCVT_F32_F16] = 0x2,
4610 [NEON_2RM_VRECPE] = 0x4,
4611 [NEON_2RM_VRSQRTE] = 0x4,
4612 [NEON_2RM_VRECPE_F] = 0x4,
4613 [NEON_2RM_VRSQRTE_F] = 0x4,
4614 [NEON_2RM_VCVT_FS] = 0x4,
4615 [NEON_2RM_VCVT_FU] = 0x4,
4616 [NEON_2RM_VCVT_SF] = 0x4,
4617 [NEON_2RM_VCVT_UF] = 0x4,
4618};
4619
9ee6e8bb
PB
4620/* Translate a NEON data processing instruction. Return nonzero if the
4621 instruction is invalid.
ad69471c
PB
4622 We process data in a mixture of 32-bit and 64-bit chunks.
4623 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4624
0ecb72a5 4625static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4626{
4627 int op;
4628 int q;
4629 int rd, rn, rm;
4630 int size;
4631 int shift;
4632 int pass;
4633 int count;
4634 int pairwise;
4635 int u;
ca9a32e4 4636 uint32_t imm, mask;
b75263d6 4637 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4638 TCGv_i64 tmp64;
9ee6e8bb 4639
5df8bac1 4640 if (!s->vfp_enabled)
9ee6e8bb
PB
4641 return 1;
4642 q = (insn & (1 << 6)) != 0;
4643 u = (insn >> 24) & 1;
4644 VFP_DREG_D(rd, insn);
4645 VFP_DREG_N(rn, insn);
4646 VFP_DREG_M(rm, insn);
4647 size = (insn >> 20) & 3;
4648 if ((insn & (1 << 23)) == 0) {
4649 /* Three register same length. */
4650 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4651 /* Catch invalid op and bad size combinations: UNDEF */
4652 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4653 return 1;
4654 }
25f84f79
PM
4655 /* All insns of this form UNDEF for either this condition or the
4656 * superset of cases "Q==1"; we catch the latter later.
4657 */
4658 if (q && ((rd | rn | rm) & 1)) {
4659 return 1;
4660 }
62698be3
PM
4661 if (size == 3 && op != NEON_3R_LOGIC) {
4662 /* 64-bit element instructions. */
9ee6e8bb 4663 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4664 neon_load_reg64(cpu_V0, rn + pass);
4665 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4666 switch (op) {
62698be3 4667 case NEON_3R_VQADD:
9ee6e8bb 4668 if (u) {
02da0b2d
PM
4669 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4670 cpu_V0, cpu_V1);
2c0262af 4671 } else {
02da0b2d
PM
4672 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4673 cpu_V0, cpu_V1);
2c0262af 4674 }
9ee6e8bb 4675 break;
62698be3 4676 case NEON_3R_VQSUB:
9ee6e8bb 4677 if (u) {
02da0b2d
PM
4678 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4679 cpu_V0, cpu_V1);
ad69471c 4680 } else {
02da0b2d
PM
4681 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4682 cpu_V0, cpu_V1);
ad69471c
PB
4683 }
4684 break;
62698be3 4685 case NEON_3R_VSHL:
ad69471c
PB
4686 if (u) {
4687 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4688 } else {
4689 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4690 }
4691 break;
62698be3 4692 case NEON_3R_VQSHL:
ad69471c 4693 if (u) {
02da0b2d
PM
4694 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4695 cpu_V1, cpu_V0);
ad69471c 4696 } else {
02da0b2d
PM
4697 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4698 cpu_V1, cpu_V0);
ad69471c
PB
4699 }
4700 break;
62698be3 4701 case NEON_3R_VRSHL:
ad69471c
PB
4702 if (u) {
4703 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4704 } else {
ad69471c
PB
4705 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4706 }
4707 break;
62698be3 4708 case NEON_3R_VQRSHL:
ad69471c 4709 if (u) {
02da0b2d
PM
4710 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4711 cpu_V1, cpu_V0);
ad69471c 4712 } else {
02da0b2d
PM
4713 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4714 cpu_V1, cpu_V0);
1e8d4eec 4715 }
9ee6e8bb 4716 break;
62698be3 4717 case NEON_3R_VADD_VSUB:
9ee6e8bb 4718 if (u) {
ad69471c 4719 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4720 } else {
ad69471c 4721 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4722 }
4723 break;
4724 default:
4725 abort();
2c0262af 4726 }
ad69471c 4727 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4728 }
9ee6e8bb 4729 return 0;
2c0262af 4730 }
25f84f79 4731 pairwise = 0;
9ee6e8bb 4732 switch (op) {
62698be3
PM
4733 case NEON_3R_VSHL:
4734 case NEON_3R_VQSHL:
4735 case NEON_3R_VRSHL:
4736 case NEON_3R_VQRSHL:
9ee6e8bb 4737 {
ad69471c
PB
4738 int rtmp;
4739 /* Shift instruction operands are reversed. */
4740 rtmp = rn;
9ee6e8bb 4741 rn = rm;
ad69471c 4742 rm = rtmp;
9ee6e8bb 4743 }
2c0262af 4744 break;
25f84f79
PM
4745 case NEON_3R_VPADD:
4746 if (u) {
4747 return 1;
4748 }
4749 /* Fall through */
62698be3
PM
4750 case NEON_3R_VPMAX:
4751 case NEON_3R_VPMIN:
9ee6e8bb 4752 pairwise = 1;
2c0262af 4753 break;
25f84f79
PM
4754 case NEON_3R_FLOAT_ARITH:
4755 pairwise = (u && size < 2); /* if VPADD (float) */
4756 break;
4757 case NEON_3R_FLOAT_MINMAX:
4758 pairwise = u; /* if VPMIN/VPMAX (float) */
4759 break;
4760 case NEON_3R_FLOAT_CMP:
4761 if (!u && size) {
4762 /* no encoding for U=0 C=1x */
4763 return 1;
4764 }
4765 break;
4766 case NEON_3R_FLOAT_ACMP:
4767 if (!u) {
4768 return 1;
4769 }
4770 break;
4771 case NEON_3R_VRECPS_VRSQRTS:
4772 if (u) {
4773 return 1;
4774 }
2c0262af 4775 break;
25f84f79
PM
4776 case NEON_3R_VMUL:
4777 if (u && (size != 0)) {
4778 /* UNDEF on invalid size for polynomial subcase */
4779 return 1;
4780 }
2c0262af 4781 break;
da97f52c
PM
4782 case NEON_3R_VFM:
4783 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4784 return 1;
4785 }
4786 break;
9ee6e8bb 4787 default:
2c0262af 4788 break;
9ee6e8bb 4789 }
dd8fbd78 4790
25f84f79
PM
4791 if (pairwise && q) {
4792 /* All the pairwise insns UNDEF if Q is set */
4793 return 1;
4794 }
4795
9ee6e8bb
PB
4796 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4797
4798 if (pairwise) {
4799 /* Pairwise. */
a5a14945
JR
4800 if (pass < 1) {
4801 tmp = neon_load_reg(rn, 0);
4802 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4803 } else {
a5a14945
JR
4804 tmp = neon_load_reg(rm, 0);
4805 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4806 }
4807 } else {
4808 /* Elementwise. */
dd8fbd78
FN
4809 tmp = neon_load_reg(rn, pass);
4810 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4811 }
4812 switch (op) {
62698be3 4813 case NEON_3R_VHADD:
9ee6e8bb
PB
4814 GEN_NEON_INTEGER_OP(hadd);
4815 break;
62698be3 4816 case NEON_3R_VQADD:
02da0b2d 4817 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4818 break;
62698be3 4819 case NEON_3R_VRHADD:
9ee6e8bb 4820 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4821 break;
62698be3 4822 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4823 switch ((u << 2) | size) {
4824 case 0: /* VAND */
dd8fbd78 4825 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4826 break;
4827 case 1: /* BIC */
f669df27 4828 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4829 break;
4830 case 2: /* VORR */
dd8fbd78 4831 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4832 break;
4833 case 3: /* VORN */
f669df27 4834 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4835 break;
4836 case 4: /* VEOR */
dd8fbd78 4837 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4838 break;
4839 case 5: /* VBSL */
dd8fbd78
FN
4840 tmp3 = neon_load_reg(rd, pass);
4841 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4842 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4843 break;
4844 case 6: /* VBIT */
dd8fbd78
FN
4845 tmp3 = neon_load_reg(rd, pass);
4846 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4847 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4848 break;
4849 case 7: /* VBIF */
dd8fbd78
FN
4850 tmp3 = neon_load_reg(rd, pass);
4851 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4852 tcg_temp_free_i32(tmp3);
9ee6e8bb 4853 break;
2c0262af
FB
4854 }
4855 break;
62698be3 4856 case NEON_3R_VHSUB:
9ee6e8bb
PB
4857 GEN_NEON_INTEGER_OP(hsub);
4858 break;
62698be3 4859 case NEON_3R_VQSUB:
02da0b2d 4860 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4861 break;
62698be3 4862 case NEON_3R_VCGT:
9ee6e8bb
PB
4863 GEN_NEON_INTEGER_OP(cgt);
4864 break;
62698be3 4865 case NEON_3R_VCGE:
9ee6e8bb
PB
4866 GEN_NEON_INTEGER_OP(cge);
4867 break;
62698be3 4868 case NEON_3R_VSHL:
ad69471c 4869 GEN_NEON_INTEGER_OP(shl);
2c0262af 4870 break;
62698be3 4871 case NEON_3R_VQSHL:
02da0b2d 4872 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4873 break;
62698be3 4874 case NEON_3R_VRSHL:
ad69471c 4875 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4876 break;
62698be3 4877 case NEON_3R_VQRSHL:
02da0b2d 4878 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4879 break;
62698be3 4880 case NEON_3R_VMAX:
9ee6e8bb
PB
4881 GEN_NEON_INTEGER_OP(max);
4882 break;
62698be3 4883 case NEON_3R_VMIN:
9ee6e8bb
PB
4884 GEN_NEON_INTEGER_OP(min);
4885 break;
62698be3 4886 case NEON_3R_VABD:
9ee6e8bb
PB
4887 GEN_NEON_INTEGER_OP(abd);
4888 break;
62698be3 4889 case NEON_3R_VABA:
9ee6e8bb 4890 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4891 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4892 tmp2 = neon_load_reg(rd, pass);
4893 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4894 break;
62698be3 4895 case NEON_3R_VADD_VSUB:
9ee6e8bb 4896 if (!u) { /* VADD */
62698be3 4897 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4898 } else { /* VSUB */
4899 switch (size) {
dd8fbd78
FN
4900 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4901 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4902 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4903 default: abort();
9ee6e8bb
PB
4904 }
4905 }
4906 break;
62698be3 4907 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4908 if (!u) { /* VTST */
4909 switch (size) {
dd8fbd78
FN
4910 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4911 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4912 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4913 default: abort();
9ee6e8bb
PB
4914 }
4915 } else { /* VCEQ */
4916 switch (size) {
dd8fbd78
FN
4917 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4918 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4919 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4920 default: abort();
9ee6e8bb
PB
4921 }
4922 }
4923 break;
62698be3 4924 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4925 switch (size) {
dd8fbd78
FN
4926 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4927 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4928 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4929 default: abort();
9ee6e8bb 4930 }
7d1b0095 4931 tcg_temp_free_i32(tmp2);
dd8fbd78 4932 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4933 if (u) { /* VMLS */
dd8fbd78 4934 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4935 } else { /* VMLA */
dd8fbd78 4936 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4937 }
4938 break;
62698be3 4939 case NEON_3R_VMUL:
9ee6e8bb 4940 if (u) { /* polynomial */
dd8fbd78 4941 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4942 } else { /* Integer */
4943 switch (size) {
dd8fbd78
FN
4944 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4945 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4946 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4947 default: abort();
9ee6e8bb
PB
4948 }
4949 }
4950 break;
62698be3 4951 case NEON_3R_VPMAX:
9ee6e8bb
PB
4952 GEN_NEON_INTEGER_OP(pmax);
4953 break;
62698be3 4954 case NEON_3R_VPMIN:
9ee6e8bb
PB
4955 GEN_NEON_INTEGER_OP(pmin);
4956 break;
62698be3 4957 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4958 if (!u) { /* VQDMULH */
4959 switch (size) {
02da0b2d
PM
4960 case 1:
4961 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4962 break;
4963 case 2:
4964 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4965 break;
62698be3 4966 default: abort();
9ee6e8bb 4967 }
62698be3 4968 } else { /* VQRDMULH */
9ee6e8bb 4969 switch (size) {
02da0b2d
PM
4970 case 1:
4971 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4972 break;
4973 case 2:
4974 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4975 break;
62698be3 4976 default: abort();
9ee6e8bb
PB
4977 }
4978 }
4979 break;
62698be3 4980 case NEON_3R_VPADD:
9ee6e8bb 4981 switch (size) {
dd8fbd78
FN
4982 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4983 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4984 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4985 default: abort();
9ee6e8bb
PB
4986 }
4987 break;
62698be3 4988 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4989 {
4990 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4991 switch ((u << 2) | size) {
4992 case 0: /* VADD */
aa47cfdd
PM
4993 case 4: /* VPADD */
4994 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4995 break;
4996 case 2: /* VSUB */
aa47cfdd 4997 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4998 break;
4999 case 6: /* VABD */
aa47cfdd 5000 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5001 break;
5002 default:
62698be3 5003 abort();
9ee6e8bb 5004 }
aa47cfdd 5005 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5006 break;
aa47cfdd 5007 }
62698be3 5008 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5009 {
5010 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5011 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5012 if (!u) {
7d1b0095 5013 tcg_temp_free_i32(tmp2);
dd8fbd78 5014 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5015 if (size == 0) {
aa47cfdd 5016 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5017 } else {
aa47cfdd 5018 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5019 }
5020 }
aa47cfdd 5021 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5022 break;
aa47cfdd 5023 }
62698be3 5024 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5025 {
5026 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5027 if (!u) {
aa47cfdd 5028 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5029 } else {
aa47cfdd
PM
5030 if (size == 0) {
5031 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5032 } else {
5033 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5034 }
b5ff1b31 5035 }
aa47cfdd 5036 tcg_temp_free_ptr(fpstatus);
2c0262af 5037 break;
aa47cfdd 5038 }
62698be3 5039 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5040 {
5041 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5042 if (size == 0) {
5043 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5044 } else {
5045 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5046 }
5047 tcg_temp_free_ptr(fpstatus);
2c0262af 5048 break;
aa47cfdd 5049 }
62698be3 5050 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5051 {
5052 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5053 if (size == 0) {
5054 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
5055 } else {
5056 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
5057 }
5058 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5059 break;
aa47cfdd 5060 }
62698be3 5061 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 5062 if (size == 0)
dd8fbd78 5063 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 5064 else
dd8fbd78 5065 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 5066 break;
da97f52c
PM
5067 case NEON_3R_VFM:
5068 {
5069 /* VFMA, VFMS: fused multiply-add */
5070 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5071 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5072 if (size) {
5073 /* VFMS */
5074 gen_helper_vfp_negs(tmp, tmp);
5075 }
5076 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5077 tcg_temp_free_i32(tmp3);
5078 tcg_temp_free_ptr(fpstatus);
5079 break;
5080 }
9ee6e8bb
PB
5081 default:
5082 abort();
2c0262af 5083 }
7d1b0095 5084 tcg_temp_free_i32(tmp2);
dd8fbd78 5085
9ee6e8bb
PB
5086 /* Save the result. For elementwise operations we can put it
5087 straight into the destination register. For pairwise operations
5088 we have to be careful to avoid clobbering the source operands. */
5089 if (pairwise && rd == rm) {
dd8fbd78 5090 neon_store_scratch(pass, tmp);
9ee6e8bb 5091 } else {
dd8fbd78 5092 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5093 }
5094
5095 } /* for pass */
5096 if (pairwise && rd == rm) {
5097 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5098 tmp = neon_load_scratch(pass);
5099 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5100 }
5101 }
ad69471c 5102 /* End of 3 register same size operations. */
9ee6e8bb
PB
5103 } else if (insn & (1 << 4)) {
5104 if ((insn & 0x00380080) != 0) {
5105 /* Two registers and shift. */
5106 op = (insn >> 8) & 0xf;
5107 if (insn & (1 << 7)) {
cc13115b
PM
5108 /* 64-bit shift. */
5109 if (op > 7) {
5110 return 1;
5111 }
9ee6e8bb
PB
5112 size = 3;
5113 } else {
5114 size = 2;
5115 while ((insn & (1 << (size + 19))) == 0)
5116 size--;
5117 }
5118 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5119 /* To avoid excessive duplication of ops we implement shift
5120 by immediate using the variable shift operations. */
5121 if (op < 8) {
5122 /* Shift by immediate:
5123 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5124 if (q && ((rd | rm) & 1)) {
5125 return 1;
5126 }
5127 if (!u && (op == 4 || op == 6)) {
5128 return 1;
5129 }
9ee6e8bb
PB
5130 /* Right shifts are encoded as N - shift, where N is the
5131 element size in bits. */
5132 if (op <= 4)
5133 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5134 if (size == 3) {
5135 count = q + 1;
5136 } else {
5137 count = q ? 4: 2;
5138 }
5139 switch (size) {
5140 case 0:
5141 imm = (uint8_t) shift;
5142 imm |= imm << 8;
5143 imm |= imm << 16;
5144 break;
5145 case 1:
5146 imm = (uint16_t) shift;
5147 imm |= imm << 16;
5148 break;
5149 case 2:
5150 case 3:
5151 imm = shift;
5152 break;
5153 default:
5154 abort();
5155 }
5156
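/* Added note: at this point imm holds the (possibly negated) shift amount
 * replicated into every element lane, e.g. size == 0 with shift == 3 gives
 * imm == 0x03030303, so a single 32-bit variable-shift helper call can shift
 * all the lanes of a pass at once. */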
5157 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5158 if (size == 3) {
5159 neon_load_reg64(cpu_V0, rm + pass);
5160 tcg_gen_movi_i64(cpu_V1, imm);
5161 switch (op) {
5162 case 0: /* VSHR */
5163 case 1: /* VSRA */
5164 if (u)
5165 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5166 else
ad69471c 5167 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5168 break;
ad69471c
PB
5169 case 2: /* VRSHR */
5170 case 3: /* VRSRA */
5171 if (u)
5172 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5173 else
ad69471c 5174 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5175 break;
ad69471c 5176 case 4: /* VSRI */
ad69471c
PB
5177 case 5: /* VSHL, VSLI */
5178 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5179 break;
0322b26e 5180 case 6: /* VQSHLU */
02da0b2d
PM
5181 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5182 cpu_V0, cpu_V1);
ad69471c 5183 break;
0322b26e
PM
5184 case 7: /* VQSHL */
5185 if (u) {
02da0b2d 5186 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5187 cpu_V0, cpu_V1);
5188 } else {
02da0b2d 5189 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5190 cpu_V0, cpu_V1);
5191 }
9ee6e8bb 5192 break;
9ee6e8bb 5193 }
ad69471c
PB
5194 if (op == 1 || op == 3) {
5195 /* Accumulate. */
5371cb81 5196 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5197 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5198 } else if (op == 4 || (op == 5 && u)) {
5199 /* Insert */
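/* Added note: the mask computed below covers exactly the bits produced by
 * the shift, so for VSRI (a right shift by n) the top n bits of the
 * destination survive, and for VSLI (a left shift by n) the low n bits
 * survive. */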
923e6509
CL
5200 uint64_t mask;
5201 neon_load_reg64(cpu_V1, rd + pass);
5202 if (shift < -63 || shift > 63) {
5203 mask = 0;
5204 } else {
5205 if (op == 4) {
5206 mask = 0xffffffffffffffffull >> -shift;
5207 } else {
5208 mask = 0xffffffffffffffffull << shift;
5209 }
5210 }
5211 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5212 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5213 }
5214 neon_store_reg64(cpu_V0, rd + pass);
5215 } else { /* size < 3 */
5216 /* Operands in T0 and T1. */
dd8fbd78 5217 tmp = neon_load_reg(rm, pass);
7d1b0095 5218 tmp2 = tcg_temp_new_i32();
dd8fbd78 5219 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5220 switch (op) {
5221 case 0: /* VSHR */
5222 case 1: /* VSRA */
5223 GEN_NEON_INTEGER_OP(shl);
5224 break;
5225 case 2: /* VRSHR */
5226 case 3: /* VRSRA */
5227 GEN_NEON_INTEGER_OP(rshl);
5228 break;
5229 case 4: /* VSRI */
ad69471c
PB
5230 case 5: /* VSHL, VSLI */
5231 switch (size) {
dd8fbd78
FN
5232 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5233 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5234 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5235 default: abort();
ad69471c
PB
5236 }
5237 break;
0322b26e 5238 case 6: /* VQSHLU */
ad69471c 5239 switch (size) {
0322b26e 5240 case 0:
02da0b2d
PM
5241 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5242 tmp, tmp2);
0322b26e
PM
5243 break;
5244 case 1:
02da0b2d
PM
5245 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5246 tmp, tmp2);
0322b26e
PM
5247 break;
5248 case 2:
02da0b2d
PM
5249 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5250 tmp, tmp2);
0322b26e
PM
5251 break;
5252 default:
cc13115b 5253 abort();
ad69471c
PB
5254 }
5255 break;
0322b26e 5256 case 7: /* VQSHL */
02da0b2d 5257 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5258 break;
ad69471c 5259 }
7d1b0095 5260 tcg_temp_free_i32(tmp2);
ad69471c
PB
5261
5262 if (op == 1 || op == 3) {
5263 /* Accumulate. */
dd8fbd78 5264 tmp2 = neon_load_reg(rd, pass);
5371cb81 5265 gen_neon_add(size, tmp, tmp2);
7d1b0095 5266 tcg_temp_free_i32(tmp2);
ad69471c
PB
5267 } else if (op == 4 || (op == 5 && u)) {
5268 /* Insert */
5269 switch (size) {
5270 case 0:
5271 if (op == 4)
ca9a32e4 5272 mask = 0xff >> -shift;
ad69471c 5273 else
ca9a32e4
JR
5274 mask = (uint8_t)(0xff << shift);
5275 mask |= mask << 8;
5276 mask |= mask << 16;
ad69471c
PB
5277 break;
5278 case 1:
5279 if (op == 4)
ca9a32e4 5280 mask = 0xffff >> -shift;
ad69471c 5281 else
ca9a32e4
JR
5282 mask = (uint16_t)(0xffff << shift);
5283 mask |= mask << 16;
ad69471c
PB
5284 break;
5285 case 2:
ca9a32e4
JR
5286 if (shift < -31 || shift > 31) {
5287 mask = 0;
5288 } else {
5289 if (op == 4)
5290 mask = 0xffffffffu >> -shift;
5291 else
5292 mask = 0xffffffffu << shift;
5293 }
ad69471c
PB
5294 break;
5295 default:
5296 abort();
5297 }
dd8fbd78 5298 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5299 tcg_gen_andi_i32(tmp, tmp, mask);
5300 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5301 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5302 tcg_temp_free_i32(tmp2);
ad69471c 5303 }
dd8fbd78 5304 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5305 }
5306 } /* for pass */
5307 } else if (op < 10) {
ad69471c 5308 /* Shift by immediate and narrow:
9ee6e8bb 5309 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5310 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5311 if (rm & 1) {
5312 return 1;
5313 }
9ee6e8bb
PB
5314 shift = shift - (1 << (size + 3));
5315 size++;
92cdfaeb 5316 if (size == 3) {
a7812ae4 5317 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5318 neon_load_reg64(cpu_V0, rm);
5319 neon_load_reg64(cpu_V1, rm + 1);
5320 for (pass = 0; pass < 2; pass++) {
5321 TCGv_i64 in;
5322 if (pass == 0) {
5323 in = cpu_V0;
5324 } else {
5325 in = cpu_V1;
5326 }
ad69471c 5327 if (q) {
0b36f4cd 5328 if (input_unsigned) {
92cdfaeb 5329 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5330 } else {
92cdfaeb 5331 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5332 }
ad69471c 5333 } else {
0b36f4cd 5334 if (input_unsigned) {
92cdfaeb 5335 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5336 } else {
92cdfaeb 5337 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5338 }
ad69471c 5339 }
7d1b0095 5340 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5341 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5342 neon_store_reg(rd, pass, tmp);
5343 } /* for pass */
5344 tcg_temp_free_i64(tmp64);
5345 } else {
5346 if (size == 1) {
5347 imm = (uint16_t)shift;
5348 imm |= imm << 16;
2c0262af 5349 } else {
92cdfaeb
PM
5350 /* size == 2 */
5351 imm = (uint32_t)shift;
5352 }
5353 tmp2 = tcg_const_i32(imm);
5354 tmp4 = neon_load_reg(rm + 1, 0);
5355 tmp5 = neon_load_reg(rm + 1, 1);
5356 for (pass = 0; pass < 2; pass++) {
5357 if (pass == 0) {
5358 tmp = neon_load_reg(rm, 0);
5359 } else {
5360 tmp = tmp4;
5361 }
0b36f4cd
CL
5362 gen_neon_shift_narrow(size, tmp, tmp2, q,
5363 input_unsigned);
92cdfaeb
PM
5364 if (pass == 0) {
5365 tmp3 = neon_load_reg(rm, 1);
5366 } else {
5367 tmp3 = tmp5;
5368 }
0b36f4cd
CL
5369 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5370 input_unsigned);
36aa55dc 5371 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5372 tcg_temp_free_i32(tmp);
5373 tcg_temp_free_i32(tmp3);
5374 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5375 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5376 neon_store_reg(rd, pass, tmp);
5377 } /* for pass */
c6067f04 5378 tcg_temp_free_i32(tmp2);
b75263d6 5379 }
9ee6e8bb 5380 } else if (op == 10) {
cc13115b
PM
5381 /* VSHLL, VMOVL */
5382 if (q || (rd & 1)) {
9ee6e8bb 5383 return 1;
cc13115b 5384 }
ad69471c
PB
5385 tmp = neon_load_reg(rm, 0);
5386 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5387 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5388 if (pass == 1)
5389 tmp = tmp2;
5390
5391 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5392
9ee6e8bb
PB
5393 if (shift != 0) {
5394 /* The shift is less than the width of the source
ad69471c
PB
5395 type, so we can just shift the whole register. */
5396 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5397 /* Widen the result of the shift: we need to clear
5398 * the potential overflow bits resulting from
5399 * left bits of the narrow input appearing as
5400 * right bits of the left-hand neighbouring narrow
5401 * input. */
ad69471c
PB
5402 if (size < 2 || !u) {
5403 uint64_t imm64;
5404 if (size == 0) {
5405 imm = (0xffu >> (8 - shift));
5406 imm |= imm << 16;
acdf01ef 5407 } else if (size == 1) {
ad69471c 5408 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5409 } else {
5410 /* size == 2 */
5411 imm = 0xffffffff >> (32 - shift);
5412 }
5413 if (size < 2) {
5414 imm64 = imm | (((uint64_t)imm) << 32);
5415 } else {
5416 imm64 = imm;
9ee6e8bb 5417 }
acdf01ef 5418 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5419 }
5420 }
ad69471c 5421 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5422 }
f73534a5 5423 } else if (op >= 14) {
9ee6e8bb 5424 /* VCVT fixed-point. */
cc13115b
PM
5425 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5426 return 1;
5427 }
f73534a5
PM
5428 /* We have already masked out the must-be-1 top bit of imm6,
5429 * hence this 32-shift where the ARM ARM has 64-imm6.
5430 */
5431 shift = 32 - shift;
9ee6e8bb 5432 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5433 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5434 if (!(op & 1)) {
9ee6e8bb 5435 if (u)
5500b06c 5436 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5437 else
5500b06c 5438 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5439 } else {
5440 if (u)
5500b06c 5441 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5442 else
5500b06c 5443 gen_vfp_tosl(0, shift, 1);
2c0262af 5444 }
4373f3ce 5445 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5446 }
5447 } else {
9ee6e8bb
PB
5448 return 1;
5449 }
5450 } else { /* (insn & 0x00380080) == 0 */
5451 int invert;
7d80fee5
PM
5452 if (q && (rd & 1)) {
5453 return 1;
5454 }
9ee6e8bb
PB
5455
5456 op = (insn >> 8) & 0xf;
5457 /* One register and immediate. */
5458 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5459 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5460 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5461 * We choose to not special-case this and will behave as if a
5462 * valid constant encoding of 0 had been given.
5463 */
9ee6e8bb
PB
5464 switch (op) {
5465 case 0: case 1:
5466 /* no-op */
5467 break;
5468 case 2: case 3:
5469 imm <<= 8;
5470 break;
5471 case 4: case 5:
5472 imm <<= 16;
5473 break;
5474 case 6: case 7:
5475 imm <<= 24;
5476 break;
5477 case 8: case 9:
5478 imm |= imm << 16;
5479 break;
5480 case 10: case 11:
5481 imm = (imm << 8) | (imm << 24);
5482 break;
5483 case 12:
8e31209e 5484 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5485 break;
5486 case 13:
5487 imm = (imm << 16) | 0xffff;
5488 break;
5489 case 14:
5490 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5491 if (invert)
5492 imm = ~imm;
5493 break;
5494 case 15:
7d80fee5
PM
5495 if (invert) {
5496 return 1;
5497 }
9ee6e8bb
PB
5498 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5499 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5500 break;
5501 }
5502 if (invert)
5503 imm = ~imm;
5504
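/* Added note: op here is the "cmode" field of the modified-immediate
 * encoding; e.g. cmode 12 expands an 8-bit constant 0xab to 0x0000abff
 * (the "shift ones" form), and the expanded word is then moved into, or
 * ORed/ANDed into, every 32-bit lane by the loop below. */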
9ee6e8bb
PB
5505 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5506 if (op & 1 && op < 12) {
ad69471c 5507 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5508 if (invert) {
5509 /* The immediate value has already been inverted, so
5510 BIC becomes AND. */
ad69471c 5511 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5512 } else {
ad69471c 5513 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5514 }
9ee6e8bb 5515 } else {
ad69471c 5516 /* VMOV, VMVN. */
7d1b0095 5517 tmp = tcg_temp_new_i32();
9ee6e8bb 5518 if (op == 14 && invert) {
a5a14945 5519 int n;
ad69471c
PB
5520 uint32_t val;
5521 val = 0;
9ee6e8bb
PB
5522 for (n = 0; n < 4; n++) {
5523 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5524 val |= 0xff << (n * 8);
9ee6e8bb 5525 }
ad69471c
PB
5526 tcg_gen_movi_i32(tmp, val);
5527 } else {
5528 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5529 }
9ee6e8bb 5530 }
ad69471c 5531 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5532 }
5533 }
e4b3861d 5534 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5535 if (size != 3) {
5536 op = (insn >> 8) & 0xf;
5537 if ((insn & (1 << 6)) == 0) {
5538 /* Three registers of different lengths. */
5539 int src1_wide;
5540 int src2_wide;
5541 int prewiden;
695272dc
PM
5542 /* undefreq: bit 0 : UNDEF if size != 0
5543 * bit 1 : UNDEF if size == 0
5544 * bit 2 : UNDEF if U == 1
5545 * Note that [1:0] set implies 'always UNDEF'
5546 */
5547 int undefreq;
5548 /* prewiden, src1_wide, src2_wide, undefreq */
5549 static const int neon_3reg_wide[16][4] = {
5550 {1, 0, 0, 0}, /* VADDL */
5551 {1, 1, 0, 0}, /* VADDW */
5552 {1, 0, 0, 0}, /* VSUBL */
5553 {1, 1, 0, 0}, /* VSUBW */
5554 {0, 1, 1, 0}, /* VADDHN */
5555 {0, 0, 0, 0}, /* VABAL */
5556 {0, 1, 1, 0}, /* VSUBHN */
5557 {0, 0, 0, 0}, /* VABDL */
5558 {0, 0, 0, 0}, /* VMLAL */
5559 {0, 0, 0, 6}, /* VQDMLAL */
5560 {0, 0, 0, 0}, /* VMLSL */
5561 {0, 0, 0, 6}, /* VQDMLSL */
5562 {0, 0, 0, 0}, /* Integer VMULL */
5563 {0, 0, 0, 2}, /* VQDMULL */
5564 {0, 0, 0, 5}, /* Polynomial VMULL */
5565 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5566 };
5567
5568 prewiden = neon_3reg_wide[op][0];
5569 src1_wide = neon_3reg_wide[op][1];
5570 src2_wide = neon_3reg_wide[op][2];
695272dc 5571 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5572
695272dc
PM
5573 if (((undefreq & 1) && (size != 0)) ||
5574 ((undefreq & 2) && (size == 0)) ||
5575 ((undefreq & 4) && u)) {
5576 return 1;
5577 }
5578 if ((src1_wide && (rn & 1)) ||
5579 (src2_wide && (rm & 1)) ||
5580 (!src2_wide && (rd & 1))) {
ad69471c 5581 return 1;
695272dc 5582 }
ad69471c 5583
9ee6e8bb
PB
5584 /* Avoid overlapping operands. Wide source operands are
5585 always aligned so will never overlap with wide
5586 destinations in problematic ways. */
8f8e3aa4 5587 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5588 tmp = neon_load_reg(rm, 1);
5589 neon_store_scratch(2, tmp);
8f8e3aa4 5590 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5591 tmp = neon_load_reg(rn, 1);
5592 neon_store_scratch(2, tmp);
9ee6e8bb 5593 }
a50f5b91 5594 TCGV_UNUSED(tmp3);
9ee6e8bb 5595 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5596 if (src1_wide) {
5597 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5598 TCGV_UNUSED(tmp);
9ee6e8bb 5599 } else {
ad69471c 5600 if (pass == 1 && rd == rn) {
dd8fbd78 5601 tmp = neon_load_scratch(2);
9ee6e8bb 5602 } else {
ad69471c
PB
5603 tmp = neon_load_reg(rn, pass);
5604 }
5605 if (prewiden) {
5606 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5607 }
5608 }
ad69471c
PB
5609 if (src2_wide) {
5610 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5611 TCGV_UNUSED(tmp2);
9ee6e8bb 5612 } else {
ad69471c 5613 if (pass == 1 && rd == rm) {
dd8fbd78 5614 tmp2 = neon_load_scratch(2);
9ee6e8bb 5615 } else {
ad69471c
PB
5616 tmp2 = neon_load_reg(rm, pass);
5617 }
5618 if (prewiden) {
5619 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5620 }
9ee6e8bb
PB
5621 }
5622 switch (op) {
5623 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5624 gen_neon_addl(size);
9ee6e8bb 5625 break;
79b0e534 5626 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5627 gen_neon_subl(size);
9ee6e8bb
PB
5628 break;
5629 case 5: case 7: /* VABAL, VABDL */
5630 switch ((size << 1) | u) {
ad69471c
PB
5631 case 0:
5632 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5633 break;
5634 case 1:
5635 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5636 break;
5637 case 2:
5638 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5639 break;
5640 case 3:
5641 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5642 break;
5643 case 4:
5644 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5645 break;
5646 case 5:
5647 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5648 break;
9ee6e8bb
PB
5649 default: abort();
5650 }
7d1b0095
PM
5651 tcg_temp_free_i32(tmp2);
5652 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5653 break;
5654 case 8: case 9: case 10: case 11: case 12: case 13:
5655 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5656 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5657 break;
5658 case 14: /* Polynomial VMULL */
e5ca24cb 5659 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5660 tcg_temp_free_i32(tmp2);
5661 tcg_temp_free_i32(tmp);
e5ca24cb 5662 break;
695272dc
PM
5663 default: /* 15 is RESERVED: caught earlier */
5664 abort();
9ee6e8bb 5665 }
ebcd88ce
PM
5666 if (op == 13) {
5667 /* VQDMULL */
5668 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5669 neon_store_reg64(cpu_V0, rd + pass);
5670 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5671 /* Accumulate. */
ebcd88ce 5672 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5673 switch (op) {
4dc064e6
PM
5674 case 10: /* VMLSL */
5675 gen_neon_negl(cpu_V0, size);
5676 /* Fall through */
5677 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5678 gen_neon_addl(size);
9ee6e8bb
PB
5679 break;
5680 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5681 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5682 if (op == 11) {
5683 gen_neon_negl(cpu_V0, size);
5684 }
ad69471c
PB
5685 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5686 break;
9ee6e8bb
PB
5687 default:
5688 abort();
5689 }
ad69471c 5690 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5691 } else if (op == 4 || op == 6) {
5692 /* Narrowing operation. */
7d1b0095 5693 tmp = tcg_temp_new_i32();
79b0e534 5694 if (!u) {
9ee6e8bb 5695 switch (size) {
ad69471c
PB
5696 case 0:
5697 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5698 break;
5699 case 1:
5700 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5701 break;
5702 case 2:
5703 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5704 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5705 break;
9ee6e8bb
PB
5706 default: abort();
5707 }
5708 } else {
5709 switch (size) {
ad69471c
PB
5710 case 0:
5711 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5712 break;
5713 case 1:
5714 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5715 break;
5716 case 2:
5717 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5718 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5719 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5720 break;
9ee6e8bb
PB
5721 default: abort();
5722 }
5723 }
ad69471c
PB
5724 if (pass == 0) {
5725 tmp3 = tmp;
5726 } else {
5727 neon_store_reg(rd, 0, tmp3);
5728 neon_store_reg(rd, 1, tmp);
5729 }
9ee6e8bb
PB
5730 } else {
5731 /* Write back the result. */
ad69471c 5732 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5733 }
5734 }
5735 } else {
3e3326df
PM
5736 /* Two registers and a scalar. NB that for ops of this form
5737 * the ARM ARM labels bit 24 as Q, but it is in our variable
5738 * 'u', not 'q'.
5739 */
5740 if (size == 0) {
5741 return 1;
5742 }
9ee6e8bb 5743 switch (op) {
9ee6e8bb 5744 case 1: /* Floating point VMLA scalar */
9ee6e8bb 5745 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5746 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5747 if (size == 1) {
5748 return 1;
5749 }
5750 /* fall through */
5751 case 0: /* Integer VMLA scalar */
5752 case 4: /* Integer VMLS scalar */
5753 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5754 case 12: /* VQDMULH scalar */
5755 case 13: /* VQRDMULH scalar */
3e3326df
PM
5756 if (u && ((rd | rn) & 1)) {
5757 return 1;
5758 }
dd8fbd78
FN
5759 tmp = neon_get_scalar(size, rm);
5760 neon_store_scratch(0, tmp);
9ee6e8bb 5761 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5762 tmp = neon_load_scratch(0);
5763 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5764 if (op == 12) {
5765 if (size == 1) {
02da0b2d 5766 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5767 } else {
02da0b2d 5768 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5769 }
5770 } else if (op == 13) {
5771 if (size == 1) {
02da0b2d 5772 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5773 } else {
02da0b2d 5774 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5775 }
5776 } else if (op & 1) {
aa47cfdd
PM
5777 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5778 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5779 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5780 } else {
5781 switch (size) {
dd8fbd78
FN
5782 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5783 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5784 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5785 default: abort();
9ee6e8bb
PB
5786 }
5787 }
7d1b0095 5788 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5789 if (op < 8) {
5790 /* Accumulate. */
dd8fbd78 5791 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5792 switch (op) {
5793 case 0:
dd8fbd78 5794 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5795 break;
5796 case 1:
aa47cfdd
PM
5797 {
5798 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5799 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5800 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5801 break;
aa47cfdd 5802 }
9ee6e8bb 5803 case 4:
dd8fbd78 5804 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5805 break;
5806 case 5:
aa47cfdd
PM
5807 {
5808 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5809 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5810 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5811 break;
aa47cfdd 5812 }
9ee6e8bb
PB
5813 default:
5814 abort();
5815 }
7d1b0095 5816 tcg_temp_free_i32(tmp2);
9ee6e8bb 5817 }
dd8fbd78 5818 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5819 }
5820 break;
9ee6e8bb 5821 case 3: /* VQDMLAL scalar */
9ee6e8bb 5822 case 7: /* VQDMLSL scalar */
9ee6e8bb 5823 case 11: /* VQDMULL scalar */
3e3326df 5824 if (u == 1) {
ad69471c 5825 return 1;
3e3326df
PM
5826 }
5827 /* fall through */
5828 case 2: /* VMLAL scalar */
5829 case 6: /* VMLSL scalar */
5830 case 10: /* VMULL scalar */
5831 if (rd & 1) {
5832 return 1;
5833 }
dd8fbd78 5834 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5835 /* We need a copy of tmp2 because gen_neon_mull
5836 * deletes it during pass 0. */
7d1b0095 5837 tmp4 = tcg_temp_new_i32();
c6067f04 5838 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5839 tmp3 = neon_load_reg(rn, 1);
ad69471c 5840
9ee6e8bb 5841 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5842 if (pass == 0) {
5843 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5844 } else {
dd8fbd78 5845 tmp = tmp3;
c6067f04 5846 tmp2 = tmp4;
9ee6e8bb 5847 }
ad69471c 5848 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5849 if (op != 11) {
5850 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5851 }
9ee6e8bb 5852 switch (op) {
4dc064e6
PM
5853 case 6:
5854 gen_neon_negl(cpu_V0, size);
5855 /* Fall through */
5856 case 2:
ad69471c 5857 gen_neon_addl(size);
9ee6e8bb
PB
5858 break;
5859 case 3: case 7:
ad69471c 5860 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5861 if (op == 7) {
5862 gen_neon_negl(cpu_V0, size);
5863 }
ad69471c 5864 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5865 break;
5866 case 10:
5867 /* no-op */
5868 break;
5869 case 11:
ad69471c 5870 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5871 break;
5872 default:
5873 abort();
5874 }
ad69471c 5875 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5876 }
dd8fbd78 5877
dd8fbd78 5878
9ee6e8bb
PB
5879 break;
5880 default: /* 14 and 15 are RESERVED */
5881 return 1;
5882 }
5883 }
5884 } else { /* size == 3 */
5885 if (!u) {
5886 /* Extract. */
9ee6e8bb 5887 imm = (insn >> 8) & 0xf;
ad69471c
PB
5888
5889 if (imm > 7 && !q)
5890 return 1;
5891
52579ea1
PM
5892 if (q && ((rd | rn | rm) & 1)) {
5893 return 1;
5894 }
5895
ad69471c
PB
5896 if (imm == 0) {
5897 neon_load_reg64(cpu_V0, rn);
5898 if (q) {
5899 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5900 }
ad69471c
PB
5901 } else if (imm == 8) {
5902 neon_load_reg64(cpu_V0, rn + 1);
5903 if (q) {
5904 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5905 }
ad69471c 5906 } else if (q) {
a7812ae4 5907 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5908 if (imm < 8) {
5909 neon_load_reg64(cpu_V0, rn);
a7812ae4 5910 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5911 } else {
5912 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5913 neon_load_reg64(tmp64, rm);
ad69471c
PB
5914 }
5915 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5916 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5917 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5918 if (imm < 8) {
5919 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5920 } else {
ad69471c
PB
5921 neon_load_reg64(cpu_V1, rm + 1);
5922 imm -= 8;
9ee6e8bb 5923 }
ad69471c 5924 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5925 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5926 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5927 tcg_temp_free_i64(tmp64);
ad69471c 5928 } else {
a7812ae4 5929 /* BUGFIX */
ad69471c 5930 neon_load_reg64(cpu_V0, rn);
a7812ae4 5931 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5932 neon_load_reg64(cpu_V1, rm);
a7812ae4 5933 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5934 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5935 }
5936 neon_store_reg64(cpu_V0, rd);
5937 if (q) {
5938 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5939 }
5940 } else if ((insn & (1 << 11)) == 0) {
5941 /* Two register misc. */
5942 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5943 size = (insn >> 18) & 3;
600b828c
PM
5944 /* UNDEF for unknown op values and bad op-size combinations */
5945 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5946 return 1;
5947 }
fc2a9b37
PM
5948 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5949 q && ((rm | rd) & 1)) {
5950 return 1;
5951 }
9ee6e8bb 5952 switch (op) {
600b828c 5953 case NEON_2RM_VREV64:
9ee6e8bb 5954 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5955 tmp = neon_load_reg(rm, pass * 2);
5956 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5957 switch (size) {
dd8fbd78
FN
5958 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5959 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5960 case 2: /* no-op */ break;
5961 default: abort();
5962 }
dd8fbd78 5963 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5964 if (size == 2) {
dd8fbd78 5965 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5966 } else {
9ee6e8bb 5967 switch (size) {
dd8fbd78
FN
5968 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5969 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5970 default: abort();
5971 }
dd8fbd78 5972 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5973 }
5974 }
5975 break;
600b828c
PM
5976 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5977 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5978 for (pass = 0; pass < q + 1; pass++) {
5979 tmp = neon_load_reg(rm, pass * 2);
5980 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5981 tmp = neon_load_reg(rm, pass * 2 + 1);
5982 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5983 switch (size) {
5984 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5985 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5986 case 2: tcg_gen_add_i64(CPU_V001); break;
5987 default: abort();
5988 }
600b828c 5989 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5990 /* Accumulate. */
ad69471c
PB
5991 neon_load_reg64(cpu_V1, rd + pass);
5992 gen_neon_addl(size);
9ee6e8bb 5993 }
ad69471c 5994 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5995 }
5996 break;
600b828c 5997 case NEON_2RM_VTRN:
9ee6e8bb 5998 if (size == 2) {
a5a14945 5999 int n;
9ee6e8bb 6000 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6001 tmp = neon_load_reg(rm, n);
6002 tmp2 = neon_load_reg(rd, n + 1);
6003 neon_store_reg(rm, n, tmp2);
6004 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6005 }
6006 } else {
6007 goto elementwise;
6008 }
6009 break;
600b828c 6010 case NEON_2RM_VUZP:
02acedf9 6011 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6012 return 1;
9ee6e8bb
PB
6013 }
6014 break;
600b828c 6015 case NEON_2RM_VZIP:
d68a6f3a 6016 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6017 return 1;
9ee6e8bb
PB
6018 }
6019 break;
600b828c
PM
6020 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6021 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6022 if (rm & 1) {
6023 return 1;
6024 }
a50f5b91 6025 TCGV_UNUSED(tmp2);
9ee6e8bb 6026 for (pass = 0; pass < 2; pass++) {
ad69471c 6027 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6028 tmp = tcg_temp_new_i32();
600b828c
PM
6029 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6030 tmp, cpu_V0);
ad69471c
PB
6031 if (pass == 0) {
6032 tmp2 = tmp;
6033 } else {
6034 neon_store_reg(rd, 0, tmp2);
6035 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6036 }
9ee6e8bb
PB
6037 }
6038 break;
600b828c 6039 case NEON_2RM_VSHLL:
fc2a9b37 6040 if (q || (rd & 1)) {
9ee6e8bb 6041 return 1;
600b828c 6042 }
ad69471c
PB
6043 tmp = neon_load_reg(rm, 0);
6044 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6045 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6046 if (pass == 1)
6047 tmp = tmp2;
6048 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6049 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6050 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6051 }
6052 break;
600b828c 6053 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6054 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6055 q || (rm & 1)) {
6056 return 1;
6057 }
7d1b0095
PM
6058 tmp = tcg_temp_new_i32();
6059 tmp2 = tcg_temp_new_i32();
60011498 6060 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6061 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6062 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6063 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6064 tcg_gen_shli_i32(tmp2, tmp2, 16);
6065 tcg_gen_or_i32(tmp2, tmp2, tmp);
6066 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6067 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6068 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6069 neon_store_reg(rd, 0, tmp2);
7d1b0095 6070 tmp2 = tcg_temp_new_i32();
2d981da7 6071 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6072 tcg_gen_shli_i32(tmp2, tmp2, 16);
6073 tcg_gen_or_i32(tmp2, tmp2, tmp);
6074 neon_store_reg(rd, 1, tmp2);
7d1b0095 6075 tcg_temp_free_i32(tmp);
60011498 6076 break;
600b828c 6077 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6078 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6079 q || (rd & 1)) {
6080 return 1;
6081 }
7d1b0095 6082 tmp3 = tcg_temp_new_i32();
60011498
PB
6083 tmp = neon_load_reg(rm, 0);
6084 tmp2 = neon_load_reg(rm, 1);
6085 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6086 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6087 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6088 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6089 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6090 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6091 tcg_temp_free_i32(tmp);
60011498 6092 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6093 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6094 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6095 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6096 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6097 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6098 tcg_temp_free_i32(tmp2);
6099 tcg_temp_free_i32(tmp3);
60011498 6100 break;
9ee6e8bb
PB
6101 default:
6102 elementwise:
6103 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6104 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6105 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6106 neon_reg_offset(rm, pass));
dd8fbd78 6107 TCGV_UNUSED(tmp);
9ee6e8bb 6108 } else {
dd8fbd78 6109 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6110 }
6111 switch (op) {
600b828c 6112 case NEON_2RM_VREV32:
9ee6e8bb 6113 switch (size) {
dd8fbd78
FN
6114 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6115 case 1: gen_swap_half(tmp); break;
600b828c 6116 default: abort();
9ee6e8bb
PB
6117 }
6118 break;
600b828c 6119 case NEON_2RM_VREV16:
dd8fbd78 6120 gen_rev16(tmp);
9ee6e8bb 6121 break;
600b828c 6122 case NEON_2RM_VCLS:
9ee6e8bb 6123 switch (size) {
dd8fbd78
FN
6124 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6125 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6126 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6127 default: abort();
9ee6e8bb
PB
6128 }
6129 break;
600b828c 6130 case NEON_2RM_VCLZ:
9ee6e8bb 6131 switch (size) {
dd8fbd78
FN
6132 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6133 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6134 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6135 default: abort();
9ee6e8bb
PB
6136 }
6137 break;
600b828c 6138 case NEON_2RM_VCNT:
dd8fbd78 6139 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6140 break;
600b828c 6141 case NEON_2RM_VMVN:
dd8fbd78 6142 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6143 break;
600b828c 6144 case NEON_2RM_VQABS:
9ee6e8bb 6145 switch (size) {
02da0b2d
PM
6146 case 0:
6147 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6148 break;
6149 case 1:
6150 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6151 break;
6152 case 2:
6153 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6154 break;
600b828c 6155 default: abort();
9ee6e8bb
PB
6156 }
6157 break;
600b828c 6158 case NEON_2RM_VQNEG:
9ee6e8bb 6159 switch (size) {
02da0b2d
PM
6160 case 0:
6161 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6162 break;
6163 case 1:
6164 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6165 break;
6166 case 2:
6167 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6168 break;
600b828c 6169 default: abort();
9ee6e8bb
PB
6170 }
6171 break;
600b828c 6172 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6173 tmp2 = tcg_const_i32(0);
9ee6e8bb 6174 switch(size) {
dd8fbd78
FN
6175 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6176 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6177 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6178 default: abort();
9ee6e8bb 6179 }
dd8fbd78 6180 tcg_temp_free(tmp2);
600b828c 6181 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6182 tcg_gen_not_i32(tmp, tmp);
600b828c 6183 }
9ee6e8bb 6184 break;
600b828c 6185 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6186 tmp2 = tcg_const_i32(0);
9ee6e8bb 6187 switch(size) {
dd8fbd78
FN
6188 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6189 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6190 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6191 default: abort();
9ee6e8bb 6192 }
dd8fbd78 6193 tcg_temp_free(tmp2);
600b828c 6194 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6195 tcg_gen_not_i32(tmp, tmp);
600b828c 6196 }
9ee6e8bb 6197 break;
600b828c 6198 case NEON_2RM_VCEQ0:
dd8fbd78 6199 tmp2 = tcg_const_i32(0);
9ee6e8bb 6200 switch(size) {
dd8fbd78
FN
6201 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6202 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6203 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6204 default: abort();
9ee6e8bb 6205 }
dd8fbd78 6206 tcg_temp_free(tmp2);
9ee6e8bb 6207 break;
600b828c 6208 case NEON_2RM_VABS:
9ee6e8bb 6209 switch(size) {
dd8fbd78
FN
6210 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6211 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6212 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6213 default: abort();
9ee6e8bb
PB
6214 }
6215 break;
600b828c 6216 case NEON_2RM_VNEG:
dd8fbd78
FN
6217 tmp2 = tcg_const_i32(0);
6218 gen_neon_rsb(size, tmp, tmp2);
6219 tcg_temp_free(tmp2);
9ee6e8bb 6220 break;
600b828c 6221 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6222 {
6223 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6224 tmp2 = tcg_const_i32(0);
aa47cfdd 6225 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6226 tcg_temp_free(tmp2);
aa47cfdd 6227 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6228 break;
aa47cfdd 6229 }
600b828c 6230 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6231 {
6232 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6233 tmp2 = tcg_const_i32(0);
aa47cfdd 6234 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6235 tcg_temp_free(tmp2);
aa47cfdd 6236 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6237 break;
aa47cfdd 6238 }
600b828c 6239 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6240 {
6241 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6242 tmp2 = tcg_const_i32(0);
aa47cfdd 6243 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6244 tcg_temp_free(tmp2);
aa47cfdd 6245 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6246 break;
aa47cfdd 6247 }
600b828c 6248 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6249 {
6250 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6251 tmp2 = tcg_const_i32(0);
aa47cfdd 6252 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6253 tcg_temp_free(tmp2);
aa47cfdd 6254 tcg_temp_free_ptr(fpstatus);
0e326109 6255 break;
aa47cfdd 6256 }
600b828c 6257 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6258 {
6259 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6260 tmp2 = tcg_const_i32(0);
aa47cfdd 6261 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6262 tcg_temp_free(tmp2);
aa47cfdd 6263 tcg_temp_free_ptr(fpstatus);
0e326109 6264 break;
aa47cfdd 6265 }
600b828c 6266 case NEON_2RM_VABS_F:
4373f3ce 6267 gen_vfp_abs(0);
9ee6e8bb 6268 break;
600b828c 6269 case NEON_2RM_VNEG_F:
4373f3ce 6270 gen_vfp_neg(0);
9ee6e8bb 6271 break;
600b828c 6272 case NEON_2RM_VSWP:
dd8fbd78
FN
6273 tmp2 = neon_load_reg(rd, pass);
6274 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6275 break;
600b828c 6276 case NEON_2RM_VTRN:
dd8fbd78 6277 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6278 switch (size) {
dd8fbd78
FN
6279 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6280 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6281 default: abort();
9ee6e8bb 6282 }
dd8fbd78 6283 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6284 break;
600b828c 6285 case NEON_2RM_VRECPE:
dd8fbd78 6286 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6287 break;
600b828c 6288 case NEON_2RM_VRSQRTE:
dd8fbd78 6289 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6290 break;
600b828c 6291 case NEON_2RM_VRECPE_F:
4373f3ce 6292 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6293 break;
600b828c 6294 case NEON_2RM_VRSQRTE_F:
4373f3ce 6295 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6296 break;
600b828c 6297 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6298 gen_vfp_sito(0, 1);
9ee6e8bb 6299 break;
600b828c 6300 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6301 gen_vfp_uito(0, 1);
9ee6e8bb 6302 break;
600b828c 6303 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6304 gen_vfp_tosiz(0, 1);
9ee6e8bb 6305 break;
600b828c 6306 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6307 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6308 break;
6309 default:
600b828c
PM
6310 /* Reserved op values were caught by the
6311 * neon_2rm_sizes[] check earlier.
6312 */
6313 abort();
9ee6e8bb 6314 }
600b828c 6315 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6316 tcg_gen_st_f32(cpu_F0s, cpu_env,
6317 neon_reg_offset(rd, pass));
9ee6e8bb 6318 } else {
dd8fbd78 6319 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6320 }
6321 }
6322 break;
6323 }
6324 } else if ((insn & (1 << 10)) == 0) {
6325 /* VTBL, VTBX. */
56907d77
PM
6326 int n = ((insn >> 8) & 3) + 1;
6327 if ((rn + n) > 32) {
6328 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6329 * helper function running off the end of the register file.
6330 */
6331 return 1;
6332 }
6333 n <<= 3;
9ee6e8bb 6334 if (insn & (1 << 6)) {
8f8e3aa4 6335 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6336 } else {
7d1b0095 6337 tmp = tcg_temp_new_i32();
8f8e3aa4 6338 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6339 }
8f8e3aa4 6340 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6341 tmp4 = tcg_const_i32(rn);
6342 tmp5 = tcg_const_i32(n);
6343 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6344 tcg_temp_free_i32(tmp);
9ee6e8bb 6345 if (insn & (1 << 6)) {
8f8e3aa4 6346 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6347 } else {
7d1b0095 6348 tmp = tcg_temp_new_i32();
8f8e3aa4 6349 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6350 }
8f8e3aa4 6351 tmp3 = neon_load_reg(rm, 1);
b75263d6 6352 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6353 tcg_temp_free_i32(tmp5);
6354 tcg_temp_free_i32(tmp4);
8f8e3aa4 6355 neon_store_reg(rd, 0, tmp2);
3018f259 6356 neon_store_reg(rd, 1, tmp3);
7d1b0095 6357 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6358 } else if ((insn & 0x380) == 0) {
6359 /* VDUP */
133da6aa
JR
6360 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6361 return 1;
6362 }
9ee6e8bb 6363 if (insn & (1 << 19)) {
dd8fbd78 6364 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6365 } else {
dd8fbd78 6366 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6367 }
6368 if (insn & (1 << 16)) {
dd8fbd78 6369 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6370 } else if (insn & (1 << 17)) {
6371 if ((insn >> 18) & 1)
dd8fbd78 6372 gen_neon_dup_high16(tmp);
9ee6e8bb 6373 else
dd8fbd78 6374 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6375 }
6376 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6377 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6378 tcg_gen_mov_i32(tmp2, tmp);
6379 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6380 }
7d1b0095 6381 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6382 } else {
6383 return 1;
6384 }
6385 }
6386 }
6387 return 0;
6388}
6389
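/* Illustrative sketch, not part of the original translate.c: the op == 15
 * "one register and immediate" case in disas_neon_data_insn() above builds
 * a 32-bit single-precision pattern from an 8-bit immediate (the ARM ARM
 * VFPExpandImm rule). A standalone version of that expansion, with a
 * made-up name, purely for reference:
 */
static uint32_t neon_expand_f32_imm8(uint32_t imm8)
{
    return ((imm8 & 0x80) << 24)                        /* sign bit */
         | ((imm8 & 0x40) ? (0x1f << 25) : (1u << 30))  /* top exponent bits */
         | ((imm8 & 0x3f) << 19);                       /* low exponent + fraction bits */
    /* e.g. imm8 == 0x70 gives 0x3f800000, i.e. 1.0f */
}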
0ecb72a5 6390static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn)
fe1479c3
PB
6391{
6392 int crn = (insn >> 16) & 0xf;
6393 int crm = insn & 0xf;
6394 int op1 = (insn >> 21) & 7;
6395 int op2 = (insn >> 5) & 7;
6396 int rt = (insn >> 12) & 0xf;
6397 TCGv tmp;
6398
ca27c052
PM
6399 /* Minimal set of debug registers, since we don't support debug */
6400 if (op1 == 0 && crn == 0 && op2 == 0) {
6401 switch (crm) {
6402 case 0:
6403 /* DBGDIDR: just RAZ. In particular this means the
6404 * "debug architecture version" bits will read as
6405 * a reserved value, which should cause Linux to
6406 * not try to use the debug hardware.
6407 */
6408 tmp = tcg_const_i32(0);
6409 store_reg(s, rt, tmp);
6410 return 0;
6411 case 1:
6412 case 2:
6413 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6414 * don't implement memory mapped debug components
6415 */
6416 if (ENABLE_ARCH_7) {
6417 tmp = tcg_const_i32(0);
6418 store_reg(s, rt, tmp);
6419 return 0;
6420 }
6421 break;
6422 default:
6423 break;
6424 }
6425 }
6426
fe1479c3
PB
6427 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6428 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6429 /* TEECR */
6430 if (IS_USER(s))
6431 return 1;
6432 tmp = load_cpu_field(teecr);
6433 store_reg(s, rt, tmp);
6434 return 0;
6435 }
6436 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6437 /* TEEHBR */
6438 if (IS_USER(s) && (env->teecr & 1))
6439 return 1;
6440 tmp = load_cpu_field(teehbr);
6441 store_reg(s, rt, tmp);
6442 return 0;
6443 }
6444 }
fe1479c3
PB
6445 return 1;
6446}
6447
0ecb72a5 6448static int disas_cp14_write(CPUARMState * env, DisasContext *s, uint32_t insn)
fe1479c3
PB
6449{
6450 int crn = (insn >> 16) & 0xf;
6451 int crm = insn & 0xf;
6452 int op1 = (insn >> 21) & 7;
6453 int op2 = (insn >> 5) & 7;
6454 int rt = (insn >> 12) & 0xf;
6455 TCGv tmp;
6456
6457 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6458 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6459 /* TEECR */
6460 if (IS_USER(s))
6461 return 1;
6462 tmp = load_reg(s, rt);
6463 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6464 tcg_temp_free_i32(tmp);
fe1479c3
PB
6465 return 0;
6466 }
6467 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6468 /* TEEHBR */
6469 if (IS_USER(s) && (env->teecr & 1))
6470 return 1;
6471 tmp = load_reg(s, rt);
6472 store_cpu_field(tmp, teehbr);
6473 return 0;
6474 }
6475 }
fe1479c3
PB
6476 return 1;
6477}
6478
0ecb72a5 6479static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
6480{
6481 int cpnum;
6482
6483 cpnum = (insn >> 8) & 0xf;
6484 if (arm_feature(env, ARM_FEATURE_XSCALE)
6485 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6486 return 1;
6487
6488 switch (cpnum) {
6489 case 0:
6490 case 1:
6491 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6492 return disas_iwmmxt_insn(env, s, insn);
6493 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6494 return disas_dsp_insn(env, s, insn);
6495 }
6496 return 1;
6497 case 10:
6498 case 11:
6499 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6500 case 14:
6501 /* Coprocessors 7-15 are architecturally reserved by ARM.
6502 Unfortunately Intel decided to ignore this. */
6503 if (arm_feature(env, ARM_FEATURE_XSCALE))
6504 goto board;
6505 if (insn & (1 << 20))
6506 return disas_cp14_read(env, s, insn);
6507 else
6508 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6509 case 15:
6510 return disas_cp15_insn (env, s, insn);
6511 default:
fe1479c3 6512 board:
9ee6e8bb
PB
6513 /* Unknown coprocessor. See if the board has hooked it. */
6514 return disas_cp_insn (env, s, insn);
6515 }
6516}
6517
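/* Illustrative sketch, not part of the original translate.c: the XScale
 * check at the top of disas_coproc_insn() above treats c15_cpar (the
 * Coprocessor Access Register) as a bitmask of accessible coprocessors
 * 0..13, so XOR-ing with 0x3fff yields the disabled set. Hypothetical
 * standalone predicate, for reference:
 */
static int xscale_cp_access_disabled(uint32_t c15_cpar, int cpnum)
{
    return ((c15_cpar ^ 0x3fff) & (1 << cpnum)) != 0;   /* nonzero => UNDEF */
}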
5e3f878a
PB
6518
6519/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6520static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6521{
6522 TCGv tmp;
7d1b0095 6523 tmp = tcg_temp_new_i32();
5e3f878a
PB
6524 tcg_gen_trunc_i64_i32(tmp, val);
6525 store_reg(s, rlow, tmp);
7d1b0095 6526 tmp = tcg_temp_new_i32();
5e3f878a
PB
6527 tcg_gen_shri_i64(val, val, 32);
6528 tcg_gen_trunc_i64_i32(tmp, val);
6529 store_reg(s, rhigh, tmp);
6530}
6531
6532/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6533static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6534{
a7812ae4 6535 TCGv_i64 tmp;
5e3f878a
PB
6536 TCGv tmp2;
6537
36aa55dc 6538 /* Load value and extend to 64 bits. */
a7812ae4 6539 tmp = tcg_temp_new_i64();
5e3f878a
PB
6540 tmp2 = load_reg(s, rlow);
6541 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6542 tcg_temp_free_i32(tmp2);
5e3f878a 6543 tcg_gen_add_i64(val, val, tmp);
b75263d6 6544 tcg_temp_free_i64(tmp);
5e3f878a
PB
6545}
6546
6547/* load and add a 64-bit value from a register pair. */
a7812ae4 6548static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6549{
a7812ae4 6550 TCGv_i64 tmp;
36aa55dc
PB
6551 TCGv tmpl;
6552 TCGv tmph;
5e3f878a
PB
6553
6554 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6555 tmpl = load_reg(s, rlow);
6556 tmph = load_reg(s, rhigh);
a7812ae4 6557 tmp = tcg_temp_new_i64();
36aa55dc 6558 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6559 tcg_temp_free_i32(tmpl);
6560 tcg_temp_free_i32(tmph);
5e3f878a 6561 tcg_gen_add_i64(val, val, tmp);
b75263d6 6562 tcg_temp_free_i64(tmp);
5e3f878a
PB
6563}
6564
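/* Illustrative sketch, not part of the original translate.c: the helpers
 * above treat an ARM register pair {rhigh:rlow} as one 64-bit value, i.e.
 * gen_addq() concatenates and gen_storeq_reg() splits. Host-level
 * equivalents with made-up names, for reference:
 */
static uint64_t regpair_to_u64(uint32_t rlow, uint32_t rhigh)
{
    return ((uint64_t)rhigh << 32) | rlow;              /* concat, as gen_addq */
}

static void u64_to_regpair(uint64_t val, uint32_t *rlow, uint32_t *rhigh)
{
    *rlow = (uint32_t)val;                              /* low word first */
    *rhigh = (uint32_t)(val >> 32);                     /* then high word */
}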
6565/* Set N and Z flags from a 64-bit value. */
a7812ae4 6566static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6567{
7d1b0095 6568 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6569 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6570 gen_logic_CC(tmp);
7d1b0095 6571 tcg_temp_free_i32(tmp);
5e3f878a
PB
6572}
6573
426f5abc
PB
6574/* Load/Store exclusive instructions are implemented by remembering
6575 the value/address loaded, and seeing if these are the same
6576 when the store is performed. This should be sufficient to implement
6577 the architecturally mandated semantics, and avoids having to monitor
6578 regular stores.
6579
6580 In system emulation mode only one CPU will be running at once, so
6581 this sequence is effectively atomic. In user emulation mode we
6582 throw an exception and handle the atomic operation elsewhere. */
6583static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6584 TCGv addr, int size)
6585{
6586 TCGv tmp;
6587
6588 switch (size) {
6589 case 0:
6590 tmp = gen_ld8u(addr, IS_USER(s));
6591 break;
6592 case 1:
6593 tmp = gen_ld16u(addr, IS_USER(s));
6594 break;
6595 case 2:
6596 case 3:
6597 tmp = gen_ld32(addr, IS_USER(s));
6598 break;
6599 default:
6600 abort();
6601 }
6602 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6603 store_reg(s, rt, tmp);
6604 if (size == 3) {
7d1b0095 6605 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6606 tcg_gen_addi_i32(tmp2, addr, 4);
6607 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6608 tcg_temp_free_i32(tmp2);
426f5abc
PB
6609 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6610 store_reg(s, rt2, tmp);
6611 }
6612 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6613}
6614
6615static void gen_clrex(DisasContext *s)
6616{
6617 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6618}
6619
6620#ifdef CONFIG_USER_ONLY
6621static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6622 TCGv addr, int size)
6623{
6624 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6625 tcg_gen_movi_i32(cpu_exclusive_info,
6626 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6627 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6628}
6629#else
6630static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6631 TCGv addr, int size)
6632{
6633 TCGv tmp;
6634 int done_label;
6635 int fail_label;
6636
6637 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6638 [addr] = {Rt};
6639 {Rd} = 0;
6640 } else {
6641 {Rd} = 1;
6642 } */
6643 fail_label = gen_new_label();
6644 done_label = gen_new_label();
6645 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6646 switch (size) {
6647 case 0:
6648 tmp = gen_ld8u(addr, IS_USER(s));
6649 break;
6650 case 1:
6651 tmp = gen_ld16u(addr, IS_USER(s));
6652 break;
6653 case 2:
6654 case 3:
6655 tmp = gen_ld32(addr, IS_USER(s));
6656 break;
6657 default:
6658 abort();
6659 }
6660 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6661 tcg_temp_free_i32(tmp);
426f5abc 6662 if (size == 3) {
7d1b0095 6663 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6664 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6665 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6666 tcg_temp_free_i32(tmp2);
426f5abc 6667 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6668 tcg_temp_free_i32(tmp);
426f5abc
PB
6669 }
6670 tmp = load_reg(s, rt);
6671 switch (size) {
6672 case 0:
6673 gen_st8(tmp, addr, IS_USER(s));
6674 break;
6675 case 1:
6676 gen_st16(tmp, addr, IS_USER(s));
6677 break;
6678 case 2:
6679 case 3:
6680 gen_st32(tmp, addr, IS_USER(s));
6681 break;
6682 default:
6683 abort();
6684 }
6685 if (size == 3) {
6686 tcg_gen_addi_i32(addr, addr, 4);
6687 tmp = load_reg(s, rt2);
6688 gen_st32(tmp, addr, IS_USER(s));
6689 }
6690 tcg_gen_movi_i32(cpu_R[rd], 0);
6691 tcg_gen_br(done_label);
6692 gen_set_label(fail_label);
6693 tcg_gen_movi_i32(cpu_R[rd], 1);
6694 gen_set_label(done_label);
6695 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6696}
6697#endif
6698
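/* Illustrative sketch, not part of the original translate.c: the C-level
 * behaviour that the system-emulation gen_store_exclusive() above emits as
 * TCG ops, shown for the 32-bit (size == 2) case only. Names and the
 * ld32/st32 callbacks are invented for illustration; the real code also
 * handles byte/halfword/doubleword sizes and the user-mode EXCP_STREX path.
 */
static void strex32_semantics(uint32_t addr, uint32_t value, uint32_t *rd,
                              uint32_t *excl_addr, uint32_t *excl_val,
                              uint32_t (*ld32)(uint32_t),
                              void (*st32)(uint32_t, uint32_t))
{
    if (addr == *excl_addr && ld32(addr) == *excl_val) {
        st32(addr, value);
        *rd = 0;                  /* store succeeded */
    } else {
        *rd = 1;                  /* store failed */
    }
    *excl_addr = -1u;             /* the monitor is cleared either way */
}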
0ecb72a5 6699static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6700{
6701 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6702 TCGv tmp;
3670669c 6703 TCGv tmp2;
6ddbc6e4 6704 TCGv tmp3;
b0109805 6705 TCGv addr;
a7812ae4 6706 TCGv_i64 tmp64;
9ee6e8bb
PB
6707
6708 insn = ldl_code(s->pc);
6709 s->pc += 4;
6710
6711 /* M variants do not implement ARM mode. */
6712 if (IS_M(env))
6713 goto illegal_op;
6714 cond = insn >> 28;
6715 if (cond == 0xf){
be5e7a76
DES
6716 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6717 * choose to UNDEF. In ARMv5 and above the space is used
6718 * for miscellaneous unconditional instructions.
6719 */
6720 ARCH(5);
6721
9ee6e8bb
PB
6722 /* Unconditional instructions. */
6723 if (((insn >> 25) & 7) == 1) {
6724 /* NEON Data processing. */
6725 if (!arm_feature(env, ARM_FEATURE_NEON))
6726 goto illegal_op;
6727
6728 if (disas_neon_data_insn(env, s, insn))
6729 goto illegal_op;
6730 return;
6731 }
6732 if ((insn & 0x0f100000) == 0x04000000) {
6733 /* NEON load/store. */
6734 if (!arm_feature(env, ARM_FEATURE_NEON))
6735 goto illegal_op;
6736
6737 if (disas_neon_ls_insn(env, s, insn))
6738 goto illegal_op;
6739 return;
6740 }
3d185e5d
PM
6741 if (((insn & 0x0f30f000) == 0x0510f000) ||
6742 ((insn & 0x0f30f010) == 0x0710f000)) {
6743 if ((insn & (1 << 22)) == 0) {
6744 /* PLDW; v7MP */
6745 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6746 goto illegal_op;
6747 }
6748 }
6749 /* Otherwise PLD; v5TE+ */
be5e7a76 6750 ARCH(5TE);
3d185e5d
PM
6751 return;
6752 }
6753 if (((insn & 0x0f70f000) == 0x0450f000) ||
6754 ((insn & 0x0f70f010) == 0x0650f000)) {
6755 ARCH(7);
6756 return; /* PLI; V7 */
6757 }
6758 if (((insn & 0x0f700000) == 0x04100000) ||
6759 ((insn & 0x0f700010) == 0x06100000)) {
6760 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6761 goto illegal_op;
6762 }
6763 return; /* v7MP: Unallocated memory hint: must NOP */
6764 }
6765
6766 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6767 ARCH(6);
6768 /* setend */
6769 if (insn & (1 << 9)) {
6770 /* BE8 mode not implemented. */
6771 goto illegal_op;
6772 }
6773 return;
6774 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6775 switch ((insn >> 4) & 0xf) {
6776 case 1: /* clrex */
6777 ARCH(6K);
426f5abc 6778 gen_clrex(s);
9ee6e8bb
PB
6779 return;
6780 case 4: /* dsb */
6781 case 5: /* dmb */
6782 case 6: /* isb */
6783 ARCH(7);
6784 /* We don't emulate caches so these are a no-op. */
6785 return;
6786 default:
6787 goto illegal_op;
6788 }
6789 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6790 /* srs */
c67b6b71 6791 int32_t offset;
9ee6e8bb
PB
6792 if (IS_USER(s))
6793 goto illegal_op;
6794 ARCH(6);
6795 op1 = (insn & 0x1f);
7d1b0095 6796 addr = tcg_temp_new_i32();
39ea3d4e
PM
6797 tmp = tcg_const_i32(op1);
6798 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6799 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6800 i = (insn >> 23) & 3;
6801 switch (i) {
6802 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6803 case 1: offset = 0; break; /* IA */
6804 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6805 case 3: offset = 4; break; /* IB */
6806 default: abort();
6807 }
6808 if (offset)
b0109805
PB
6809 tcg_gen_addi_i32(addr, addr, offset);
6810 tmp = load_reg(s, 14);
6811 gen_st32(tmp, addr, 0);
c67b6b71 6812 tmp = load_cpu_field(spsr);
b0109805
PB
6813 tcg_gen_addi_i32(addr, addr, 4);
6814 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6815 if (insn & (1 << 21)) {
6816 /* Base writeback. */
6817 switch (i) {
6818 case 0: offset = -8; break;
c67b6b71
FN
6819 case 1: offset = 4; break;
6820 case 2: offset = -4; break;
9ee6e8bb
PB
6821 case 3: offset = 0; break;
6822 default: abort();
6823 }
6824 if (offset)
c67b6b71 6825 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6826 tmp = tcg_const_i32(op1);
6827 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6828 tcg_temp_free_i32(tmp);
7d1b0095 6829 tcg_temp_free_i32(addr);
b0109805 6830 } else {
7d1b0095 6831 tcg_temp_free_i32(addr);
9ee6e8bb 6832 }
a990f58f 6833 return;
ea825eee 6834 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6835 /* rfe */
c67b6b71 6836 int32_t offset;
9ee6e8bb
PB
6837 if (IS_USER(s))
6838 goto illegal_op;
6839 ARCH(6);
6840 rn = (insn >> 16) & 0xf;
b0109805 6841 addr = load_reg(s, rn);
9ee6e8bb
PB
6842 i = (insn >> 23) & 3;
6843 switch (i) {
b0109805 6844 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6845 case 1: offset = 0; break; /* IA */
6846 case 2: offset = -8; break; /* DB */
b0109805 6847 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6848 default: abort();
6849 }
6850 if (offset)
b0109805
PB
6851 tcg_gen_addi_i32(addr, addr, offset);
6852 /* Load PC into tmp and CPSR into tmp2. */
6853 tmp = gen_ld32(addr, 0);
6854 tcg_gen_addi_i32(addr, addr, 4);
6855 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6856 if (insn & (1 << 21)) {
6857 /* Base writeback. */
6858 switch (i) {
b0109805 6859 case 0: offset = -8; break;
c67b6b71
FN
6860 case 1: offset = 4; break;
6861 case 2: offset = -4; break;
b0109805 6862 case 3: offset = 0; break;
9ee6e8bb
PB
6863 default: abort();
6864 }
6865 if (offset)
b0109805
PB
6866 tcg_gen_addi_i32(addr, addr, offset);
6867 store_reg(s, rn, addr);
6868 } else {
7d1b0095 6869 tcg_temp_free_i32(addr);
9ee6e8bb 6870 }
b0109805 6871 gen_rfe(s, tmp, tmp2);
c67b6b71 6872 return;
9ee6e8bb
PB
6873 } else if ((insn & 0x0e000000) == 0x0a000000) {
6874 /* branch link and change to thumb (blx <offset>) */
6875 int32_t offset;
6876
6877 val = (uint32_t)s->pc;
7d1b0095 6878 tmp = tcg_temp_new_i32();
d9ba4830
PB
6879 tcg_gen_movi_i32(tmp, val);
6880 store_reg(s, 14, tmp);
9ee6e8bb
PB
6881 /* Sign-extend the 24-bit offset */
6882 offset = (((int32_t)insn) << 8) >> 8;
6883 /* offset * 4 + bit24 * 2 + (thumb bit) */
6884 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6885 /* pipeline offset */
6886 val += 4;
be5e7a76 6887 /* protected by the ARCH(5) check above, near the start of the uncond block */
d9ba4830 6888 gen_bx_im(s, val);
9ee6e8bb
PB
6889 return;
6890 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6891 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6892 /* iWMMXt register transfer. */
6893 if (env->cp15.c15_cpar & (1 << 1))
6894 if (!disas_iwmmxt_insn(env, s, insn))
6895 return;
6896 }
6897 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6898 /* Coprocessor double register transfer. */
be5e7a76 6899 ARCH(5TE);
9ee6e8bb
PB
6900 } else if ((insn & 0x0f000010) == 0x0e000010) {
6901 /* Additional coprocessor register transfer. */
7997d92f 6902 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6903 uint32_t mask;
6904 uint32_t val;
6905 /* cps (privileged) */
6906 if (IS_USER(s))
6907 return;
6908 mask = val = 0;
6909 if (insn & (1 << 19)) {
6910 if (insn & (1 << 8))
6911 mask |= CPSR_A;
6912 if (insn & (1 << 7))
6913 mask |= CPSR_I;
6914 if (insn & (1 << 6))
6915 mask |= CPSR_F;
6916 if (insn & (1 << 18))
6917 val |= mask;
6918 }
7997d92f 6919 if (insn & (1 << 17)) {
9ee6e8bb
PB
6920 mask |= CPSR_M;
6921 val |= (insn & 0x1f);
6922 }
6923 if (mask) {
2fbac54b 6924 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6925 }
6926 return;
6927 }
6928 goto illegal_op;
6929 }
6930 if (cond != 0xe) {
6931 /* if the condition is not 'always execute', generate a conditional
6932 jump to the next instruction */
6933 s->condlabel = gen_new_label();
d9ba4830 6934 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6935 s->condjmp = 1;
6936 }
6937 if ((insn & 0x0f900000) == 0x03000000) {
6938 if ((insn & (1 << 21)) == 0) {
6939 ARCH(6T2);
6940 rd = (insn >> 12) & 0xf;
6941 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6942 if ((insn & (1 << 22)) == 0) {
6943 /* MOVW */
7d1b0095 6944 tmp = tcg_temp_new_i32();
5e3f878a 6945 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6946 } else {
6947 /* MOVT */
5e3f878a 6948 tmp = load_reg(s, rd);
86831435 6949 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6950 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6951 }
5e3f878a 6952 store_reg(s, rd, tmp);
9ee6e8bb
PB
6953 } else {
6954 if (((insn >> 12) & 0xf) != 0xf)
6955 goto illegal_op;
6956 if (((insn >> 16) & 0xf) == 0) {
6957 gen_nop_hint(s, insn & 0xff);
6958 } else {
6959 /* CPSR = immediate */
6960 val = insn & 0xff;
6961 shift = ((insn >> 8) & 0xf) * 2;
6962 if (shift)
6963 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6964 i = ((insn & (1 << 22)) != 0);
2fbac54b 6965 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6966 goto illegal_op;
6967 }
6968 }
6969 } else if ((insn & 0x0f900000) == 0x01000000
6970 && (insn & 0x00000090) != 0x00000090) {
6971 /* miscellaneous instructions */
6972 op1 = (insn >> 21) & 3;
6973 sh = (insn >> 4) & 0xf;
6974 rm = insn & 0xf;
6975 switch (sh) {
6976 case 0x0: /* move program status register */
6977 if (op1 & 1) {
6978 /* PSR = reg */
2fbac54b 6979 tmp = load_reg(s, rm);
9ee6e8bb 6980 i = ((op1 & 2) != 0);
2fbac54b 6981 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6982 goto illegal_op;
6983 } else {
6984 /* reg = PSR */
6985 rd = (insn >> 12) & 0xf;
6986 if (op1 & 2) {
6987 if (IS_USER(s))
6988 goto illegal_op;
d9ba4830 6989 tmp = load_cpu_field(spsr);
9ee6e8bb 6990 } else {
7d1b0095 6991 tmp = tcg_temp_new_i32();
d9ba4830 6992 gen_helper_cpsr_read(tmp);
9ee6e8bb 6993 }
d9ba4830 6994 store_reg(s, rd, tmp);
9ee6e8bb
PB
6995 }
6996 break;
6997 case 0x1:
6998 if (op1 == 1) {
6999 /* branch/exchange thumb (bx). */
be5e7a76 7000 ARCH(4T);
d9ba4830
PB
7001 tmp = load_reg(s, rm);
7002 gen_bx(s, tmp);
9ee6e8bb
PB
7003 } else if (op1 == 3) {
7004 /* clz */
be5e7a76 7005 ARCH(5);
9ee6e8bb 7006 rd = (insn >> 12) & 0xf;
1497c961
PB
7007 tmp = load_reg(s, rm);
7008 gen_helper_clz(tmp, tmp);
7009 store_reg(s, rd, tmp);
9ee6e8bb
PB
7010 } else {
7011 goto illegal_op;
7012 }
7013 break;
7014 case 0x2:
7015 if (op1 == 1) {
7016 ARCH(5J); /* bxj */
7017 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7018 tmp = load_reg(s, rm);
7019 gen_bx(s, tmp);
9ee6e8bb
PB
7020 } else {
7021 goto illegal_op;
7022 }
7023 break;
7024 case 0x3:
7025 if (op1 != 1)
7026 goto illegal_op;
7027
be5e7a76 7028 ARCH(5);
9ee6e8bb 7029 /* branch link/exchange thumb (blx) */
d9ba4830 7030 tmp = load_reg(s, rm);
7d1b0095 7031 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7032 tcg_gen_movi_i32(tmp2, s->pc);
7033 store_reg(s, 14, tmp2);
7034 gen_bx(s, tmp);
9ee6e8bb
PB
7035 break;
7036 case 0x5: /* saturating add/subtract */
be5e7a76 7037 ARCH(5TE);
9ee6e8bb
PB
7038 rd = (insn >> 12) & 0xf;
7039 rn = (insn >> 16) & 0xf;
b40d0353 7040 tmp = load_reg(s, rm);
5e3f878a 7041 tmp2 = load_reg(s, rn);
9ee6e8bb 7042 if (op1 & 2)
5e3f878a 7043 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 7044 if (op1 & 1)
5e3f878a 7045 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 7046 else
5e3f878a 7047 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 7048 tcg_temp_free_i32(tmp2);
5e3f878a 7049 store_reg(s, rd, tmp);
9ee6e8bb 7050 break;
49e14940
AL
7051 case 7:
7052 /* SMC instruction (op1 == 3)
7053 and undefined instructions (op1 == 0 || op1 == 2)
7054 will trap */
7055 if (op1 != 1) {
7056 goto illegal_op;
7057 }
7058 /* bkpt */
be5e7a76 7059 ARCH(5);
bc4a0de0 7060 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7061 break;
7062 case 0x8: /* signed multiply */
7063 case 0xa:
7064 case 0xc:
7065 case 0xe:
be5e7a76 7066 ARCH(5TE);
9ee6e8bb
PB
7067 rs = (insn >> 8) & 0xf;
7068 rn = (insn >> 12) & 0xf;
7069 rd = (insn >> 16) & 0xf;
7070 if (op1 == 1) {
7071 /* (32 * 16) >> 16 */
5e3f878a
PB
7072 tmp = load_reg(s, rm);
7073 tmp2 = load_reg(s, rs);
9ee6e8bb 7074 if (sh & 4)
5e3f878a 7075 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7076 else
5e3f878a 7077 gen_sxth(tmp2);
a7812ae4
PB
7078 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7079 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7080 tmp = tcg_temp_new_i32();
a7812ae4 7081 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7082 tcg_temp_free_i64(tmp64);
9ee6e8bb 7083 if ((sh & 2) == 0) {
5e3f878a
PB
7084 tmp2 = load_reg(s, rn);
7085 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7086 tcg_temp_free_i32(tmp2);
9ee6e8bb 7087 }
5e3f878a 7088 store_reg(s, rd, tmp);
9ee6e8bb
PB
7089 } else {
7090 /* 16 * 16 */
5e3f878a
PB
7091 tmp = load_reg(s, rm);
7092 tmp2 = load_reg(s, rs);
7093 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7094 tcg_temp_free_i32(tmp2);
9ee6e8bb 7095 if (op1 == 2) {
a7812ae4
PB
7096 tmp64 = tcg_temp_new_i64();
7097 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7098 tcg_temp_free_i32(tmp);
a7812ae4
PB
7099 gen_addq(s, tmp64, rn, rd);
7100 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7101 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7102 } else {
7103 if (op1 == 0) {
5e3f878a
PB
7104 tmp2 = load_reg(s, rn);
7105 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7106 tcg_temp_free_i32(tmp2);
9ee6e8bb 7107 }
5e3f878a 7108 store_reg(s, rd, tmp);
9ee6e8bb
PB
7109 }
7110 }
7111 break;
7112 default:
7113 goto illegal_op;
7114 }
7115 } else if (((insn & 0x0e000000) == 0 &&
7116 (insn & 0x00000090) != 0x90) ||
7117 ((insn & 0x0e000000) == (1 << 25))) {
7118 int set_cc, logic_cc, shiftop;
7119
7120 op1 = (insn >> 21) & 0xf;
7121 set_cc = (insn >> 20) & 1;
7122 logic_cc = table_logic_cc[op1] & set_cc;
7123
7124 /* data processing instruction */
7125 if (insn & (1 << 25)) {
7126 /* immediate operand */
7127 val = insn & 0xff;
7128 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7129 if (shift) {
9ee6e8bb 7130 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7131 }
7d1b0095 7132 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7133 tcg_gen_movi_i32(tmp2, val);
7134 if (logic_cc && shift) {
7135 gen_set_CF_bit31(tmp2);
7136 }
9ee6e8bb
PB
7137 } else {
7138 /* register */
7139 rm = (insn) & 0xf;
e9bb4aa9 7140 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7141 shiftop = (insn >> 5) & 3;
7142 if (!(insn & (1 << 4))) {
7143 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7144 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7145 } else {
7146 rs = (insn >> 8) & 0xf;
8984bd2e 7147 tmp = load_reg(s, rs);
e9bb4aa9 7148 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7149 }
7150 }
7151 if (op1 != 0x0f && op1 != 0x0d) {
7152 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7153 tmp = load_reg(s, rn);
7154 } else {
7155 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7156 }
7157 rd = (insn >> 12) & 0xf;
7158 switch(op1) {
7159 case 0x00:
e9bb4aa9
JR
7160 tcg_gen_and_i32(tmp, tmp, tmp2);
7161 if (logic_cc) {
7162 gen_logic_CC(tmp);
7163 }
21aeb343 7164 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7165 break;
7166 case 0x01:
e9bb4aa9
JR
7167 tcg_gen_xor_i32(tmp, tmp, tmp2);
7168 if (logic_cc) {
7169 gen_logic_CC(tmp);
7170 }
21aeb343 7171 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7172 break;
7173 case 0x02:
7174 if (set_cc && rd == 15) {
7175 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7176 if (IS_USER(s)) {
9ee6e8bb 7177 goto illegal_op;
e9bb4aa9
JR
7178 }
7179 gen_helper_sub_cc(tmp, tmp, tmp2);
7180 gen_exception_return(s, tmp);
9ee6e8bb 7181 } else {
e9bb4aa9
JR
7182 if (set_cc) {
7183 gen_helper_sub_cc(tmp, tmp, tmp2);
7184 } else {
7185 tcg_gen_sub_i32(tmp, tmp, tmp2);
7186 }
21aeb343 7187 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7188 }
7189 break;
7190 case 0x03:
e9bb4aa9
JR
7191 if (set_cc) {
7192 gen_helper_sub_cc(tmp, tmp2, tmp);
7193 } else {
7194 tcg_gen_sub_i32(tmp, tmp2, tmp);
7195 }
21aeb343 7196 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7197 break;
7198 case 0x04:
e9bb4aa9
JR
7199 if (set_cc) {
7200 gen_helper_add_cc(tmp, tmp, tmp2);
7201 } else {
7202 tcg_gen_add_i32(tmp, tmp, tmp2);
7203 }
21aeb343 7204 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7205 break;
7206 case 0x05:
e9bb4aa9
JR
7207 if (set_cc) {
7208 gen_helper_adc_cc(tmp, tmp, tmp2);
7209 } else {
7210 gen_add_carry(tmp, tmp, tmp2);
7211 }
21aeb343 7212 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7213 break;
7214 case 0x06:
e9bb4aa9
JR
7215 if (set_cc) {
7216 gen_helper_sbc_cc(tmp, tmp, tmp2);
7217 } else {
7218 gen_sub_carry(tmp, tmp, tmp2);
7219 }
21aeb343 7220 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7221 break;
7222 case 0x07:
e9bb4aa9
JR
7223 if (set_cc) {
7224 gen_helper_sbc_cc(tmp, tmp2, tmp);
7225 } else {
7226 gen_sub_carry(tmp, tmp2, tmp);
7227 }
21aeb343 7228 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7229 break;
7230 case 0x08:
7231 if (set_cc) {
e9bb4aa9
JR
7232 tcg_gen_and_i32(tmp, tmp, tmp2);
7233 gen_logic_CC(tmp);
9ee6e8bb 7234 }
7d1b0095 7235 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7236 break;
7237 case 0x09:
7238 if (set_cc) {
e9bb4aa9
JR
7239 tcg_gen_xor_i32(tmp, tmp, tmp2);
7240 gen_logic_CC(tmp);
9ee6e8bb 7241 }
7d1b0095 7242 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7243 break;
7244 case 0x0a:
7245 if (set_cc) {
e9bb4aa9 7246 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7247 }
7d1b0095 7248 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7249 break;
7250 case 0x0b:
7251 if (set_cc) {
e9bb4aa9 7252 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7253 }
7d1b0095 7254 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7255 break;
7256 case 0x0c:
e9bb4aa9
JR
7257 tcg_gen_or_i32(tmp, tmp, tmp2);
7258 if (logic_cc) {
7259 gen_logic_CC(tmp);
7260 }
21aeb343 7261 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7262 break;
7263 case 0x0d:
7264 if (logic_cc && rd == 15) {
7265 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7266 if (IS_USER(s)) {
9ee6e8bb 7267 goto illegal_op;
e9bb4aa9
JR
7268 }
7269 gen_exception_return(s, tmp2);
9ee6e8bb 7270 } else {
e9bb4aa9
JR
7271 if (logic_cc) {
7272 gen_logic_CC(tmp2);
7273 }
21aeb343 7274 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7275 }
7276 break;
7277 case 0x0e:
f669df27 7278 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7279 if (logic_cc) {
7280 gen_logic_CC(tmp);
7281 }
21aeb343 7282 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7283 break;
7284 default:
7285 case 0x0f:
e9bb4aa9
JR
7286 tcg_gen_not_i32(tmp2, tmp2);
7287 if (logic_cc) {
7288 gen_logic_CC(tmp2);
7289 }
21aeb343 7290 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7291 break;
7292 }
e9bb4aa9 7293 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7294 tcg_temp_free_i32(tmp2);
e9bb4aa9 7295 }
9ee6e8bb
PB
7296 } else {
7297 /* other instructions */
7298 op1 = (insn >> 24) & 0xf;
7299 switch(op1) {
7300 case 0x0:
7301 case 0x1:
7302 /* multiplies, extra load/stores */
7303 sh = (insn >> 5) & 3;
7304 if (sh == 0) {
7305 if (op1 == 0x0) {
7306 rd = (insn >> 16) & 0xf;
7307 rn = (insn >> 12) & 0xf;
7308 rs = (insn >> 8) & 0xf;
7309 rm = (insn) & 0xf;
7310 op1 = (insn >> 20) & 0xf;
7311 switch (op1) {
7312 case 0: case 1: case 2: case 3: case 6:
7313 /* 32 bit mul */
5e3f878a
PB
7314 tmp = load_reg(s, rs);
7315 tmp2 = load_reg(s, rm);
7316 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7317 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7318 if (insn & (1 << 22)) {
7319 /* Subtract (mls) */
7320 ARCH(6T2);
5e3f878a
PB
7321 tmp2 = load_reg(s, rn);
7322 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7323 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7324 } else if (insn & (1 << 21)) {
7325 /* Add */
5e3f878a
PB
7326 tmp2 = load_reg(s, rn);
7327 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7328 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7329 }
7330 if (insn & (1 << 20))
5e3f878a
PB
7331 gen_logic_CC(tmp);
7332 store_reg(s, rd, tmp);
9ee6e8bb 7333 break;
8aac08b1
AJ
7334 case 4:
7335 /* 64 bit mul double accumulate (UMAAL) */
7336 ARCH(6);
7337 tmp = load_reg(s, rs);
7338 tmp2 = load_reg(s, rm);
7339 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7340 gen_addq_lo(s, tmp64, rn);
7341 gen_addq_lo(s, tmp64, rd);
7342 gen_storeq_reg(s, rn, rd, tmp64);
7343 tcg_temp_free_i64(tmp64);
7344 break;
7345 case 8: case 9: case 10: case 11:
7346 case 12: case 13: case 14: case 15:
7347 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7348 tmp = load_reg(s, rs);
7349 tmp2 = load_reg(s, rm);
8aac08b1 7350 if (insn & (1 << 22)) {
a7812ae4 7351 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7352 } else {
a7812ae4 7353 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7354 }
7355 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7356 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7357 }
8aac08b1 7358 if (insn & (1 << 20)) {
a7812ae4 7359 gen_logicq_cc(tmp64);
8aac08b1 7360 }
a7812ae4 7361 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7362 tcg_temp_free_i64(tmp64);
9ee6e8bb 7363 break;
8aac08b1
AJ
7364 default:
7365 goto illegal_op;
9ee6e8bb
PB
7366 }
7367 } else {
7368 rn = (insn >> 16) & 0xf;
7369 rd = (insn >> 12) & 0xf;
7370 if (insn & (1 << 23)) {
7371 /* load/store exclusive */
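/* The final argument to gen_{load,store}_exclusive below is the
   log2 of the access size: 0 = byte, 1 = halfword, 2 = word,
   3 = doubleword. */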
86753403
PB
7372 op1 = (insn >> 21) & 0x3;
7373 if (op1)
a47f43d2 7374 ARCH(6K);
86753403
PB
7375 else
7376 ARCH(6);
3174f8e9 7377 addr = tcg_temp_local_new_i32();
98a46317 7378 load_reg_var(s, addr, rn);
9ee6e8bb 7379 if (insn & (1 << 20)) {
86753403
PB
7380 switch (op1) {
7381 case 0: /* ldrex */
426f5abc 7382 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7383 break;
7384 case 1: /* ldrexd */
426f5abc 7385 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7386 break;
7387 case 2: /* ldrexb */
426f5abc 7388 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7389 break;
7390 case 3: /* ldrexh */
426f5abc 7391 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7392 break;
7393 default:
7394 abort();
7395 }
9ee6e8bb
PB
7396 } else {
7397 rm = insn & 0xf;
86753403
PB
7398 switch (op1) {
7399 case 0: /* strex */
426f5abc 7400 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7401 break;
7402 case 1: /* strexd */
502e64fe 7403 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7404 break;
7405 case 2: /* strexb */
426f5abc 7406 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7407 break;
7408 case 3: /* strexh */
426f5abc 7409 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7410 break;
7411 default:
7412 abort();
7413 }
9ee6e8bb 7414 }
3174f8e9 7415 tcg_temp_free(addr);
9ee6e8bb
PB
7416 } else {
7417 /* SWP instruction */
7418 rm = (insn) & 0xf;
7419
8984bd2e
PB
7420 /* ??? This is not really atomic. However we know
7421 we never have multiple CPUs running in parallel,
7422 so it is good enough. */
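/* SWP{B}: the old memory value is read into tmp2, the value of Rm
   is stored, and the old value is then written to Rd. */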
7423 addr = load_reg(s, rn);
7424 tmp = load_reg(s, rm);
9ee6e8bb 7425 if (insn & (1 << 22)) {
8984bd2e
PB
7426 tmp2 = gen_ld8u(addr, IS_USER(s));
7427 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7428 } else {
8984bd2e
PB
7429 tmp2 = gen_ld32(addr, IS_USER(s));
7430 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7431 }
7d1b0095 7432 tcg_temp_free_i32(addr);
8984bd2e 7433 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7434 }
7435 }
7436 } else {
7437 int address_offset;
7438 int load;
7439 /* Misc load/store */
7440 rn = (insn >> 16) & 0xf;
7441 rd = (insn >> 12) & 0xf;
b0109805 7442 addr = load_reg(s, rn);
9ee6e8bb 7443 if (insn & (1 << 24))
b0109805 7444 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7445 address_offset = 0;
7446 if (insn & (1 << 20)) {
7447 /* load */
7448 switch(sh) {
7449 case 1:
b0109805 7450 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7451 break;
7452 case 2:
b0109805 7453 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7454 break;
7455 default:
7456 case 3:
b0109805 7457 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7458 break;
7459 }
7460 load = 1;
7461 } else if (sh & 2) {
be5e7a76 7462 ARCH(5TE);
9ee6e8bb
PB
7463 /* doubleword */
7464 if (sh & 1) {
7465 /* store */
b0109805
PB
7466 tmp = load_reg(s, rd);
7467 gen_st32(tmp, addr, IS_USER(s));
7468 tcg_gen_addi_i32(addr, addr, 4);
7469 tmp = load_reg(s, rd + 1);
7470 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7471 load = 0;
7472 } else {
7473 /* load */
b0109805
PB
7474 tmp = gen_ld32(addr, IS_USER(s));
7475 store_reg(s, rd, tmp);
7476 tcg_gen_addi_i32(addr, addr, 4);
7477 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7478 rd++;
7479 load = 1;
7480 }
7481 address_offset = -4;
7482 } else {
7483 /* store */
b0109805
PB
7484 tmp = load_reg(s, rd);
7485 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7486 load = 0;
7487 }
7488 /* Perform base writeback before the loaded value to
7489 ensure correct behavior with overlapping index registers.
7490 ldrd with base writeback is undefined if the
7491 destination and index registers overlap. */
7492 if (!(insn & (1 << 24))) {
b0109805
PB
7493 gen_add_datah_offset(s, insn, address_offset, addr);
7494 store_reg(s, rn, addr);
9ee6e8bb
PB
7495 } else if (insn & (1 << 21)) {
7496 if (address_offset)
b0109805
PB
7497 tcg_gen_addi_i32(addr, addr, address_offset);
7498 store_reg(s, rn, addr);
7499 } else {
7d1b0095 7500 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7501 }
7502 if (load) {
7503 /* Complete the load. */
b0109805 7504 store_reg(s, rd, tmp);
9ee6e8bb
PB
7505 }
7506 }
7507 break;
7508 case 0x4:
7509 case 0x5:
7510 goto do_ldst;
7511 case 0x6:
7512 case 0x7:
7513 if (insn & (1 << 4)) {
7514 ARCH(6);
7515 /* ARMv6 media instructions. */
7516 rm = insn & 0xf;
7517 rn = (insn >> 16) & 0xf;
2c0262af 7518 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7519 rs = (insn >> 8) & 0xf;
7520 switch ((insn >> 23) & 3) {
7521 case 0: /* Parallel add/subtract. */
7522 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7523 tmp = load_reg(s, rn);
7524 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7525 sh = (insn >> 5) & 7;
7526 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7527 goto illegal_op;
6ddbc6e4 7528 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7529 tcg_temp_free_i32(tmp2);
6ddbc6e4 7530 store_reg(s, rd, tmp);
9ee6e8bb
PB
7531 break;
7532 case 1:
7533 if ((insn & 0x00700020) == 0) {
6c95676b 7534 /* Halfword pack. */
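/* PKHBT: Rd = (Rm LSL shift)[31:16] : Rn[15:0]
   PKHTB: Rd = Rn[31:16] : (Rm ASR shift)[15:0] */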
3670669c
PB
7535 tmp = load_reg(s, rn);
7536 tmp2 = load_reg(s, rm);
9ee6e8bb 7537 shift = (insn >> 7) & 0x1f;
3670669c
PB
7538 if (insn & (1 << 6)) {
7539 /* pkhtb */
22478e79
AZ
7540 if (shift == 0)
7541 shift = 31;
7542 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7543 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7544 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7545 } else {
7546 /* pkhbt */
22478e79
AZ
7547 if (shift)
7548 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7549 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7550 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7551 }
7552 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7553 tcg_temp_free_i32(tmp2);
3670669c 7554 store_reg(s, rd, tmp);
9ee6e8bb
PB
7555 } else if ((insn & 0x00200020) == 0x00200000) {
7556 /* [us]sat */
6ddbc6e4 7557 tmp = load_reg(s, rm);
9ee6e8bb
PB
7558 shift = (insn >> 7) & 0x1f;
7559 if (insn & (1 << 6)) {
7560 if (shift == 0)
7561 shift = 31;
6ddbc6e4 7562 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7563 } else {
6ddbc6e4 7564 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7565 }
7566 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7567 tmp2 = tcg_const_i32(sh);
7568 if (insn & (1 << 22))
7569 gen_helper_usat(tmp, tmp, tmp2);
7570 else
7571 gen_helper_ssat(tmp, tmp, tmp2);
7572 tcg_temp_free_i32(tmp2);
6ddbc6e4 7573 store_reg(s, rd, tmp);
9ee6e8bb
PB
7574 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7575 /* [us]sat16 */
6ddbc6e4 7576 tmp = load_reg(s, rm);
9ee6e8bb 7577 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7578 tmp2 = tcg_const_i32(sh);
7579 if (insn & (1 << 22))
7580 gen_helper_usat16(tmp, tmp, tmp2);
7581 else
7582 gen_helper_ssat16(tmp, tmp, tmp2);
7583 tcg_temp_free_i32(tmp2);
6ddbc6e4 7584 store_reg(s, rd, tmp);
9ee6e8bb
PB
7585 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7586 /* Select bytes. */
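/* SEL picks each byte of the result from Rn or Rm according to the
   GE[3:0] flags left by a preceding parallel add/subtract. */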
6ddbc6e4
PB
7587 tmp = load_reg(s, rn);
7588 tmp2 = load_reg(s, rm);
7d1b0095 7589 tmp3 = tcg_temp_new_i32();
0ecb72a5 7590 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7591 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7592 tcg_temp_free_i32(tmp3);
7593 tcg_temp_free_i32(tmp2);
6ddbc6e4 7594 store_reg(s, rd, tmp);
9ee6e8bb 7595 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7596 tmp = load_reg(s, rm);
9ee6e8bb 7597 shift = (insn >> 10) & 3;
1301f322 7598 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7599 rotate, a shift is sufficient. */
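/* Sign/zero extend (SXTB, SXTH, SXTB16, UXTB, UXTH, UXTB16): Rm is
   optionally rotated by 8, 16 or 24 bits before extension, and Rn
   is added for the accumulating SXTAB/UXTAB-style forms. */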
7600 if (shift != 0)
f669df27 7601 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7602 op1 = (insn >> 20) & 7;
7603 switch (op1) {
5e3f878a
PB
7604 case 0: gen_sxtb16(tmp); break;
7605 case 2: gen_sxtb(tmp); break;
7606 case 3: gen_sxth(tmp); break;
7607 case 4: gen_uxtb16(tmp); break;
7608 case 6: gen_uxtb(tmp); break;
7609 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7610 default: goto illegal_op;
7611 }
7612 if (rn != 15) {
5e3f878a 7613 tmp2 = load_reg(s, rn);
9ee6e8bb 7614 if ((op1 & 3) == 0) {
5e3f878a 7615 gen_add16(tmp, tmp2);
9ee6e8bb 7616 } else {
5e3f878a 7617 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7618 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7619 }
7620 }
6c95676b 7621 store_reg(s, rd, tmp);
9ee6e8bb
PB
7622 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7623 /* rev */
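/* REV reverses the byte order of the word, REV16 swaps the bytes
   within each halfword, REVSH byte-swaps and sign-extends the low
   halfword, and RBIT reverses all 32 bits. */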
b0109805 7624 tmp = load_reg(s, rm);
9ee6e8bb
PB
7625 if (insn & (1 << 22)) {
7626 if (insn & (1 << 7)) {
b0109805 7627 gen_revsh(tmp);
9ee6e8bb
PB
7628 } else {
7629 ARCH(6T2);
b0109805 7630 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7631 }
7632 } else {
7633 if (insn & (1 << 7))
b0109805 7634 gen_rev16(tmp);
9ee6e8bb 7635 else
66896cb8 7636 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7637 }
b0109805 7638 store_reg(s, rd, tmp);
9ee6e8bb
PB
7639 } else {
7640 goto illegal_op;
7641 }
7642 break;
7643 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7644 switch ((insn >> 20) & 0x7) {
7645 case 5:
7646 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7647 /* op2 not 00x or 11x : UNDEF */
7648 goto illegal_op;
7649 }
838fa72d
AJ
7650 /* Signed multiply most significant [accumulate].
7651 (SMMUL, SMMLA, SMMLS) */
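/* The result is the top 32 bits of the 64-bit product; bit 5
   selects the rounding form, which adds 0x80000000 before the
   high word is extracted. */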
41e9564d
PM
7652 tmp = load_reg(s, rm);
7653 tmp2 = load_reg(s, rs);
a7812ae4 7654 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7655
955a7dd5 7656 if (rd != 15) {
838fa72d 7657 tmp = load_reg(s, rd);
9ee6e8bb 7658 if (insn & (1 << 6)) {
838fa72d 7659 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7660 } else {
838fa72d 7661 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7662 }
7663 }
838fa72d
AJ
7664 if (insn & (1 << 5)) {
7665 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7666 }
7667 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7668 tmp = tcg_temp_new_i32();
838fa72d
AJ
7669 tcg_gen_trunc_i64_i32(tmp, tmp64);
7670 tcg_temp_free_i64(tmp64);
955a7dd5 7671 store_reg(s, rn, tmp);
41e9564d
PM
7672 break;
7673 case 0:
7674 case 4:
7675 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
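/* Dual 16x16 multiplies: the two halfword products are added
   (SMUAD/SMLAD/SMLALD) or subtracted (SMUSD/SMLSD/SMLSLD), with an
   optional halfword swap of one operand selected by bit 5. */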
7676 if (insn & (1 << 7)) {
7677 goto illegal_op;
7678 }
7679 tmp = load_reg(s, rm);
7680 tmp2 = load_reg(s, rs);
9ee6e8bb 7681 if (insn & (1 << 5))
5e3f878a
PB
7682 gen_swap_half(tmp2);
7683 gen_smul_dual(tmp, tmp2);
5e3f878a 7684 if (insn & (1 << 6)) {
e1d177b9 7685 /* This subtraction cannot overflow. */
5e3f878a
PB
7686 tcg_gen_sub_i32(tmp, tmp, tmp2);
7687 } else {
e1d177b9
PM
7688 /* This addition cannot overflow 32 bits;
7689 * however it may overflow considered as a signed
7690 * operation, in which case we must set the Q flag.
7691 */
7692 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7693 }
7d1b0095 7694 tcg_temp_free_i32(tmp2);
9ee6e8bb 7695 if (insn & (1 << 22)) {
5e3f878a 7696 /* smlald, smlsld */
a7812ae4
PB
7697 tmp64 = tcg_temp_new_i64();
7698 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7699 tcg_temp_free_i32(tmp);
a7812ae4
PB
7700 gen_addq(s, tmp64, rd, rn);
7701 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7702 tcg_temp_free_i64(tmp64);
9ee6e8bb 7703 } else {
5e3f878a 7704 /* smuad, smusd, smlad, smlsd */
22478e79 7705 if (rd != 15)
9ee6e8bb 7706 {
22478e79 7707 tmp2 = load_reg(s, rd);
5e3f878a 7708 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7709 tcg_temp_free_i32(tmp2);
9ee6e8bb 7710 }
22478e79 7711 store_reg(s, rn, tmp);
9ee6e8bb 7712 }
41e9564d 7713 break;
b8b8ea05
PM
7714 case 1:
7715 case 3:
7716 /* SDIV, UDIV */
7717 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7718 goto illegal_op;
7719 }
7720 if (((insn >> 5) & 7) || (rd != 15)) {
7721 goto illegal_op;
7722 }
7723 tmp = load_reg(s, rm);
7724 tmp2 = load_reg(s, rs);
7725 if (insn & (1 << 21)) {
7726 gen_helper_udiv(tmp, tmp, tmp2);
7727 } else {
7728 gen_helper_sdiv(tmp, tmp, tmp2);
7729 }
7730 tcg_temp_free_i32(tmp2);
7731 store_reg(s, rn, tmp);
7732 break;
41e9564d
PM
7733 default:
7734 goto illegal_op;
9ee6e8bb
PB
7735 }
7736 break;
7737 case 3:
7738 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7739 switch (op1) {
7740 case 0: /* Unsigned sum of absolute differences. */
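/* USAD8 sums the absolute differences of the four byte lanes of
   the two source registers; the USADA8 form (rd != 15 here) also
   adds a 32-bit accumulator register. */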
6ddbc6e4
PB
7741 ARCH(6);
7742 tmp = load_reg(s, rm);
7743 tmp2 = load_reg(s, rs);
7744 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7745 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7746 if (rd != 15) {
7747 tmp2 = load_reg(s, rd);
6ddbc6e4 7748 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7749 tcg_temp_free_i32(tmp2);
9ee6e8bb 7750 }
ded9d295 7751 store_reg(s, rn, tmp);
9ee6e8bb
PB
7752 break;
7753 case 0x20: case 0x24: case 0x28: case 0x2c:
7754 /* Bitfield insert/clear. */
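/* BFC is encoded as BFI with Rm == 15 (insert zeroes); i is the
   field width (msb + 1 - lsb), and a width of 32 replaces the whole
   destination register. */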
7755 ARCH(6T2);
7756 shift = (insn >> 7) & 0x1f;
7757 i = (insn >> 16) & 0x1f;
7758 i = i + 1 - shift;
7759 if (rm == 15) {
7d1b0095 7760 tmp = tcg_temp_new_i32();
5e3f878a 7761 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7762 } else {
5e3f878a 7763 tmp = load_reg(s, rm);
9ee6e8bb
PB
7764 }
7765 if (i != 32) {
5e3f878a 7766 tmp2 = load_reg(s, rd);
8f8e3aa4 7767 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7768 tcg_temp_free_i32(tmp2);
9ee6e8bb 7769 }
5e3f878a 7770 store_reg(s, rd, tmp);
9ee6e8bb
PB
7771 break;
7772 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7773 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7774 ARCH(6T2);
5e3f878a 7775 tmp = load_reg(s, rm);
9ee6e8bb
PB
7776 shift = (insn >> 7) & 0x1f;
7777 i = ((insn >> 16) & 0x1f) + 1;
7778 if (shift + i > 32)
7779 goto illegal_op;
7780 if (i < 32) {
7781 if (op1 & 0x20) {
5e3f878a 7782 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7783 } else {
5e3f878a 7784 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7785 }
7786 }
5e3f878a 7787 store_reg(s, rd, tmp);
9ee6e8bb
PB
7788 break;
7789 default:
7790 goto illegal_op;
7791 }
7792 break;
7793 }
7794 break;
7795 }
7796 do_ldst:
7797 /* Check for undefined extension instructions
7798 * per the ARM Bible, i.e.:
7799 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7800 */
7801 sh = (0xf << 20) | (0xf << 4);
7802 if (op1 == 0x7 && ((insn & sh) == sh))
7803 {
7804 goto illegal_op;
7805 }
7806 /* load/store byte/word */
7807 rn = (insn >> 16) & 0xf;
7808 rd = (insn >> 12) & 0xf;
b0109805 7809 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7810 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7811 if (insn & (1 << 24))
b0109805 7812 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7813 if (insn & (1 << 20)) {
7814 /* load */
9ee6e8bb 7815 if (insn & (1 << 22)) {
b0109805 7816 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7817 } else {
b0109805 7818 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7819 }
9ee6e8bb
PB
7820 } else {
7821 /* store */
b0109805 7822 tmp = load_reg(s, rd);
9ee6e8bb 7823 if (insn & (1 << 22))
b0109805 7824 gen_st8(tmp, tmp2, i);
9ee6e8bb 7825 else
b0109805 7826 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7827 }
7828 if (!(insn & (1 << 24))) {
b0109805
PB
7829 gen_add_data_offset(s, insn, tmp2);
7830 store_reg(s, rn, tmp2);
7831 } else if (insn & (1 << 21)) {
7832 store_reg(s, rn, tmp2);
7833 } else {
7d1b0095 7834 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7835 }
7836 if (insn & (1 << 20)) {
7837 /* Complete the load. */
be5e7a76 7838 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7839 }
7840 break;
7841 case 0x08:
7842 case 0x09:
7843 {
7844 int j, n, user, loaded_base;
b0109805 7845 TCGv loaded_var;
9ee6e8bb
PB
7846 /* load/store multiple words */
7847 /* XXX: store correct base if write back */
7848 user = 0;
7849 if (insn & (1 << 22)) {
7850 if (IS_USER(s))
7851 goto illegal_op; /* only usable in supervisor mode */
7852
7853 if ((insn & (1 << 15)) == 0)
7854 user = 1;
7855 }
7856 rn = (insn >> 16) & 0xf;
b0109805 7857 addr = load_reg(s, rn);
9ee6e8bb
PB
7858
7859 /* compute total size */
7860 loaded_base = 0;
a50f5b91 7861 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7862 n = 0;
7863 for(i=0;i<16;i++) {
7864 if (insn & (1 << i))
7865 n++;
7866 }
7867 /* XXX: test invalid n == 0 case ? */
7868 if (insn & (1 << 23)) {
7869 if (insn & (1 << 24)) {
7870 /* pre increment */
b0109805 7871 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7872 } else {
7873 /* post increment */
7874 }
7875 } else {
7876 if (insn & (1 << 24)) {
7877 /* pre decrement */
b0109805 7878 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7879 } else {
7880 /* post decrement */
7881 if (n != 1)
b0109805 7882 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7883 }
7884 }
7885 j = 0;
7886 for(i=0;i<16;i++) {
7887 if (insn & (1 << i)) {
7888 if (insn & (1 << 20)) {
7889 /* load */
b0109805 7890 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7891 if (user) {
b75263d6
JR
7892 tmp2 = tcg_const_i32(i);
7893 gen_helper_set_user_reg(tmp2, tmp);
7894 tcg_temp_free_i32(tmp2);
7d1b0095 7895 tcg_temp_free_i32(tmp);
9ee6e8bb 7896 } else if (i == rn) {
b0109805 7897 loaded_var = tmp;
9ee6e8bb
PB
7898 loaded_base = 1;
7899 } else {
be5e7a76 7900 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7901 }
7902 } else {
7903 /* store */
7904 if (i == 15) {
7905 /* special case: r15 = PC + 8 */
7906 val = (long)s->pc + 4;
7d1b0095 7907 tmp = tcg_temp_new_i32();
b0109805 7908 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7909 } else if (user) {
7d1b0095 7910 tmp = tcg_temp_new_i32();
b75263d6
JR
7911 tmp2 = tcg_const_i32(i);
7912 gen_helper_get_user_reg(tmp, tmp2);
7913 tcg_temp_free_i32(tmp2);
9ee6e8bb 7914 } else {
b0109805 7915 tmp = load_reg(s, i);
9ee6e8bb 7916 }
b0109805 7917 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7918 }
7919 j++;
7920 /* no need to add after the last transfer */
7921 if (j != n)
b0109805 7922 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7923 }
7924 }
7925 if (insn & (1 << 21)) {
7926 /* write back */
7927 if (insn & (1 << 23)) {
7928 if (insn & (1 << 24)) {
7929 /* pre increment */
7930 } else {
7931 /* post increment */
b0109805 7932 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7933 }
7934 } else {
7935 if (insn & (1 << 24)) {
7936 /* pre decrement */
7937 if (n != 1)
b0109805 7938 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7939 } else {
7940 /* post decrement */
b0109805 7941 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7942 }
7943 }
b0109805
PB
7944 store_reg(s, rn, addr);
7945 } else {
7d1b0095 7946 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7947 }
7948 if (loaded_base) {
b0109805 7949 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7950 }
7951 if ((insn & (1 << 22)) && !user) {
7952 /* Restore CPSR from SPSR. */
d9ba4830
PB
7953 tmp = load_cpu_field(spsr);
7954 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7955 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7956 s->is_jmp = DISAS_UPDATE;
7957 }
7958 }
7959 break;
7960 case 0xa:
7961 case 0xb:
7962 {
7963 int32_t offset;
7964
7965 /* branch (and link) */
7966 val = (int32_t)s->pc;
7967 if (insn & (1 << 24)) {
7d1b0095 7968 tmp = tcg_temp_new_i32();
5e3f878a
PB
7969 tcg_gen_movi_i32(tmp, val);
7970 store_reg(s, 14, tmp);
9ee6e8bb
PB
7971 }
7972 offset = (((int32_t)insn << 8) >> 8);
7973 val += (offset << 2) + 4;
7974 gen_jmp(s, val);
7975 }
7976 break;
7977 case 0xc:
7978 case 0xd:
7979 case 0xe:
7980 /* Coprocessor. */
7981 if (disas_coproc_insn(env, s, insn))
7982 goto illegal_op;
7983 break;
7984 case 0xf:
7985 /* swi */
5e3f878a 7986 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7987 s->is_jmp = DISAS_SWI;
7988 break;
7989 default:
7990 illegal_op:
bc4a0de0 7991 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7992 break;
7993 }
7994 }
7995}
7996
7997/* Return true if this is a Thumb-2 logical op. */
7998static int
7999thumb2_logic_op(int op)
8000{
8001 return (op < 8);
8002}
8003
8004/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8005 then set condition code flags based on the result of the operation.
8006 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8007 to the high bit of T1.
8008 Returns zero if the opcode is valid. */
8009
8010static int
396e467c 8011gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
8012{
8013 int logic_cc;
8014
8015 logic_cc = 0;
8016 switch (op) {
8017 case 0: /* and */
396e467c 8018 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8019 logic_cc = conds;
8020 break;
8021 case 1: /* bic */
f669df27 8022 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8023 logic_cc = conds;
8024 break;
8025 case 2: /* orr */
396e467c 8026 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8027 logic_cc = conds;
8028 break;
8029 case 3: /* orn */
29501f1b 8030 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8031 logic_cc = conds;
8032 break;
8033 case 4: /* eor */
396e467c 8034 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8035 logic_cc = conds;
8036 break;
8037 case 8: /* add */
8038 if (conds)
396e467c 8039 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 8040 else
396e467c 8041 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8042 break;
8043 case 10: /* adc */
8044 if (conds)
396e467c 8045 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 8046 else
396e467c 8047 gen_adc(t0, t1);
9ee6e8bb
PB
8048 break;
8049 case 11: /* sbc */
8050 if (conds)
396e467c 8051 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 8052 else
396e467c 8053 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
8054 break;
8055 case 13: /* sub */
8056 if (conds)
396e467c 8057 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 8058 else
396e467c 8059 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8060 break;
8061 case 14: /* rsb */
8062 if (conds)
396e467c 8063 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 8064 else
396e467c 8065 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8066 break;
8067 default: /* 5, 6, 7, 9, 12, 15. */
8068 return 1;
8069 }
8070 if (logic_cc) {
396e467c 8071 gen_logic_CC(t0);
9ee6e8bb 8072 if (shifter_out)
396e467c 8073 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8074 }
8075 return 0;
8076}
8077
8078/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8079 is not legal. */
0ecb72a5 8080static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8081{
b0109805 8082 uint32_t insn, imm, shift, offset;
9ee6e8bb 8083 uint32_t rd, rn, rm, rs;
b26eefb6 8084 TCGv tmp;
6ddbc6e4
PB
8085 TCGv tmp2;
8086 TCGv tmp3;
b0109805 8087 TCGv addr;
a7812ae4 8088 TCGv_i64 tmp64;
9ee6e8bb
PB
8089 int op;
8090 int shiftop;
8091 int conds;
8092 int logic_cc;
8093
8094 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8095 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8096 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8097 16-bit instructions to get correct prefetch abort behavior. */
8098 insn = insn_hw1;
8099 if ((insn & (1 << 12)) == 0) {
be5e7a76 8100 ARCH(5);
9ee6e8bb
PB
8101 /* Second half of blx. */
8102 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8103 tmp = load_reg(s, 14);
8104 tcg_gen_addi_i32(tmp, tmp, offset);
8105 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8106
7d1b0095 8107 tmp2 = tcg_temp_new_i32();
b0109805 8108 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8109 store_reg(s, 14, tmp2);
8110 gen_bx(s, tmp);
9ee6e8bb
PB
8111 return 0;
8112 }
8113 if (insn & (1 << 11)) {
8114 /* Second half of bl. */
8115 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8116 tmp = load_reg(s, 14);
6a0d8a1d 8117 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8118
7d1b0095 8119 tmp2 = tcg_temp_new_i32();
b0109805 8120 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8121 store_reg(s, 14, tmp2);
8122 gen_bx(s, tmp);
9ee6e8bb
PB
8123 return 0;
8124 }
8125 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8126 /* Instruction spans a page boundary. Implement it as two
8127 16-bit instructions in case the second half causes a
8128 prefetch abort. */
8129 offset = ((int32_t)insn << 21) >> 9;
396e467c 8130 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8131 return 0;
8132 }
8133 /* Fall through to 32-bit decode. */
8134 }
8135
8136 insn = lduw_code(s->pc);
8137 s->pc += 2;
8138 insn |= (uint32_t)insn_hw1 << 16;
8139
8140 if ((insn & 0xf800e800) != 0xf000e800) {
8141 ARCH(6T2);
8142 }
8143
8144 rn = (insn >> 16) & 0xf;
8145 rs = (insn >> 12) & 0xf;
8146 rd = (insn >> 8) & 0xf;
8147 rm = insn & 0xf;
8148 switch ((insn >> 25) & 0xf) {
8149 case 0: case 1: case 2: case 3:
8150 /* 16-bit instructions. Should never happen. */
8151 abort();
8152 case 4:
8153 if (insn & (1 << 22)) {
8154 /* Other load/store, table branch. */
8155 if (insn & 0x01200000) {
8156 /* Load/store doubleword. */
8157 if (rn == 15) {
7d1b0095 8158 addr = tcg_temp_new_i32();
b0109805 8159 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8160 } else {
b0109805 8161 addr = load_reg(s, rn);
9ee6e8bb
PB
8162 }
8163 offset = (insn & 0xff) * 4;
8164 if ((insn & (1 << 23)) == 0)
8165 offset = -offset;
8166 if (insn & (1 << 24)) {
b0109805 8167 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8168 offset = 0;
8169 }
8170 if (insn & (1 << 20)) {
8171 /* ldrd */
b0109805
PB
8172 tmp = gen_ld32(addr, IS_USER(s));
8173 store_reg(s, rs, tmp);
8174 tcg_gen_addi_i32(addr, addr, 4);
8175 tmp = gen_ld32(addr, IS_USER(s));
8176 store_reg(s, rd, tmp);
9ee6e8bb
PB
8177 } else {
8178 /* strd */
b0109805
PB
8179 tmp = load_reg(s, rs);
8180 gen_st32(tmp, addr, IS_USER(s));
8181 tcg_gen_addi_i32(addr, addr, 4);
8182 tmp = load_reg(s, rd);
8183 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8184 }
8185 if (insn & (1 << 21)) {
8186 /* Base writeback. */
8187 if (rn == 15)
8188 goto illegal_op;
b0109805
PB
8189 tcg_gen_addi_i32(addr, addr, offset - 4);
8190 store_reg(s, rn, addr);
8191 } else {
7d1b0095 8192 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8193 }
8194 } else if ((insn & (1 << 23)) == 0) {
8195 /* Load/store exclusive word. */
3174f8e9 8196 addr = tcg_temp_local_new();
98a46317 8197 load_reg_var(s, addr, rn);
426f5abc 8198 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8199 if (insn & (1 << 20)) {
426f5abc 8200 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8201 } else {
426f5abc 8202 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8203 }
3174f8e9 8204 tcg_temp_free(addr);
9ee6e8bb
PB
8205 } else if ((insn & (1 << 6)) == 0) {
8206 /* Table Branch. */
8207 if (rn == 15) {
7d1b0095 8208 addr = tcg_temp_new_i32();
b0109805 8209 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8210 } else {
b0109805 8211 addr = load_reg(s, rn);
9ee6e8bb 8212 }
b26eefb6 8213 tmp = load_reg(s, rm);
b0109805 8214 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8215 if (insn & (1 << 4)) {
8216 /* tbh */
b0109805 8217 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8218 tcg_temp_free_i32(tmp);
b0109805 8219 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8220 } else { /* tbb */
7d1b0095 8221 tcg_temp_free_i32(tmp);
b0109805 8222 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8223 }
7d1b0095 8224 tcg_temp_free_i32(addr);
b0109805
PB
8225 tcg_gen_shli_i32(tmp, tmp, 1);
8226 tcg_gen_addi_i32(tmp, tmp, s->pc);
8227 store_reg(s, 15, tmp);
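/* TBB/TBH: fetch a byte/halfword entry from the table at Rn + Rm
   (Rm scaled by 2 for TBH) and branch forward by twice that entry
   from the current PC. */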
9ee6e8bb
PB
8228 } else {
8229 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8230 ARCH(7);
9ee6e8bb 8231 op = (insn >> 4) & 0x3;
426f5abc
PB
8232 if (op == 2) {
8233 goto illegal_op;
8234 }
3174f8e9 8235 addr = tcg_temp_local_new();
98a46317 8236 load_reg_var(s, addr, rn);
9ee6e8bb 8237 if (insn & (1 << 20)) {
426f5abc 8238 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8239 } else {
426f5abc 8240 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8241 }
3174f8e9 8242 tcg_temp_free(addr);
9ee6e8bb
PB
8243 }
8244 } else {
8245 /* Load/store multiple, RFE, SRS. */
8246 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8247 /* Not available in user mode. */
b0109805 8248 if (IS_USER(s))
9ee6e8bb
PB
8249 goto illegal_op;
8250 if (insn & (1 << 20)) {
8251 /* rfe */
b0109805
PB
8252 addr = load_reg(s, rn);
8253 if ((insn & (1 << 24)) == 0)
8254 tcg_gen_addi_i32(addr, addr, -8);
8255 /* Load PC into tmp and CPSR into tmp2. */
8256 tmp = gen_ld32(addr, 0);
8257 tcg_gen_addi_i32(addr, addr, 4);
8258 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8259 if (insn & (1 << 21)) {
8260 /* Base writeback. */
b0109805
PB
8261 if (insn & (1 << 24)) {
8262 tcg_gen_addi_i32(addr, addr, 4);
8263 } else {
8264 tcg_gen_addi_i32(addr, addr, -4);
8265 }
8266 store_reg(s, rn, addr);
8267 } else {
7d1b0095 8268 tcg_temp_free_i32(addr);
9ee6e8bb 8269 }
b0109805 8270 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8271 } else {
8272 /* srs */
8273 op = (insn & 0x1f);
7d1b0095 8274 addr = tcg_temp_new_i32();
39ea3d4e
PM
8275 tmp = tcg_const_i32(op);
8276 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8277 tcg_temp_free_i32(tmp);
9ee6e8bb 8278 if ((insn & (1 << 24)) == 0) {
b0109805 8279 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8280 }
b0109805
PB
8281 tmp = load_reg(s, 14);
8282 gen_st32(tmp, addr, 0);
8283 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8284 tmp = tcg_temp_new_i32();
b0109805
PB
8285 gen_helper_cpsr_read(tmp);
8286 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8287 if (insn & (1 << 21)) {
8288 if ((insn & (1 << 24)) == 0) {
b0109805 8289 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8290 } else {
b0109805 8291 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8292 }
39ea3d4e
PM
8293 tmp = tcg_const_i32(op);
8294 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8295 tcg_temp_free_i32(tmp);
b0109805 8296 } else {
7d1b0095 8297 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8298 }
8299 }
8300 } else {
5856d44e
YO
8301 int i, loaded_base = 0;
8302 TCGv loaded_var;
9ee6e8bb 8303 /* Load/store multiple. */
b0109805 8304 addr = load_reg(s, rn);
9ee6e8bb
PB
8305 offset = 0;
8306 for (i = 0; i < 16; i++) {
8307 if (insn & (1 << i))
8308 offset += 4;
8309 }
8310 if (insn & (1 << 24)) {
b0109805 8311 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8312 }
8313
5856d44e 8314 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8315 for (i = 0; i < 16; i++) {
8316 if ((insn & (1 << i)) == 0)
8317 continue;
8318 if (insn & (1 << 20)) {
8319 /* Load. */
b0109805 8320 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8321 if (i == 15) {
b0109805 8322 gen_bx(s, tmp);
5856d44e
YO
8323 } else if (i == rn) {
8324 loaded_var = tmp;
8325 loaded_base = 1;
9ee6e8bb 8326 } else {
b0109805 8327 store_reg(s, i, tmp);
9ee6e8bb
PB
8328 }
8329 } else {
8330 /* Store. */
b0109805
PB
8331 tmp = load_reg(s, i);
8332 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8333 }
b0109805 8334 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8335 }
5856d44e
YO
8336 if (loaded_base) {
8337 store_reg(s, rn, loaded_var);
8338 }
9ee6e8bb
PB
8339 if (insn & (1 << 21)) {
8340 /* Base register writeback. */
8341 if (insn & (1 << 24)) {
b0109805 8342 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8343 }
8344 /* Fault if writeback register is in register list. */
8345 if (insn & (1 << rn))
8346 goto illegal_op;
b0109805
PB
8347 store_reg(s, rn, addr);
8348 } else {
7d1b0095 8349 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8350 }
8351 }
8352 }
8353 break;
2af9ab77
JB
8354 case 5:
8355
9ee6e8bb 8356 op = (insn >> 21) & 0xf;
2af9ab77
JB
8357 if (op == 6) {
8358 /* Halfword pack. */
8359 tmp = load_reg(s, rn);
8360 tmp2 = load_reg(s, rm);
8361 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8362 if (insn & (1 << 5)) {
8363 /* pkhtb */
8364 if (shift == 0)
8365 shift = 31;
8366 tcg_gen_sari_i32(tmp2, tmp2, shift);
8367 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8368 tcg_gen_ext16u_i32(tmp2, tmp2);
8369 } else {
8370 /* pkhbt */
8371 if (shift)
8372 tcg_gen_shli_i32(tmp2, tmp2, shift);
8373 tcg_gen_ext16u_i32(tmp, tmp);
8374 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8375 }
8376 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8377 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8378 store_reg(s, rd, tmp);
8379 } else {
2af9ab77
JB
8380 /* Data processing register constant shift. */
8381 if (rn == 15) {
7d1b0095 8382 tmp = tcg_temp_new_i32();
2af9ab77
JB
8383 tcg_gen_movi_i32(tmp, 0);
8384 } else {
8385 tmp = load_reg(s, rn);
8386 }
8387 tmp2 = load_reg(s, rm);
8388
8389 shiftop = (insn >> 4) & 3;
8390 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8391 conds = (insn & (1 << 20)) != 0;
8392 logic_cc = (conds && thumb2_logic_op(op));
8393 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8394 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8395 goto illegal_op;
7d1b0095 8396 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8397 if (rd != 15) {
8398 store_reg(s, rd, tmp);
8399 } else {
7d1b0095 8400 tcg_temp_free_i32(tmp);
2af9ab77 8401 }
3174f8e9 8402 }
9ee6e8bb
PB
8403 break;
8404 case 13: /* Misc data processing. */
8405 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8406 if (op < 4 && (insn & 0xf000) != 0xf000)
8407 goto illegal_op;
8408 switch (op) {
8409 case 0: /* Register controlled shift. */
8984bd2e
PB
8410 tmp = load_reg(s, rn);
8411 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8412 if ((insn & 0x70) != 0)
8413 goto illegal_op;
8414 op = (insn >> 21) & 3;
8984bd2e
PB
8415 logic_cc = (insn & (1 << 20)) != 0;
8416 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8417 if (logic_cc)
8418 gen_logic_CC(tmp);
21aeb343 8419 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8420 break;
8421 case 1: /* Sign/zero extend. */
5e3f878a 8422 tmp = load_reg(s, rm);
9ee6e8bb 8423 shift = (insn >> 4) & 3;
1301f322 8424 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8425 rotate, a shift is sufficient. */
8426 if (shift != 0)
f669df27 8427 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8428 op = (insn >> 20) & 7;
8429 switch (op) {
5e3f878a
PB
8430 case 0: gen_sxth(tmp); break;
8431 case 1: gen_uxth(tmp); break;
8432 case 2: gen_sxtb16(tmp); break;
8433 case 3: gen_uxtb16(tmp); break;
8434 case 4: gen_sxtb(tmp); break;
8435 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8436 default: goto illegal_op;
8437 }
8438 if (rn != 15) {
5e3f878a 8439 tmp2 = load_reg(s, rn);
9ee6e8bb 8440 if ((op >> 1) == 1) {
5e3f878a 8441 gen_add16(tmp, tmp2);
9ee6e8bb 8442 } else {
5e3f878a 8443 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8444 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8445 }
8446 }
5e3f878a 8447 store_reg(s, rd, tmp);
9ee6e8bb
PB
8448 break;
8449 case 2: /* SIMD add/subtract. */
8450 op = (insn >> 20) & 7;
8451 shift = (insn >> 4) & 7;
8452 if ((op & 3) == 3 || (shift & 3) == 3)
8453 goto illegal_op;
6ddbc6e4
PB
8454 tmp = load_reg(s, rn);
8455 tmp2 = load_reg(s, rm);
8456 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8457 tcg_temp_free_i32(tmp2);
6ddbc6e4 8458 store_reg(s, rd, tmp);
9ee6e8bb
PB
8459 break;
8460 case 3: /* Other data processing. */
8461 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8462 if (op < 4) {
8463 /* Saturating add/subtract. */
d9ba4830
PB
8464 tmp = load_reg(s, rn);
8465 tmp2 = load_reg(s, rm);
9ee6e8bb 8466 if (op & 1)
4809c612
JB
8467 gen_helper_double_saturate(tmp, tmp);
8468 if (op & 2)
d9ba4830 8469 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8470 else
d9ba4830 8471 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8472 tcg_temp_free_i32(tmp2);
9ee6e8bb 8473 } else {
d9ba4830 8474 tmp = load_reg(s, rn);
9ee6e8bb
PB
8475 switch (op) {
8476 case 0x0a: /* rbit */
d9ba4830 8477 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8478 break;
8479 case 0x08: /* rev */
66896cb8 8480 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8481 break;
8482 case 0x09: /* rev16 */
d9ba4830 8483 gen_rev16(tmp);
9ee6e8bb
PB
8484 break;
8485 case 0x0b: /* revsh */
d9ba4830 8486 gen_revsh(tmp);
9ee6e8bb
PB
8487 break;
8488 case 0x10: /* sel */
d9ba4830 8489 tmp2 = load_reg(s, rm);
7d1b0095 8490 tmp3 = tcg_temp_new_i32();
0ecb72a5 8491 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8492 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8493 tcg_temp_free_i32(tmp3);
8494 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8495 break;
8496 case 0x18: /* clz */
d9ba4830 8497 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8498 break;
8499 default:
8500 goto illegal_op;
8501 }
8502 }
d9ba4830 8503 store_reg(s, rd, tmp);
9ee6e8bb
PB
8504 break;
8505 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8506 op = (insn >> 4) & 0xf;
d9ba4830
PB
8507 tmp = load_reg(s, rn);
8508 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8509 switch ((insn >> 20) & 7) {
8510 case 0: /* 32 x 32 -> 32 */
d9ba4830 8511 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8512 tcg_temp_free_i32(tmp2);
9ee6e8bb 8513 if (rs != 15) {
d9ba4830 8514 tmp2 = load_reg(s, rs);
9ee6e8bb 8515 if (op)
d9ba4830 8516 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8517 else
d9ba4830 8518 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8519 tcg_temp_free_i32(tmp2);
9ee6e8bb 8520 }
9ee6e8bb
PB
8521 break;
8522 case 1: /* 16 x 16 -> 32 */
d9ba4830 8523 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8524 tcg_temp_free_i32(tmp2);
9ee6e8bb 8525 if (rs != 15) {
d9ba4830
PB
8526 tmp2 = load_reg(s, rs);
8527 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8528 tcg_temp_free_i32(tmp2);
9ee6e8bb 8529 }
9ee6e8bb
PB
8530 break;
8531 case 2: /* Dual multiply add. */
8532 case 4: /* Dual multiply subtract. */
8533 if (op)
d9ba4830
PB
8534 gen_swap_half(tmp2);
8535 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8536 if (insn & (1 << 22)) {
e1d177b9 8537 /* This subtraction cannot overflow. */
d9ba4830 8538 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8539 } else {
e1d177b9
PM
8540 /* This addition cannot overflow 32 bits;
8541 * however it may overflow considered as a signed
8542 * operation, in which case we must set the Q flag.
8543 */
8544 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8545 }
7d1b0095 8546 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8547 if (rs != 15)
8548 {
d9ba4830
PB
8549 tmp2 = load_reg(s, rs);
8550 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8551 tcg_temp_free_i32(tmp2);
9ee6e8bb 8552 }
9ee6e8bb
PB
8553 break;
8554 case 3: /* 32 * 16 -> 32msb */
8555 if (op)
d9ba4830 8556 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8557 else
d9ba4830 8558 gen_sxth(tmp2);
a7812ae4
PB
8559 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8560 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8561 tmp = tcg_temp_new_i32();
a7812ae4 8562 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8563 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8564 if (rs != 15)
8565 {
d9ba4830
PB
8566 tmp2 = load_reg(s, rs);
8567 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8568 tcg_temp_free_i32(tmp2);
9ee6e8bb 8569 }
9ee6e8bb 8570 break;
838fa72d
AJ
8571 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8572 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8573 if (rs != 15) {
838fa72d
AJ
8574 tmp = load_reg(s, rs);
8575 if (insn & (1 << 20)) {
8576 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8577 } else {
838fa72d 8578 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8579 }
2c0262af 8580 }
838fa72d
AJ
8581 if (insn & (1 << 4)) {
8582 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8583 }
8584 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8585 tmp = tcg_temp_new_i32();
838fa72d
AJ
8586 tcg_gen_trunc_i64_i32(tmp, tmp64);
8587 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8588 break;
8589 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8590 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8591 tcg_temp_free_i32(tmp2);
9ee6e8bb 8592 if (rs != 15) {
d9ba4830
PB
8593 tmp2 = load_reg(s, rs);
8594 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8595 tcg_temp_free_i32(tmp2);
5fd46862 8596 }
9ee6e8bb 8597 break;
2c0262af 8598 }
d9ba4830 8599 store_reg(s, rd, tmp);
2c0262af 8600 break;
9ee6e8bb
PB
8601 case 6: case 7: /* 64-bit multiply, Divide. */
8602 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8603 tmp = load_reg(s, rn);
8604 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8605 if ((op & 0x50) == 0x10) {
8606 /* sdiv, udiv */
47789990 8607 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8608 goto illegal_op;
47789990 8609 }
9ee6e8bb 8610 if (op & 0x20)
5e3f878a 8611 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8612 else
5e3f878a 8613 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8614 tcg_temp_free_i32(tmp2);
5e3f878a 8615 store_reg(s, rd, tmp);
9ee6e8bb
PB
8616 } else if ((op & 0xe) == 0xc) {
8617 /* Dual multiply accumulate long. */
8618 if (op & 1)
5e3f878a
PB
8619 gen_swap_half(tmp2);
8620 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8621 if (op & 0x10) {
5e3f878a 8622 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8623 } else {
5e3f878a 8624 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8625 }
7d1b0095 8626 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8627 /* BUGFIX */
8628 tmp64 = tcg_temp_new_i64();
8629 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8630 tcg_temp_free_i32(tmp);
a7812ae4
PB
8631 gen_addq(s, tmp64, rs, rd);
8632 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8633 tcg_temp_free_i64(tmp64);
2c0262af 8634 } else {
9ee6e8bb
PB
8635 if (op & 0x20) {
8636 /* Unsigned 64-bit multiply */
a7812ae4 8637 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8638 } else {
9ee6e8bb
PB
8639 if (op & 8) {
8640 /* smlalxy */
5e3f878a 8641 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8642 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8643 tmp64 = tcg_temp_new_i64();
8644 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8645 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8646 } else {
8647 /* Signed 64-bit multiply */
a7812ae4 8648 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8649 }
b5ff1b31 8650 }
9ee6e8bb
PB
8651 if (op & 4) {
8652 /* umaal */
a7812ae4
PB
8653 gen_addq_lo(s, tmp64, rs);
8654 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8655 } else if (op & 0x40) {
8656 /* 64-bit accumulate. */
a7812ae4 8657 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8658 }
a7812ae4 8659 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8660 tcg_temp_free_i64(tmp64);
5fd46862 8661 }
2c0262af 8662 break;
9ee6e8bb
PB
8663 }
8664 break;
8665 case 6: case 7: case 14: case 15:
8666 /* Coprocessor. */
8667 if (((insn >> 24) & 3) == 3) {
8668 /* Translate into the equivalent ARM encoding. */
f06053e3 8669 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8670 if (disas_neon_data_insn(env, s, insn))
8671 goto illegal_op;
8672 } else {
8673 if (insn & (1 << 28))
8674 goto illegal_op;
8675 if (disas_coproc_insn (env, s, insn))
8676 goto illegal_op;
8677 }
8678 break;
8679 case 8: case 9: case 10: case 11:
8680 if (insn & (1 << 15)) {
8681 /* Branches, misc control. */
8682 if (insn & 0x5000) {
8683 /* Unconditional branch. */
8684 /* signextend(hw1[10:0]) -> offset[:12]. */
8685 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8686 /* hw2[10:0] -> offset[11:1]. */
8687 offset |= (insn & 0x7ff) << 1;
8688 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8689 offset[24:22] already have the same value because of the
8690 sign extension above. */
8691 offset ^= ((~insn) & (1 << 13)) << 10;
8692 offset ^= ((~insn) & (1 << 11)) << 11;
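/* Together this reconstructs SignExtend(S:I1:I2:imm10:imm11:'0'),
   with I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S). */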
8693
9ee6e8bb
PB
8694 if (insn & (1 << 14)) {
8695 /* Branch and link. */
3174f8e9 8696 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8697 }
3b46e624 8698
b0109805 8699 offset += s->pc;
9ee6e8bb
PB
8700 if (insn & (1 << 12)) {
8701 /* b/bl */
b0109805 8702 gen_jmp(s, offset);
9ee6e8bb
PB
8703 } else {
8704 /* blx */
b0109805 8705 offset &= ~(uint32_t)2;
be5e7a76 8706 /* thumb2 bx, no need to check */
b0109805 8707 gen_bx_im(s, offset);
2c0262af 8708 }
9ee6e8bb
PB
8709 } else if (((insn >> 23) & 7) == 7) {
8710 /* Misc control */
8711 if (insn & (1 << 13))
8712 goto illegal_op;
8713
8714 if (insn & (1 << 26)) {
8715 /* Secure monitor call (v6Z) */
8716 goto illegal_op; /* not implemented. */
2c0262af 8717 } else {
9ee6e8bb
PB
8718 op = (insn >> 20) & 7;
8719 switch (op) {
8720 case 0: /* msr cpsr. */
8721 if (IS_M(env)) {
8984bd2e
PB
8722 tmp = load_reg(s, rn);
8723 addr = tcg_const_i32(insn & 0xff);
8724 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8725 tcg_temp_free_i32(addr);
7d1b0095 8726 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8727 gen_lookup_tb(s);
8728 break;
8729 }
8730 /* fall through */
8731 case 1: /* msr spsr. */
8732 if (IS_M(env))
8733 goto illegal_op;
2fbac54b
FN
8734 tmp = load_reg(s, rn);
8735 if (gen_set_psr(s,
9ee6e8bb 8736 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8737 op == 1, tmp))
9ee6e8bb
PB
8738 goto illegal_op;
8739 break;
8740 case 2: /* cps, nop-hint. */
8741 if (((insn >> 8) & 7) == 0) {
8742 gen_nop_hint(s, insn & 0xff);
8743 }
8744 /* Implemented as NOP in user mode. */
8745 if (IS_USER(s))
8746 break;
8747 offset = 0;
8748 imm = 0;
8749 if (insn & (1 << 10)) {
8750 if (insn & (1 << 7))
8751 offset |= CPSR_A;
8752 if (insn & (1 << 6))
8753 offset |= CPSR_I;
8754 if (insn & (1 << 5))
8755 offset |= CPSR_F;
8756 if (insn & (1 << 9))
8757 imm = CPSR_A | CPSR_I | CPSR_F;
8758 }
8759 if (insn & (1 << 8)) {
8760 offset |= 0x1f;
8761 imm |= (insn & 0x1f);
8762 }
8763 if (offset) {
2fbac54b 8764 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8765 }
8766 break;
8767 case 3: /* Special control operations. */
426f5abc 8768 ARCH(7);
9ee6e8bb
PB
8769 op = (insn >> 4) & 0xf;
8770 switch (op) {
8771 case 2: /* clrex */
426f5abc 8772 gen_clrex(s);
9ee6e8bb
PB
8773 break;
8774 case 4: /* dsb */
8775 case 5: /* dmb */
8776 case 6: /* isb */
8777 /* These execute as NOPs. */
9ee6e8bb
PB
8778 break;
8779 default:
8780 goto illegal_op;
8781 }
8782 break;
8783 case 4: /* bxj */
8784 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8785 tmp = load_reg(s, rn);
8786 gen_bx(s, tmp);
9ee6e8bb
PB
8787 break;
8788 case 5: /* Exception return. */
b8b45b68
RV
8789 if (IS_USER(s)) {
8790 goto illegal_op;
8791 }
8792 if (rn != 14 || rd != 15) {
8793 goto illegal_op;
8794 }
8795 tmp = load_reg(s, rn);
8796 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8797 gen_exception_return(s, tmp);
8798 break;
9ee6e8bb 8799 case 6: /* mrs cpsr. */
7d1b0095 8800 tmp = tcg_temp_new_i32();
9ee6e8bb 8801 if (IS_M(env)) {
8984bd2e
PB
8802 addr = tcg_const_i32(insn & 0xff);
8803 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8804 tcg_temp_free_i32(addr);
9ee6e8bb 8805 } else {
8984bd2e 8806 gen_helper_cpsr_read(tmp);
9ee6e8bb 8807 }
8984bd2e 8808 store_reg(s, rd, tmp);
9ee6e8bb
PB
8809 break;
8810 case 7: /* mrs spsr. */
8811 /* Not accessible in user mode. */
8812 if (IS_USER(s) || IS_M(env))
8813 goto illegal_op;
d9ba4830
PB
8814 tmp = load_cpu_field(spsr);
8815 store_reg(s, rd, tmp);
9ee6e8bb 8816 break;
2c0262af
FB
8817 }
8818 }
9ee6e8bb
PB
8819 } else {
8820 /* Conditional branch. */
8821 op = (insn >> 22) & 0xf;
8822 /* Generate a conditional jump to next instruction. */
8823 s->condlabel = gen_new_label();
d9ba4830 8824 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8825 s->condjmp = 1;
8826
8827 /* offset[11:1] = insn[10:0] */
8828 offset = (insn & 0x7ff) << 1;
8829 /* offset[17:12] = insn[21:16]. */
8830 offset |= (insn & 0x003f0000) >> 4;
8831 /* offset[31:20] = insn[26]. */
8832 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8833 /* offset[18] = insn[13]. */
8834 offset |= (insn & (1 << 13)) << 5;
8835 /* offset[19] = insn[11]. */
8836 offset |= (insn & (1 << 11)) << 8;
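/* This assembles SignExtend(S:J2:J1:imm6:imm11:'0'); unlike the
   unconditional branch there is no J-bit inversion. */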
8837
8838 /* jump to the offset */
b0109805 8839 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8840 }
8841 } else {
8842 /* Data processing immediate. */
8843 if (insn & (1 << 25)) {
8844 if (insn & (1 << 24)) {
8845 if (insn & (1 << 20))
8846 goto illegal_op;
8847 /* Bitfield/Saturate. */
8848 op = (insn >> 21) & 7;
8849 imm = insn & 0x1f;
8850 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8851 if (rn == 15) {
7d1b0095 8852 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8853 tcg_gen_movi_i32(tmp, 0);
8854 } else {
8855 tmp = load_reg(s, rn);
8856 }
9ee6e8bb
PB
8857 switch (op) {
8858 case 2: /* Signed bitfield extract. */
8859 imm++;
8860 if (shift + imm > 32)
8861 goto illegal_op;
8862 if (imm < 32)
6ddbc6e4 8863 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8864 break;
8865 case 6: /* Unsigned bitfield extract. */
8866 imm++;
8867 if (shift + imm > 32)
8868 goto illegal_op;
8869 if (imm < 32)
6ddbc6e4 8870 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8871 break;
8872 case 3: /* Bitfield insert/clear. */
8873 if (imm < shift)
8874 goto illegal_op;
8875 imm = imm + 1 - shift;
8876 if (imm != 32) {
6ddbc6e4 8877 tmp2 = load_reg(s, rd);
8f8e3aa4 8878 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8879 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8880 }
8881 break;
8882 case 7:
8883 goto illegal_op;
8884 default: /* Saturate. */
9ee6e8bb
PB
8885 if (shift) {
8886 if (op & 1)
6ddbc6e4 8887 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8888 else
6ddbc6e4 8889 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8890 }
6ddbc6e4 8891 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8892 if (op & 4) {
8893 /* Unsigned. */
9ee6e8bb 8894 if ((op & 1) && shift == 0)
6ddbc6e4 8895 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8896 else
6ddbc6e4 8897 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8898 } else {
9ee6e8bb 8899 /* Signed. */
9ee6e8bb 8900 if ((op & 1) && shift == 0)
6ddbc6e4 8901 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8902 else
6ddbc6e4 8903 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8904 }
b75263d6 8905 tcg_temp_free_i32(tmp2);
9ee6e8bb 8906 break;
2c0262af 8907 }
6ddbc6e4 8908 store_reg(s, rd, tmp);
9ee6e8bb
PB
8909 } else {
8910 imm = ((insn & 0x04000000) >> 15)
8911 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8912 if (insn & (1 << 22)) {
8913 /* 16-bit immediate. */
8914 imm |= (insn >> 4) & 0xf000;
8915 if (insn & (1 << 23)) {
8916 /* movt */
5e3f878a 8917 tmp = load_reg(s, rd);
86831435 8918 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8919 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8920 } else {
9ee6e8bb 8921 /* movw */
7d1b0095 8922 tmp = tcg_temp_new_i32();
5e3f878a 8923 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8924 }
8925 } else {
9ee6e8bb
PB
8926 /* Add/sub 12-bit immediate. */
8927 if (rn == 15) {
b0109805 8928 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8929 if (insn & (1 << 23))
b0109805 8930 offset -= imm;
9ee6e8bb 8931 else
b0109805 8932 offset += imm;
7d1b0095 8933 tmp = tcg_temp_new_i32();
5e3f878a 8934 tcg_gen_movi_i32(tmp, offset);
2c0262af 8935 } else {
5e3f878a 8936 tmp = load_reg(s, rn);
9ee6e8bb 8937 if (insn & (1 << 23))
5e3f878a 8938 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8939 else
5e3f878a 8940 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8941 }
9ee6e8bb 8942 }
5e3f878a 8943 store_reg(s, rd, tmp);
191abaa2 8944 }
9ee6e8bb
PB
8945 } else {
8946 int shifter_out = 0;
8947 /* modified 12-bit immediate. */
8948 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8949 imm = (insn & 0xff);
8950 switch (shift) {
8951 case 0: /* XY */
8952 /* Nothing to do. */
8953 break;
8954 case 1: /* 00XY00XY */
8955 imm |= imm << 16;
8956 break;
8957 case 2: /* XY00XY00 */
8958 imm |= imm << 16;
8959 imm <<= 8;
8960 break;
8961 case 3: /* XYXYXYXY */
8962 imm |= imm << 16;
8963 imm |= imm << 8;
8964 break;
8965 default: /* Rotated constant. */
8966 shift = (shift << 1) | (imm >> 7);
8967 imm |= 0x80;
8968 imm = imm << (32 - shift);
8969 shifter_out = 1;
8970 break;
b5ff1b31 8971 }
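/* Examples: imm8 = 0xAB expands to 0x000000AB, 0x00AB00AB,
   0xAB00AB00 or 0xABABABAB for shifts 0-3; for larger values the
   constant is 1:imm8[6:0] rotated right by the final 'shift'. */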
7d1b0095 8972 tmp2 = tcg_temp_new_i32();
3174f8e9 8973 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8974 rn = (insn >> 16) & 0xf;
3174f8e9 8975 if (rn == 15) {
7d1b0095 8976 tmp = tcg_temp_new_i32();
3174f8e9
FN
8977 tcg_gen_movi_i32(tmp, 0);
8978 } else {
8979 tmp = load_reg(s, rn);
8980 }
9ee6e8bb
PB
8981 op = (insn >> 21) & 0xf;
8982 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8983 shifter_out, tmp, tmp2))
9ee6e8bb 8984 goto illegal_op;
7d1b0095 8985 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8986 rd = (insn >> 8) & 0xf;
8987 if (rd != 15) {
3174f8e9
FN
8988 store_reg(s, rd, tmp);
8989 } else {
7d1b0095 8990 tcg_temp_free_i32(tmp);
2c0262af 8991 }
2c0262af 8992 }
9ee6e8bb
PB
8993 }
8994 break;
8995 case 12: /* Load/store single data item. */
8996 {
8997 int postinc = 0;
8998 int writeback = 0;
b0109805 8999 int user;
9ee6e8bb
PB
9000 if ((insn & 0x01100000) == 0x01000000) {
9001 if (disas_neon_ls_insn(env, s, insn))
c1713132 9002 goto illegal_op;
9ee6e8bb
PB
9003 break;
9004 }
a2fdc890
PM
9005 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9006 if (rs == 15) {
9007 if (!(insn & (1 << 20))) {
9008 goto illegal_op;
9009 }
9010 if (op != 2) {
9011 /* Byte or halfword load space with dest == r15 : memory hints.
9012 * Catch them early so we don't emit pointless addressing code.
9013 * This space is a mix of:
9014 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9015 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9016 * cores)
9017 * unallocated hints, which must be treated as NOPs
9018 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9019 * which is easiest for the decoding logic
9020 * Some space which must UNDEF
9021 */
9022 int op1 = (insn >> 23) & 3;
9023 int op2 = (insn >> 6) & 0x3f;
9024 if (op & 2) {
9025 goto illegal_op;
9026 }
9027 if (rn == 15) {
02afbf64
PM
9028 /* UNPREDICTABLE, unallocated hint or
9029 * PLD/PLDW/PLI (literal)
9030 */
a2fdc890
PM
9031 return 0;
9032 }
9033 if (op1 & 1) {
02afbf64 9034 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9035 }
9036 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9037 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9038 }
9039 /* UNDEF space, or an UNPREDICTABLE */
9040 return 1;
9041 }
9042 }
b0109805 9043 user = IS_USER(s);
9ee6e8bb 9044 if (rn == 15) {
7d1b0095 9045 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9046 /* PC relative. */
9047 /* s->pc has already been incremented by 4. */
9048 imm = s->pc & 0xfffffffc;
9049 if (insn & (1 << 23))
9050 imm += insn & 0xfff;
9051 else
9052 imm -= insn & 0xfff;
b0109805 9053 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9054 } else {
b0109805 9055 addr = load_reg(s, rn);
9ee6e8bb
PB
9056 if (insn & (1 << 23)) {
9057 /* Positive offset. */
9058 imm = insn & 0xfff;
b0109805 9059 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9060 } else {
9ee6e8bb 9061 imm = insn & 0xff;
2a0308c5
PM
9062 switch ((insn >> 8) & 0xf) {
9063 case 0x0: /* Shifted Register. */
9ee6e8bb 9064 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9065 if (shift > 3) {
9066 tcg_temp_free_i32(addr);
18c9b560 9067 goto illegal_op;
2a0308c5 9068 }
b26eefb6 9069 tmp = load_reg(s, rm);
9ee6e8bb 9070 if (shift)
b26eefb6 9071 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9072 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9073 tcg_temp_free_i32(tmp);
9ee6e8bb 9074 break;
2a0308c5 9075 case 0xc: /* Negative offset. */
b0109805 9076 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9077 break;
2a0308c5 9078 case 0xe: /* User privilege. */
b0109805
PB
9079 tcg_gen_addi_i32(addr, addr, imm);
9080 user = 1;
9ee6e8bb 9081 break;
2a0308c5 9082 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9083 imm = -imm;
9084 /* Fall through. */
2a0308c5 9085 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9086 postinc = 1;
9087 writeback = 1;
9088 break;
2a0308c5 9089 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9090 imm = -imm;
9091 /* Fall through. */
2a0308c5 9092 case 0xf: /* Pre-increment. */
b0109805 9093 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9094 writeback = 1;
9095 break;
9096 default:
2a0308c5 9097 tcg_temp_free_i32(addr);
b7bcbe95 9098 goto illegal_op;
9ee6e8bb
PB
9099 }
9100 }
9101 }
9ee6e8bb
PB
9102 if (insn & (1 << 20)) {
9103 /* Load. */
a2fdc890
PM
9104 switch (op) {
9105 case 0: tmp = gen_ld8u(addr, user); break;
9106 case 4: tmp = gen_ld8s(addr, user); break;
9107 case 1: tmp = gen_ld16u(addr, user); break;
9108 case 5: tmp = gen_ld16s(addr, user); break;
9109 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
9110 default:
9111 tcg_temp_free_i32(addr);
9112 goto illegal_op;
a2fdc890
PM
9113 }
9114 if (rs == 15) {
9115 gen_bx(s, tmp);
9ee6e8bb 9116 } else {
a2fdc890 9117 store_reg(s, rs, tmp);
9ee6e8bb
PB
9118 }
9119 } else {
9120 /* Store. */
b0109805 9121 tmp = load_reg(s, rs);
9ee6e8bb 9122 switch (op) {
b0109805
PB
9123 case 0: gen_st8(tmp, addr, user); break;
9124 case 1: gen_st16(tmp, addr, user); break;
9125 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
9126 default:
9127 tcg_temp_free_i32(addr);
9128 goto illegal_op;
b7bcbe95 9129 }
2c0262af 9130 }
9ee6e8bb 9131 if (postinc)
b0109805
PB
9132 tcg_gen_addi_i32(addr, addr, imm);
9133 if (writeback) {
9134 store_reg(s, rn, addr);
9135 } else {
7d1b0095 9136 tcg_temp_free_i32(addr);
b0109805 9137 }
9ee6e8bb
PB
9138 }
9139 break;
9140 default:
9141 goto illegal_op;
2c0262af 9142 }
9ee6e8bb
PB
9143 return 0;
9144illegal_op:
9145 return 1;
2c0262af
FB
9146}
9147
0ecb72a5 9148static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9149{
9150 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9151 int32_t offset;
9152 int i;
b26eefb6 9153 TCGv tmp;
d9ba4830 9154 TCGv tmp2;
b0109805 9155 TCGv addr;
99c475ab 9156
9ee6e8bb
PB
9157 if (s->condexec_mask) {
9158 cond = s->condexec_cond;
bedd2912
JB
9159 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9160 s->condlabel = gen_new_label();
9161 gen_test_cc(cond ^ 1, s->condlabel);
9162 s->condjmp = 1;
9163 }
9ee6e8bb
PB
9164 }
9165
b5ff1b31 9166 insn = lduw_code(s->pc);
99c475ab 9167 s->pc += 2;
b5ff1b31 9168
99c475ab
FB
9169 switch (insn >> 12) {
9170 case 0: case 1:
396e467c 9171
99c475ab
FB
9172 rd = insn & 7;
9173 op = (insn >> 11) & 3;
9174 if (op == 3) {
9175 /* add/subtract */
9176 rn = (insn >> 3) & 7;
396e467c 9177 tmp = load_reg(s, rn);
99c475ab
FB
9178 if (insn & (1 << 10)) {
9179 /* immediate */
7d1b0095 9180 tmp2 = tcg_temp_new_i32();
396e467c 9181 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9182 } else {
9183 /* reg */
9184 rm = (insn >> 6) & 7;
396e467c 9185 tmp2 = load_reg(s, rm);
99c475ab 9186 }
9ee6e8bb
PB
9187 if (insn & (1 << 9)) {
9188 if (s->condexec_mask)
396e467c 9189 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9190 else
396e467c 9191 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
9192 } else {
9193 if (s->condexec_mask)
396e467c 9194 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9195 else
396e467c 9196 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 9197 }
7d1b0095 9198 tcg_temp_free_i32(tmp2);
396e467c 9199 store_reg(s, rd, tmp);
99c475ab
FB
9200 } else {
9201 /* shift immediate */
9202 rm = (insn >> 3) & 7;
9203 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9204 tmp = load_reg(s, rm);
9205 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9206 if (!s->condexec_mask)
9207 gen_logic_CC(tmp);
9208 store_reg(s, rd, tmp);
99c475ab
FB
9209 }
9210 break;
9211 case 2: case 3:
9212 /* arithmetic large immediate */
9213 op = (insn >> 11) & 3;
9214 rd = (insn >> 8) & 0x7;
396e467c 9215 if (op == 0) { /* mov */
7d1b0095 9216 tmp = tcg_temp_new_i32();
396e467c 9217 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9218 if (!s->condexec_mask)
396e467c
FN
9219 gen_logic_CC(tmp);
9220 store_reg(s, rd, tmp);
9221 } else {
9222 tmp = load_reg(s, rd);
7d1b0095 9223 tmp2 = tcg_temp_new_i32();
396e467c
FN
9224 tcg_gen_movi_i32(tmp2, insn & 0xff);
9225 switch (op) {
9226 case 1: /* cmp */
9227 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9228 tcg_temp_free_i32(tmp);
9229 tcg_temp_free_i32(tmp2);
396e467c
FN
9230 break;
9231 case 2: /* add */
9232 if (s->condexec_mask)
9233 tcg_gen_add_i32(tmp, tmp, tmp2);
9234 else
9235 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 9236 tcg_temp_free_i32(tmp2);
396e467c
FN
9237 store_reg(s, rd, tmp);
9238 break;
9239 case 3: /* sub */
9240 if (s->condexec_mask)
9241 tcg_gen_sub_i32(tmp, tmp, tmp2);
9242 else
9243 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 9244 tcg_temp_free_i32(tmp2);
396e467c
FN
9245 store_reg(s, rd, tmp);
9246 break;
9247 }
99c475ab 9248 }
99c475ab
FB
9249 break;
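/* Worked example for the decode above: the halfword 0x3205 has op == 2,
 * rd == 2 and an 8-bit immediate of 5, i.e. ADDS r2, #5.  As with the other
 * Thumb-1 data-processing forms, the flags are only updated when we are
 * outside an IT block (s->condexec_mask == 0). */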
9250 case 4:
9251 if (insn & (1 << 11)) {
9252 rd = (insn >> 8) & 7;
5899f386
FB
9253 /* load pc-relative. Bit 1 of PC is ignored. */
9254 val = s->pc + 2 + ((insn & 0xff) * 4);
9255 val &= ~(uint32_t)2;
7d1b0095 9256 addr = tcg_temp_new_i32();
b0109805
PB
9257 tcg_gen_movi_i32(addr, val);
9258 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9259 tcg_temp_free_i32(addr);
b0109805 9260 store_reg(s, rd, tmp);
99c475ab
FB
9261 break;
9262 }
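/* Worked example for the pc-relative load above: 0x4C02 gives rd == 4 and an
 * offset of 2 * 4 == 8, i.e. LDR r4, [pc, #8].  Since s->pc has already been
 * advanced past the halfword, s->pc + 2 is the architectural PC (instruction
 * address + 4), and clearing bit 1 yields the word-aligned base address the
 * encoding requires. */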
9263 if (insn & (1 << 10)) {
9264 /* data processing extended or blx */
9265 rd = (insn & 7) | ((insn >> 4) & 8);
9266 rm = (insn >> 3) & 0xf;
9267 op = (insn >> 8) & 3;
9268 switch (op) {
9269 case 0: /* add */
396e467c
FN
9270 tmp = load_reg(s, rd);
9271 tmp2 = load_reg(s, rm);
9272 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9273 tcg_temp_free_i32(tmp2);
396e467c 9274 store_reg(s, rd, tmp);
99c475ab
FB
9275 break;
9276 case 1: /* cmp */
396e467c
FN
9277 tmp = load_reg(s, rd);
9278 tmp2 = load_reg(s, rm);
9279 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9280 tcg_temp_free_i32(tmp2);
9281 tcg_temp_free_i32(tmp);
99c475ab
FB
9282 break;
9283 case 2: /* mov/cpy */
396e467c
FN
9284 tmp = load_reg(s, rm);
9285 store_reg(s, rd, tmp);
99c475ab
FB
9286 break;
9287 case 3: /* branch [and link] exchange thumb register */
b0109805 9288 tmp = load_reg(s, rm);
99c475ab 9289 if (insn & (1 << 7)) {
be5e7a76 9290 ARCH(5);
99c475ab 9291 val = (uint32_t)s->pc | 1;
7d1b0095 9292 tmp2 = tcg_temp_new_i32();
b0109805
PB
9293 tcg_gen_movi_i32(tmp2, val);
9294 store_reg(s, 14, tmp2);
99c475ab 9295 }
be5e7a76 9296 /* already thumb, no need to check */
d9ba4830 9297 gen_bx(s, tmp);
99c475ab
FB
9298 break;
9299 }
9300 break;
9301 }
9302
9303 /* data processing register */
9304 rd = insn & 7;
9305 rm = (insn >> 3) & 7;
9306 op = (insn >> 6) & 0xf;
9307 if (op == 2 || op == 3 || op == 4 || op == 7) {
9308 /* the shift/rotate ops want the operands backwards */
9309 val = rm;
9310 rm = rd;
9311 rd = val;
9312 val = 1;
9313 } else {
9314 val = 0;
9315 }
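/* Example of why the swap is needed: for LSLS rdn, rm (op == 2) the value to
 * shift is in bits [2:0] (rdn) and the shift amount in bits [5:3].  After the
 * swap, tmp below holds the shift amount, tmp2 the value being shifted, and
 * the result is written back through rm (val == 1), which now names the
 * original destination register. */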
9316
396e467c 9317 if (op == 9) { /* neg */
7d1b0095 9318 tmp = tcg_temp_new_i32();
396e467c
FN
9319 tcg_gen_movi_i32(tmp, 0);
9320 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9321 tmp = load_reg(s, rd);
9322 } else {
9323 TCGV_UNUSED(tmp);
9324 }
99c475ab 9325
396e467c 9326 tmp2 = load_reg(s, rm);
5899f386 9327 switch (op) {
99c475ab 9328 case 0x0: /* and */
396e467c 9329 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9330 if (!s->condexec_mask)
396e467c 9331 gen_logic_CC(tmp);
99c475ab
FB
9332 break;
9333 case 0x1: /* eor */
396e467c 9334 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9335 if (!s->condexec_mask)
396e467c 9336 gen_logic_CC(tmp);
99c475ab
FB
9337 break;
9338 case 0x2: /* lsl */
9ee6e8bb 9339 if (s->condexec_mask) {
396e467c 9340 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9341 } else {
396e467c
FN
9342 gen_helper_shl_cc(tmp2, tmp2, tmp);
9343 gen_logic_CC(tmp2);
9ee6e8bb 9344 }
99c475ab
FB
9345 break;
9346 case 0x3: /* lsr */
9ee6e8bb 9347 if (s->condexec_mask) {
396e467c 9348 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9349 } else {
396e467c
FN
9350 gen_helper_shr_cc(tmp2, tmp2, tmp);
9351 gen_logic_CC(tmp2);
9ee6e8bb 9352 }
99c475ab
FB
9353 break;
9354 case 0x4: /* asr */
9ee6e8bb 9355 if (s->condexec_mask) {
396e467c 9356 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9357 } else {
396e467c
FN
9358 gen_helper_sar_cc(tmp2, tmp2, tmp);
9359 gen_logic_CC(tmp2);
9ee6e8bb 9360 }
99c475ab
FB
9361 break;
9362 case 0x5: /* adc */
9ee6e8bb 9363 if (s->condexec_mask)
396e467c 9364 gen_adc(tmp, tmp2);
9ee6e8bb 9365 else
396e467c 9366 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9367 break;
9368 case 0x6: /* sbc */
9ee6e8bb 9369 if (s->condexec_mask)
396e467c 9370 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9371 else
396e467c 9372 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9373 break;
9374 case 0x7: /* ror */
9ee6e8bb 9375 if (s->condexec_mask) {
f669df27
AJ
9376 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9377 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9378 } else {
396e467c
FN
9379 gen_helper_ror_cc(tmp2, tmp2, tmp);
9380 gen_logic_CC(tmp2);
9ee6e8bb 9381 }
99c475ab
FB
9382 break;
9383 case 0x8: /* tst */
396e467c
FN
9384 tcg_gen_and_i32(tmp, tmp, tmp2);
9385 gen_logic_CC(tmp);
99c475ab 9386 rd = 16;
5899f386 9387 break;
99c475ab 9388 case 0x9: /* neg */
9ee6e8bb 9389 if (s->condexec_mask)
396e467c 9390 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9391 else
396e467c 9392 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9393 break;
9394 case 0xa: /* cmp */
396e467c 9395 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9396 rd = 16;
9397 break;
9398 case 0xb: /* cmn */
396e467c 9399 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9400 rd = 16;
9401 break;
9402 case 0xc: /* orr */
396e467c 9403 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9404 if (!s->condexec_mask)
396e467c 9405 gen_logic_CC(tmp);
99c475ab
FB
9406 break;
9407 case 0xd: /* mul */
7b2919a0 9408 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9409 if (!s->condexec_mask)
396e467c 9410 gen_logic_CC(tmp);
99c475ab
FB
9411 break;
9412 case 0xe: /* bic */
f669df27 9413 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9414 if (!s->condexec_mask)
396e467c 9415 gen_logic_CC(tmp);
99c475ab
FB
9416 break;
9417 case 0xf: /* mvn */
396e467c 9418 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9419 if (!s->condexec_mask)
396e467c 9420 gen_logic_CC(tmp2);
99c475ab 9421 val = 1;
5899f386 9422 rm = rd;
99c475ab
FB
9423 break;
9424 }
9425 if (rd != 16) {
396e467c
FN
9426 if (val) {
9427 store_reg(s, rm, tmp2);
9428 if (op != 0xf)
7d1b0095 9429 tcg_temp_free_i32(tmp);
396e467c
FN
9430 } else {
9431 store_reg(s, rd, tmp);
7d1b0095 9432 tcg_temp_free_i32(tmp2);
396e467c
FN
9433 }
9434 } else {
7d1b0095
PM
9435 tcg_temp_free_i32(tmp);
9436 tcg_temp_free_i32(tmp2);
99c475ab
FB
9437 }
9438 break;
9439
9440 case 5:
9441 /* load/store register offset. */
9442 rd = insn & 7;
9443 rn = (insn >> 3) & 7;
9444 rm = (insn >> 6) & 7;
9445 op = (insn >> 9) & 7;
b0109805 9446 addr = load_reg(s, rn);
b26eefb6 9447 tmp = load_reg(s, rm);
b0109805 9448 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9449 tcg_temp_free_i32(tmp);
99c475ab
FB
9450
9451 if (op < 3) /* store */
b0109805 9452 tmp = load_reg(s, rd);
99c475ab
FB
9453
9454 switch (op) {
9455 case 0: /* str */
b0109805 9456 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9457 break;
9458 case 1: /* strh */
b0109805 9459 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9460 break;
9461 case 2: /* strb */
b0109805 9462 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9463 break;
9464 case 3: /* ldrsb */
b0109805 9465 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9466 break;
9467 case 4: /* ldr */
b0109805 9468 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9469 break;
9470 case 5: /* ldrh */
b0109805 9471 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9472 break;
9473 case 6: /* ldrb */
b0109805 9474 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9475 break;
9476 case 7: /* ldrsh */
b0109805 9477 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9478 break;
9479 }
9480 if (op >= 3) /* load */
b0109805 9481 store_reg(s, rd, tmp);
7d1b0095 9482 tcg_temp_free_i32(addr);
99c475ab
FB
9483 break;
9484
9485 case 6:
9486 /* load/store word immediate offset */
9487 rd = insn & 7;
9488 rn = (insn >> 3) & 7;
b0109805 9489 addr = load_reg(s, rn);
99c475ab 9490 val = (insn >> 4) & 0x7c;
b0109805 9491 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9492
9493 if (insn & (1 << 11)) {
9494 /* load */
b0109805
PB
9495 tmp = gen_ld32(addr, IS_USER(s));
9496 store_reg(s, rd, tmp);
99c475ab
FB
9497 } else {
9498 /* store */
b0109805
PB
9499 tmp = load_reg(s, rd);
9500 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9501 }
7d1b0095 9502 tcg_temp_free_i32(addr);
99c475ab
FB
9503 break;
9504
9505 case 7:
9506 /* load/store byte immediate offset */
9507 rd = insn & 7;
9508 rn = (insn >> 3) & 7;
b0109805 9509 addr = load_reg(s, rn);
99c475ab 9510 val = (insn >> 6) & 0x1f;
b0109805 9511 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9512
9513 if (insn & (1 << 11)) {
9514 /* load */
b0109805
PB
9515 tmp = gen_ld8u(addr, IS_USER(s));
9516 store_reg(s, rd, tmp);
99c475ab
FB
9517 } else {
9518 /* store */
b0109805
PB
9519 tmp = load_reg(s, rd);
9520 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9521 }
7d1b0095 9522 tcg_temp_free_i32(addr);
99c475ab
FB
9523 break;
9524
9525 case 8:
9526 /* load/store halfword immediate offset */
9527 rd = insn & 7;
9528 rn = (insn >> 3) & 7;
b0109805 9529 addr = load_reg(s, rn);
99c475ab 9530 val = (insn >> 5) & 0x3e;
b0109805 9531 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9532
9533 if (insn & (1 << 11)) {
9534 /* load */
b0109805
PB
9535 tmp = gen_ld16u(addr, IS_USER(s));
9536 store_reg(s, rd, tmp);
99c475ab
FB
9537 } else {
9538 /* store */
b0109805
PB
9539 tmp = load_reg(s, rd);
9540 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9541 }
7d1b0095 9542 tcg_temp_free_i32(addr);
99c475ab
FB
9543 break;
9544
9545 case 9:
9546 /* load/store from stack */
9547 rd = (insn >> 8) & 7;
b0109805 9548 addr = load_reg(s, 13);
99c475ab 9549 val = (insn & 0xff) * 4;
b0109805 9550 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9551
9552 if (insn & (1 << 11)) {
9553 /* load */
b0109805
PB
9554 tmp = gen_ld32(addr, IS_USER(s));
9555 store_reg(s, rd, tmp);
99c475ab
FB
9556 } else {
9557 /* store */
b0109805
PB
9558 tmp = load_reg(s, rd);
9559 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9560 }
7d1b0095 9561 tcg_temp_free_i32(addr);
99c475ab
FB
9562 break;
9563
9564 case 10:
9565 /* add PC- or SP-relative address to a low reg (ADR / ADD rd, sp, #imm) */
9566 rd = (insn >> 8) & 7;
5899f386
FB
9567 if (insn & (1 << 11)) {
9568 /* SP */
5e3f878a 9569 tmp = load_reg(s, 13);
5899f386
FB
9570 } else {
9571 /* PC. bit 1 is ignored. */
7d1b0095 9572 tmp = tcg_temp_new_i32();
5e3f878a 9573 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9574 }
99c475ab 9575 val = (insn & 0xff) * 4;
5e3f878a
PB
9576 tcg_gen_addi_i32(tmp, tmp, val);
9577 store_reg(s, rd, tmp);
99c475ab
FB
9578 break;
9579
9580 case 11:
9581 /* misc */
9582 op = (insn >> 8) & 0xf;
9583 switch (op) {
9584 case 0:
9585 /* adjust stack pointer */
b26eefb6 9586 tmp = load_reg(s, 13);
99c475ab
FB
9587 val = (insn & 0x7f) * 4;
9588 if (insn & (1 << 7))
6a0d8a1d 9589 val = -(int32_t)val;
b26eefb6
PB
9590 tcg_gen_addi_i32(tmp, tmp, val);
9591 store_reg(s, 13, tmp);
99c475ab
FB
9592 break;
9593
9ee6e8bb
PB
9594 case 2: /* sign/zero extend. */
9595 ARCH(6);
9596 rd = insn & 7;
9597 rm = (insn >> 3) & 7;
b0109805 9598 tmp = load_reg(s, rm);
9ee6e8bb 9599 switch ((insn >> 6) & 3) {
b0109805
PB
9600 case 0: gen_sxth(tmp); break;
9601 case 1: gen_sxtb(tmp); break;
9602 case 2: gen_uxth(tmp); break;
9603 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9604 }
b0109805 9605 store_reg(s, rd, tmp);
9ee6e8bb 9606 break;
99c475ab
FB
9607 case 4: case 5: case 0xc: case 0xd:
9608 /* push/pop */
b0109805 9609 addr = load_reg(s, 13);
5899f386
FB
9610 if (insn & (1 << 8))
9611 offset = 4;
99c475ab 9612 else
5899f386
FB
9613 offset = 0;
9614 for (i = 0; i < 8; i++) {
9615 if (insn & (1 << i))
9616 offset += 4;
9617 }
9618 if ((insn & (1 << 11)) == 0) {
b0109805 9619 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9620 }
99c475ab
FB
9621 for (i = 0; i < 8; i++) {
9622 if (insn & (1 << i)) {
9623 if (insn & (1 << 11)) {
9624 /* pop */
b0109805
PB
9625 tmp = gen_ld32(addr, IS_USER(s));
9626 store_reg(s, i, tmp);
99c475ab
FB
9627 } else {
9628 /* push */
b0109805
PB
9629 tmp = load_reg(s, i);
9630 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9631 }
5899f386 9632 /* advance to the next address. */
b0109805 9633 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9634 }
9635 }
a50f5b91 9636 TCGV_UNUSED(tmp);
99c475ab
FB
9637 if (insn & (1 << 8)) {
9638 if (insn & (1 << 11)) {
9639 /* pop pc */
b0109805 9640 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9641 /* don't set the pc until the rest of the instruction
9642 has completed */
9643 } else {
9644 /* push lr */
b0109805
PB
9645 tmp = load_reg(s, 14);
9646 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9647 }
b0109805 9648 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9649 }
5899f386 9650 if ((insn & (1 << 11)) == 0) {
b0109805 9651 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9652 }
99c475ab 9653 /* write back the new stack pointer */
b0109805 9654 store_reg(s, 13, addr);
99c475ab 9655 /* set the new PC value */
be5e7a76
DES
9656 if ((insn & 0x0900) == 0x0900) {
9657 store_reg_from_load(env, s, 15, tmp);
9658 }
99c475ab
FB
9659 break;
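/* Worked example for the push/pop case above: PUSH {r0, r1, lr} has bit 8 set
 * and two low registers, so offset == 12.  addr is dropped by 12 up front,
 * r0, r1 and lr are stored at ascending addresses, and addr is then rewound
 * by 12 again so that the value written back to r13 is the new, lower stack
 * pointer. */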
9660
9ee6e8bb
PB
9661 case 1: case 3: case 9: case 11: /* cbz/cbnz (compare and branch on zero/non-zero) */
9662 rm = insn & 7;
d9ba4830 9663 tmp = load_reg(s, rm);
9ee6e8bb
PB
9664 s->condlabel = gen_new_label();
9665 s->condjmp = 1;
9666 if (insn & (1 << 11))
cb63669a 9667 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9668 else
cb63669a 9669 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9670 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9671 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9672 val = (uint32_t)s->pc + 2;
9673 val += offset;
9674 gen_jmp(s, val);
9675 break;
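/* The offset reassembly above combines imm5 (bits [7:3]) with the i bit
 * (bit 9), giving a forward-only reach of 0-126 bytes from the architectural
 * PC.  E.g. 0xB10B (i == 0, imm5 == 1, rm == 3) is CBZ r3, <insn address + 6>:
 * the fall-through path generates the jump, while the brcondi above skips it
 * when the register is non-zero. */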
9676
9677 case 15: /* IT, nop-hint. */
9678 if ((insn & 0xf) == 0) {
9679 gen_nop_hint(s, (insn >> 4) & 0xf);
9680 break;
9681 }
9682 /* If Then. */
9683 s->condexec_cond = (insn >> 4) & 0xe;
9684 s->condexec_mask = insn & 0x1f;
9685 /* No actual code generated for this insn, just set up state. */
9686 break;
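/* Note on the packing above: the IT instruction carries firstcond in bits
 * [7:4] and the mask in bits [3:0].  condexec_cond keeps only the top three
 * condition bits (hence the & 0xe), while the low condition bit rides in
 * bit 4 of the 5-bit condexec_mask and is ORed back in each time the IT state
 * is advanced in the translation loop.  E.g. IT EQ (0xBF08) leaves
 * condexec_cond == 0 and condexec_mask == 0x08 here. */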
9687
06c949e6 9688 case 0xe: /* bkpt */
be5e7a76 9689 ARCH(5);
bc4a0de0 9690 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9691 break;
9692
9ee6e8bb
PB
9693 case 0xa: /* rev */
9694 ARCH(6);
9695 rn = (insn >> 3) & 0x7;
9696 rd = insn & 0x7;
b0109805 9697 tmp = load_reg(s, rn);
9ee6e8bb 9698 switch ((insn >> 6) & 3) {
66896cb8 9699 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9700 case 1: gen_rev16(tmp); break;
9701 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9702 default: goto illegal_op;
9703 }
b0109805 9704 store_reg(s, rd, tmp);
9ee6e8bb
PB
9705 break;
9706
9707 case 6: /* cps */
9708 ARCH(6);
9709 if (IS_USER(s))
9710 break;
9711 if (IS_M(env)) {
8984bd2e 9712 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
d3cb6e2b 9713 /* FAULTMASK */
8984bd2e 9714 if (insn & 1) {
d3cb6e2b 9715 addr = tcg_const_i32(19);
8984bd2e 9716 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9717 tcg_temp_free_i32(addr);
8984bd2e 9718 }
d3cb6e2b 9719 /* PRIMASK */
8984bd2e 9720 if (insn & 2) {
d3cb6e2b 9721 addr = tcg_const_i32(16);
8984bd2e 9722 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9723 tcg_temp_free_i32(addr);
8984bd2e 9724 }
b75263d6 9725 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9726 gen_lookup_tb(s);
9727 } else {
9728 if (insn & (1 << 4))
9729 shift = CPSR_A | CPSR_I | CPSR_F;
9730 else
9731 shift = 0;
fa26df03 9732 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9733 }
9734 break;
9735
99c475ab
FB
9736 default:
9737 goto undef;
9738 }
9739 break;
9740
9741 case 12:
a7d3970d 9742 {
99c475ab 9743 /* load/store multiple */
a7d3970d
PM
9744 TCGv loaded_var;
9745 TCGV_UNUSED(loaded_var);
99c475ab 9746 rn = (insn >> 8) & 0x7;
b0109805 9747 addr = load_reg(s, rn);
99c475ab
FB
9748 for (i = 0; i < 8; i++) {
9749 if (insn & (1 << i)) {
99c475ab
FB
9750 if (insn & (1 << 11)) {
9751 /* load */
b0109805 9752 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9753 if (i == rn) {
9754 loaded_var = tmp;
9755 } else {
9756 store_reg(s, i, tmp);
9757 }
99c475ab
FB
9758 } else {
9759 /* store */
b0109805
PB
9760 tmp = load_reg(s, i);
9761 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9762 }
5899f386 9763 /* advance to the next address */
b0109805 9764 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9765 }
9766 }
b0109805 9767 if ((insn & (1 << rn)) == 0) {
a7d3970d 9768 /* base reg not in list: base register writeback */
b0109805
PB
9769 store_reg(s, rn, addr);
9770 } else {
a7d3970d
PM
9771 /* base reg in list: if load, complete it now */
9772 if (insn & (1 << 11)) {
9773 store_reg(s, rn, loaded_var);
9774 }
7d1b0095 9775 tcg_temp_free_i32(addr);
b0109805 9776 }
99c475ab 9777 break;
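/* The deferred store of loaded_var above is what makes LDMIA with the base
 * register in the register list behave: every other register is written as it
 * is loaded, but the value destined for the base is only committed once the
 * whole transfer has been emitted, presumably so a fault part-way through
 * leaves the base register usable for a restart. */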
a7d3970d 9778 }
99c475ab
FB
9779 case 13:
9780 /* conditional branch or swi */
9781 cond = (insn >> 8) & 0xf;
9782 if (cond == 0xe)
9783 goto undef;
9784
9785 if (cond == 0xf) {
9786 /* swi */
422ebf69 9787 gen_set_pc_im(s->pc);
9ee6e8bb 9788 s->is_jmp = DISAS_SWI;
99c475ab
FB
9789 break;
9790 }
9791 /* generate a conditional jump to next instruction */
e50e6a20 9792 s->condlabel = gen_new_label();
d9ba4830 9793 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9794 s->condjmp = 1;
99c475ab
FB
9795
9796 /* jump to the offset */
5899f386 9797 val = (uint32_t)s->pc + 2;
99c475ab 9798 offset = ((int32_t)insn << 24) >> 24;
5899f386 9799 val += offset << 1;
8aaca4c0 9800 gen_jmp(s, val);
99c475ab
FB
9801 break;
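/* The shift pair above sign-extends the 8-bit immediate, and the target is
 * formed relative to the architectural PC (this instruction's address plus 4,
 * i.e. s->pc + 2 at this point).  E.g. 0xD0FE is BEQ with imm8 == -2, a
 * branch back to the instruction itself. */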
9802
9803 case 14:
358bf29e 9804 if (insn & (1 << 11)) {
9ee6e8bb
PB
9805 if (disas_thumb2_insn(env, s, insn))
9806 goto undef32;
358bf29e
PB
9807 break;
9808 }
9ee6e8bb 9809 /* unconditional branch */
99c475ab
FB
9810 val = (uint32_t)s->pc;
9811 offset = ((int32_t)insn << 21) >> 21;
9812 val += (offset << 1) + 2;
8aaca4c0 9813 gen_jmp(s, val);
99c475ab
FB
9814 break;
9815
9816 case 15:
9ee6e8bb 9817 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9818 goto undef32;
9ee6e8bb 9819 break;
99c475ab
FB
9820 }
9821 return;
9ee6e8bb 9822undef32:
bc4a0de0 9823 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9824 return;
9825illegal_op:
99c475ab 9826undef:
bc4a0de0 9827 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9828}
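/* A minimal sketch (for illustration only, wrapped in #if 0 and not part of
 * the original file) of the per-instruction IT-state advance performed inline
 * in the translation loop below, using the same (cond, mask) packing as
 * DisasContext. */
#if 0
static void thumb_it_advance(int *condexec_cond, int *condexec_mask)
{
    if (*condexec_mask) {
        /* Shift the next then/else bit into the low bit of the condition. */
        *condexec_cond = (*condexec_cond & 0xe) | ((*condexec_mask >> 4) & 1);
        *condexec_mask = (*condexec_mask << 1) & 0x1f;
        if (*condexec_mask == 0) {
            /* IT block exhausted: execution becomes unconditional again. */
            *condexec_cond = 0;
        }
    }
}
#endif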
9829
2c0262af
FB
9830/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9831 basic block 'tb'. If search_pc is TRUE, also generate PC
9832 information for each intermediate instruction. */
0ecb72a5 9833static inline void gen_intermediate_code_internal(CPUARMState *env,
2cfc5f17
TS
9834 TranslationBlock *tb,
9835 int search_pc)
2c0262af
FB
9836{
9837 DisasContext dc1, *dc = &dc1;
a1d1bb31 9838 CPUBreakpoint *bp;
2c0262af
FB
9839 uint16_t *gen_opc_end;
9840 int j, lj;
0fa85d43 9841 target_ulong pc_start;
b5ff1b31 9842 uint32_t next_page_start;
2e70f6ef
PB
9843 int num_insns;
9844 int max_insns;
3b46e624 9845
2c0262af 9846 /* generate intermediate code */
0fa85d43 9847 pc_start = tb->pc;
3b46e624 9848
2c0262af
FB
9849 dc->tb = tb;
9850
2c0262af 9851 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9852
9853 dc->is_jmp = DISAS_NEXT;
9854 dc->pc = pc_start;
8aaca4c0 9855 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9856 dc->condjmp = 0;
7204ab88 9857 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9858 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9859 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9860#if !defined(CONFIG_USER_ONLY)
61f74d6a 9861 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9862#endif
5df8bac1 9863 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9864 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9865 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9866 cpu_F0s = tcg_temp_new_i32();
9867 cpu_F1s = tcg_temp_new_i32();
9868 cpu_F0d = tcg_temp_new_i64();
9869 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9870 cpu_V0 = cpu_F0d;
9871 cpu_V1 = cpu_F1d;
e677137d 9872 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9873 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9874 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9875 lj = -1;
2e70f6ef
PB
9876 num_insns = 0;
9877 max_insns = tb->cflags & CF_COUNT_MASK;
9878 if (max_insns == 0)
9879 max_insns = CF_COUNT_MASK;
9880
9881 gen_icount_start();
e12ce78d 9882
3849902c
PM
9883 tcg_clear_temp_count();
9884
e12ce78d
PM
9885 /* A note on handling of the condexec (IT) bits:
9886 *
9887 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 9888 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 9889 * (1) if the condexec bits are not already zero then we write
0ecb72a5 9890 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
9891 * to do it at the end of the block. (For example if we don't do this
9892 * it's hard to identify whether we can safely skip writing condexec
9893 * at the end of the TB, which we definitely want to do for the case
9894 * where a TB doesn't do anything with the IT state at all.)
9895 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 9896 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
9897 * This is done both for leaving the TB at the end, and for leaving
9898 * it because of an exception we know will happen, which is done in
9899 * gen_exception_insn(). The latter is necessary because we need to
9900 * leave the TB with the PC/IT state just prior to execution of the
9901 * instruction which caused the exception.
9902 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 9903 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
9904 * This is handled in the same way as restoration of the
9905 * PC in these situations: we will be called again with search_pc=1
9906 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9907 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9908 * this to restore the condexec bits.
e12ce78d
PM
9909 *
9910 * Note that there are no instructions which can read the condexec
9911 * bits, and none which can write non-static values to them, so
0ecb72a5 9912 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
9913 * middle of a TB.
9914 */
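/* For reference, the packed layout used throughout is the one carried in the
 * TB flags: (condexec_cond << 4) | (condexec_mask >> 1).  It is unpacked when
 * the DisasContext is initialised above, recorded per-PC in
 * gen_opc_condexec_bits[] during the search_pc pass, and copied back into
 * env->condexec_bits by restore_state_to_opc(). */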
9915
9ee6e8bb
PB
9916 /* Reset the conditional execution bits immediately. This avoids
9917 complications trying to do it at the end of the block. */
98eac7ca 9918 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9919 {
7d1b0095 9920 TCGv tmp = tcg_temp_new_i32();
8f01245e 9921 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9922 store_cpu_field(tmp, condexec_bits);
8f01245e 9923 }
2c0262af 9924 do {
fbb4a2e3
PB
9925#ifdef CONFIG_USER_ONLY
9926 /* Intercept jump to the magic kernel page. */
9927 if (dc->pc >= 0xffff0000) {
9928 /* We always get here via a jump, so we know we are not in a
9929 conditional execution block. */
9930 gen_exception(EXCP_KERNEL_TRAP);
9931 dc->is_jmp = DISAS_UPDATE;
9932 break;
9933 }
9934#else
9ee6e8bb
PB
9935 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9936 /* We always get here via a jump, so we know we are not in a
9937 conditional execution block. */
d9ba4830 9938 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9939 dc->is_jmp = DISAS_UPDATE;
9940 break;
9ee6e8bb
PB
9941 }
9942#endif
9943
72cf2d4f
BS
9944 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9945 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9946 if (bp->pc == dc->pc) {
bc4a0de0 9947 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9948 /* Advance PC so that clearing the breakpoint will
9949 invalidate this TB. */
9950 dc->pc += 2;
9951 goto done_generating;
1fddef4b
FB
9952 break;
9953 }
9954 }
9955 }
2c0262af
FB
9956 if (search_pc) {
9957 j = gen_opc_ptr - gen_opc_buf;
9958 if (lj < j) {
9959 lj++;
9960 while (lj < j)
9961 gen_opc_instr_start[lj++] = 0;
9962 }
0fa85d43 9963 gen_opc_pc[lj] = dc->pc;
e12ce78d 9964 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9965 gen_opc_instr_start[lj] = 1;
2e70f6ef 9966 gen_opc_icount[lj] = num_insns;
2c0262af 9967 }
e50e6a20 9968
2e70f6ef
PB
9969 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9970 gen_io_start();
9971
5642463a
PM
9972 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9973 tcg_gen_debug_insn_start(dc->pc);
9974 }
9975
7204ab88 9976 if (dc->thumb) {
9ee6e8bb
PB
9977 disas_thumb_insn(env, dc);
9978 if (dc->condexec_mask) {
9979 dc->condexec_cond = (dc->condexec_cond & 0xe)
9980 | ((dc->condexec_mask >> 4) & 1);
9981 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9982 if (dc->condexec_mask == 0) {
9983 dc->condexec_cond = 0;
9984 }
9985 }
9986 } else {
9987 disas_arm_insn(env, dc);
9988 }
e50e6a20
FB
9989
9990 if (dc->condjmp && !dc->is_jmp) {
9991 gen_set_label(dc->condlabel);
9992 dc->condjmp = 0;
9993 }
3849902c
PM
9994
9995 if (tcg_check_temp_count()) {
9996 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9997 }
9998
aaf2d97d 9999 /* Translation stops when a conditional branch is encountered.
e50e6a20 10000 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10001 * Also stop translation when a page boundary is reached. This
bf20dc07 10002 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10003 num_insns++;
1fddef4b
FB
10004 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
10005 !env->singlestep_enabled &&
1b530a6d 10006 !singlestep &&
2e70f6ef
PB
10007 dc->pc < next_page_start &&
10008 num_insns < max_insns);
10009
10010 if (tb->cflags & CF_LAST_IO) {
10011 if (dc->condjmp) {
10012 /* FIXME: This can theoretically happen with self-modifying
10013 code. */
10014 cpu_abort(env, "IO on conditional branch instruction");
10015 }
10016 gen_io_end();
10017 }
9ee6e8bb 10018
b5ff1b31 10019 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10020 instruction was a conditional branch or trap, and the PC has
10021 already been written. */
551bd27f 10022 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 10023 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10024 if (dc->condjmp) {
9ee6e8bb
PB
10025 gen_set_condexec(dc);
10026 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10027 gen_exception(EXCP_SWI);
9ee6e8bb 10028 } else {
d9ba4830 10029 gen_exception(EXCP_DEBUG);
9ee6e8bb 10030 }
e50e6a20
FB
10031 gen_set_label(dc->condlabel);
10032 }
10033 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 10034 gen_set_pc_im(dc->pc);
e50e6a20 10035 dc->condjmp = 0;
8aaca4c0 10036 }
9ee6e8bb
PB
10037 gen_set_condexec(dc);
10038 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10039 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10040 } else {
10041 /* FIXME: Single stepping a WFI insn will not halt
10042 the CPU. */
d9ba4830 10043 gen_exception(EXCP_DEBUG);
9ee6e8bb 10044 }
8aaca4c0 10045 } else {
9ee6e8bb
PB
10046 /* While branches must always occur at the end of an IT block,
10047 there are a few other things that can cause us to terminate
10048 the TB in the middle of an IT block:
10049 - Exception generating instructions (bkpt, swi, undefined).
10050 - Page boundaries.
10051 - Hardware watchpoints.
10052 Hardware breakpoints have already been handled and skip this code.
10053 */
10054 gen_set_condexec(dc);
8aaca4c0 10055 switch(dc->is_jmp) {
8aaca4c0 10056 case DISAS_NEXT:
6e256c93 10057 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10058 break;
10059 default:
10060 case DISAS_JUMP:
10061 case DISAS_UPDATE:
10062 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10063 tcg_gen_exit_tb(0);
8aaca4c0
FB
10064 break;
10065 case DISAS_TB_JUMP:
10066 /* nothing more to generate */
10067 break;
9ee6e8bb 10068 case DISAS_WFI:
d9ba4830 10069 gen_helper_wfi();
9ee6e8bb
PB
10070 break;
10071 case DISAS_SWI:
d9ba4830 10072 gen_exception(EXCP_SWI);
9ee6e8bb 10073 break;
8aaca4c0 10074 }
e50e6a20
FB
10075 if (dc->condjmp) {
10076 gen_set_label(dc->condlabel);
9ee6e8bb 10077 gen_set_condexec(dc);
6e256c93 10078 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10079 dc->condjmp = 0;
10080 }
2c0262af 10081 }
2e70f6ef 10082
9ee6e8bb 10083done_generating:
2e70f6ef 10084 gen_icount_end(tb, num_insns);
2c0262af
FB
10085 *gen_opc_ptr = INDEX_op_end;
10086
10087#ifdef DEBUG_DISAS
8fec2b8c 10088 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10089 qemu_log("----------------\n");
10090 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 10091 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 10092 qemu_log("\n");
2c0262af
FB
10093 }
10094#endif
b5ff1b31
FB
10095 if (search_pc) {
10096 j = gen_opc_ptr - gen_opc_buf;
10097 lj++;
10098 while (lj <= j)
10099 gen_opc_instr_start[lj++] = 0;
b5ff1b31 10100 } else {
2c0262af 10101 tb->size = dc->pc - pc_start;
2e70f6ef 10102 tb->icount = num_insns;
b5ff1b31 10103 }
2c0262af
FB
10104}
10105
0ecb72a5 10106void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10107{
2cfc5f17 10108 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
10109}
10110
0ecb72a5 10111void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10112{
2cfc5f17 10113 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
10114}
10115
b5ff1b31
FB
10116static const char *cpu_mode_names[16] = {
10117 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10118 "???", "???", "???", "und", "???", "???", "???", "sys"
10119};
9ee6e8bb 10120
0ecb72a5 10121void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10122 int flags)
2c0262af
FB
10123{
10124 int i;
06e80fc9 10125#if 0
bc380d17 10126 union {
b7bcbe95
FB
10127 uint32_t i;
10128 float s;
10129 } s0, s1;
10130 CPU_DoubleU d;
a94a6abf
PB
10131 /* ??? This assumes float64 and double have the same layout.
10132 Oh well, it's only debug dumps. */
10133 union {
10134 float64 f64;
10135 double d;
10136 } d0;
06e80fc9 10137#endif
b5ff1b31 10138 uint32_t psr;
2c0262af
FB
10139
10140 for(i=0;i<16;i++) {
7fe48483 10141 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10142 if ((i % 4) == 3)
7fe48483 10143 cpu_fprintf(f, "\n");
2c0262af 10144 else
7fe48483 10145 cpu_fprintf(f, " ");
2c0262af 10146 }
b5ff1b31 10147 psr = cpsr_read(env);
687fa640
TS
10148 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10149 psr,
b5ff1b31
FB
10150 psr & (1 << 31) ? 'N' : '-',
10151 psr & (1 << 30) ? 'Z' : '-',
10152 psr & (1 << 29) ? 'C' : '-',
10153 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10154 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10155 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10156
5e3f878a 10157#if 0
b7bcbe95 10158 for (i = 0; i < 16; i++) {
8e96005d
FB
10159 d.d = env->vfp.regs[i];
10160 s0.i = d.l.lower;
10161 s1.i = d.l.upper;
a94a6abf
PB
10162 d0.f64 = d.d;
10163 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 10164 i * 2, (int)s0.i, s0.s,
a94a6abf 10165 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 10166 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 10167 d0.d);
b7bcbe95 10168 }
40f137e1 10169 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 10170#endif
2c0262af 10171}
a6b025d3 10172
0ecb72a5 10173void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
10174{
10175 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 10176 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10177}