git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
2c0262af 28#include "disas.h"
57fec1fe 29#include "tcg-op.h"
79383c9c 30#include "qemu-log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
9ee6e8bb
PB
 56 /* Thumb-2 conditional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
d8fd2954 62 int bswap_code;
b5ff1b31
FB
63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22
PM
67 int vec_len;
68 int vec_stride;
2c0262af
FB
69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb
PB
79/* These instructions trap after executing, so defer them until after the
 80 conditional execution state has been updated. */
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
426f5abc
PB
88static TCGv_i32 cpu_exclusive_addr;
89static TCGv_i32 cpu_exclusive_val;
90static TCGv_i32 cpu_exclusive_high;
91#ifdef CONFIG_USER_ONLY
92static TCGv_i32 cpu_exclusive_test;
93static TCGv_i32 cpu_exclusive_info;
94#endif
ad69471c 95
b26eefb6 96/* FIXME: These should be removed. */
a7812ae4
PB
97static TCGv cpu_F0s, cpu_F1s;
98static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 99
2e70f6ef
PB
100#include "gen-icount.h"
101
155c3eac
FN
102static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
105
b26eefb6
PB
106/* initialize TCG globals. */
107void arm_translate_init(void)
108{
155c3eac
FN
109 int i;
110
a7812ae4
PB
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
112
155c3eac
FN
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 115 offsetof(CPUARMState, regs[i]),
155c3eac
FN
116 regnames[i]);
117 }
426f5abc 118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 119 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 121 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 123 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
124#ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 126 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 128 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 129#endif
155c3eac 130
a7812ae4 131#define GEN_HELPER 2
7b59220e 132#include "helper.h"
b26eefb6
PB
133}
134
d9ba4830
PB
135static inline TCGv load_cpu_offset(int offset)
136{
7d1b0095 137 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
140}
141
0ecb72a5 142#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830
PB
143
144static inline void store_cpu_offset(TCGv var, int offset)
145{
146 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 147 tcg_temp_free_i32(var);
d9ba4830
PB
148}
149
150#define store_cpu_field(var, name) \
0ecb72a5 151 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 152
b26eefb6
PB
153/* Set a variable to the value of a CPU register. */
154static void load_reg_var(DisasContext *s, TCGv var, int reg)
155{
156 if (reg == 15) {
157 uint32_t addr;
 158 /* normally, since we already updated PC, we only need to add one insn */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
155c3eac 165 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
166 }
167}
168
169/* Create a new temporary and set it to the value of a CPU register. */
170static inline TCGv load_reg(DisasContext *s, int reg)
171{
7d1b0095 172 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
173 load_reg_var(s, tmp, reg);
174 return tmp;
175}
176
177/* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
179static void store_reg(DisasContext *s, int reg, TCGv var)
180{
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
184 }
155c3eac 185 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 186 tcg_temp_free_i32(var);
b26eefb6
PB
187}
188
b26eefb6 189/* Value extensions. */
86831435
PB
190#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
192#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194
1497c961
PB
195#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 197
b26eefb6 198
b75263d6
JR
199static inline void gen_set_cpsr(TCGv var, uint32_t mask)
200{
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
204}
d9ba4830
PB
205/* Set NZCV flags from the high 4 bits of var. */
206#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
207
208static void gen_exception(int excp)
209{
7d1b0095 210 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
7d1b0095 213 tcg_temp_free_i32(tmp);
d9ba4830
PB
214}
215
3670669c
PB
216static void gen_smul_dual(TCGv a, TCGv b)
217{
7d1b0095
PM
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
3670669c 222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 223 tcg_temp_free_i32(tmp2);
3670669c
PB
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
7d1b0095 228 tcg_temp_free_i32(tmp1);
3670669c
PB
229}
230
231/* Byteswap each halfword. */
232static void gen_rev16(TCGv var)
233{
7d1b0095 234 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
7d1b0095 240 tcg_temp_free_i32(tmp);
3670669c
PB
241}
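
/* Illustrative sketch only (hypothetical helper, not generated code): the
 * plain C equivalent of the TCG sequence above, assuming a 32-bit input.
 * Example: ref_rev16(0xAABBCCDD) == 0xBBAADDCC.
 */
static inline uint32_t ref_rev16(uint32_t x)
{
    return ((x >> 8) & 0x00ff00ff) | ((x << 8) & 0xff00ff00);
}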
242
243/* Byteswap low halfword and sign extend. */
244static void gen_revsh(TCGv var)
245{
1a855029
AJ
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
3670669c
PB
249}
250
251/* Unsigned bitfield extract. */
252static void gen_ubfx(TCGv var, int shift, uint32_t mask)
253{
254 if (shift)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
257}
258
259/* Signed bitfield extract. */
260static void gen_sbfx(TCGv var, int shift, int width)
261{
262 uint32_t signbit;
263
264 if (shift)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
271 }
272}
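
/* Illustrative sketch only (hypothetical helper, not generated code): the
 * same mask/xor/subtract sign-extension trick in plain C.  XORing with the
 * sign bit and then subtracting it propagates the sign into the upper bits.
 * Example: ref_sbfx(0x000000f0, 4, 4) == 0xffffffff (i.e. -1).
 */
static inline uint32_t ref_sbfx(uint32_t x, int shift, int width)
{
    uint32_t signbit;
    /* Arithmetic shift, matching tcg_gen_sari_i32 above. */
    x = (uint32_t)((int32_t)x >> shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        x &= (1u << width) - 1;
        x = (x ^ signbit) - signbit;
    }
    return x;
}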
273
274/* Bitfield insertion. Insert val into base. Clobbers base and val. */
275static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
276{
3670669c 277 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
280 tcg_gen_or_i32(dest, base, val);
281}
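
/* Illustrative sketch only (hypothetical helper, not generated code): the
 * bitfield insertion above in plain C - mask 'val', shift it into place and
 * merge it into the cleared slot in 'base'.
 * Example: ref_bfi(0xffff0000, 0xab, 8, 0xff) == 0xffffab00.
 */
static inline uint32_t ref_bfi(uint32_t base, uint32_t val, int shift,
                               uint32_t mask)
{
    return (base & ~(mask << shift)) | ((val & mask) << shift);
}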
282
838fa72d
AJ
283/* Return (b << 32) + a. Mark inputs as dead */
284static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 285{
838fa72d
AJ
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
287
288 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 289 tcg_temp_free_i32(b);
838fa72d
AJ
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
292
293 tcg_temp_free_i64(tmp64);
294 return a;
295}
296
297/* Return (b << 32) - a. Mark inputs as dead. */
298static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
299{
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
301
302 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 303 tcg_temp_free_i32(b);
838fa72d
AJ
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
306
307 tcg_temp_free_i64(tmp64);
308 return a;
3670669c
PB
309}
310
8f01245e
PB
311/* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
5e3f878a 313/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 314static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 315{
a7812ae4
PB
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
318
319 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 320 tcg_temp_free_i32(a);
5e3f878a 321 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 322 tcg_temp_free_i32(b);
5e3f878a 323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 324 tcg_temp_free_i64(tmp2);
5e3f878a
PB
325 return tmp1;
326}
327
a7812ae4 328static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 329{
a7812ae4
PB
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
332
333 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 334 tcg_temp_free_i32(a);
5e3f878a 335 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 336 tcg_temp_free_i32(b);
5e3f878a 337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 338 tcg_temp_free_i64(tmp2);
5e3f878a
PB
339 return tmp1;
340}
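
/* Illustrative sketch only (hypothetical helpers, not generated code): the
 * plain C operations that gen_mulu_i64_i32/gen_muls_i64_i32 synthesize from
 * two widening extensions plus a 64-bit multiply, since this TCG version has
 * no native widening multiply op (see the FIXME above).
 */
static inline uint64_t ref_mulu_64(uint32_t a, uint32_t b)
{
    return (uint64_t)a * b;
}
static inline int64_t ref_muls_64(int32_t a, int32_t b)
{
    return (int64_t)a * b;
}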
341
8f01245e
PB
342/* Swap low and high halfwords. */
343static void gen_swap_half(TCGv var)
344{
7d1b0095 345 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
7d1b0095 349 tcg_temp_free_i32(tmp);
8f01245e
PB
350}
351
b26eefb6
PB
352/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp;
357 */
358
359static void gen_add16(TCGv t0, TCGv t1)
360{
7d1b0095 361 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
b26eefb6
PB
370}
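
/* Illustrative sketch only (hypothetical helper, not generated code): the
 * dual 16-bit add above in plain C.  Clearing bit 15 of both inputs stops
 * the low halfword's carry from rippling into the high halfword; the saved
 * XOR restores the correct bit 15 of the low halfword afterwards.
 * Example: ref_add16(0x0001ffff, 0x00010001) == 0x00020000.
 */
static inline uint32_t ref_add16(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000u;
    t1 &= ~0x8000u;
    return (t0 + t1) ^ tmp;
}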
371
0ecb72a5 372#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))
9a119ff6 373
b26eefb6
PB
374/* Set CF to the top bit of var. */
375static void gen_set_CF_bit31(TCGv var)
376{
7d1b0095 377 TCGv tmp = tcg_temp_new_i32();
b26eefb6 378 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 379 gen_set_CF(tmp);
7d1b0095 380 tcg_temp_free_i32(tmp);
b26eefb6
PB
381}
382
383/* Set N and Z flags from var. */
384static inline void gen_logic_CC(TCGv var)
385{
0ecb72a5
AF
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
b26eefb6
PB
388}
389
390/* T0 += T1 + CF. */
396e467c 391static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 392{
d9ba4830 393 TCGv tmp;
396e467c 394 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 395 tmp = load_cpu_field(CF);
396e467c 396 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 397 tcg_temp_free_i32(tmp);
b26eefb6
PB
398}
399
e9bb4aa9
JR
400/* dest = T0 + T1 + CF. */
401static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
402{
403 TCGv tmp;
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 407 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
408}
409
3670669c
PB
410/* dest = T0 - T1 + CF - 1. */
411static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
412{
d9ba4830 413 TCGv tmp;
3670669c 414 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 415 tmp = load_cpu_field(CF);
3670669c
PB
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 418 tcg_temp_free_i32(tmp);
3670669c
PB
419}
420
ad69471c
PB
421/* FIXME: Implement this natively. */
422#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
423
9a119ff6 424static void shifter_out_im(TCGv var, int shift)
b26eefb6 425{
7d1b0095 426 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
427 if (shift == 0) {
428 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 429 } else {
9a119ff6 430 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 431 if (shift != 31)
9a119ff6
PB
432 tcg_gen_andi_i32(tmp, tmp, 1);
433 }
434 gen_set_CF(tmp);
7d1b0095 435 tcg_temp_free_i32(tmp);
9a119ff6 436}
b26eefb6 437
9a119ff6
PB
438/* Shift by immediate. Includes special handling for shift == 0. */
439static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
440{
441 switch (shiftop) {
442 case 0: /* LSL */
443 if (shift != 0) {
444 if (flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
447 }
448 break;
449 case 1: /* LSR */
450 if (shift == 0) {
451 if (flags) {
452 tcg_gen_shri_i32(var, var, 31);
453 gen_set_CF(var);
454 }
455 tcg_gen_movi_i32(var, 0);
456 } else {
457 if (flags)
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
460 }
461 break;
462 case 2: /* ASR */
463 if (shift == 0)
464 shift = 32;
465 if (flags)
466 shifter_out_im(var, shift - 1);
467 if (shift == 32)
468 shift = 31;
469 tcg_gen_sari_i32(var, var, shift);
470 break;
471 case 3: /* ROR/RRX */
472 if (shift != 0) {
473 if (flags)
474 shifter_out_im(var, shift - 1);
f669df27 475 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 476 } else {
d9ba4830 477 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
478 if (flags)
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
7d1b0095 483 tcg_temp_free_i32(tmp);
b26eefb6
PB
484 }
485 }
486};
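
/* Note on the immediate shift encoding handled above: a shift amount of 0 is
 * special-cased as in the ARM ARM - LSR #0 and ASR #0 encode LSR #32 and
 * ASR #32 respectively, and ROR #0 encodes RRX (rotate right by one through
 * the carry flag), which is why the ROR path with shift == 0 shifts the
 * saved C flag into bit 31.
 */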
487
8984bd2e
PB
488static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
490{
491 if (flags) {
492 switch (shiftop) {
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
497 }
498 } else {
499 switch (shiftop) {
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
505 }
506 }
7d1b0095 507 tcg_temp_free_i32(shift);
8984bd2e
PB
508}
509
6ddbc6e4
PB
510#define PAS_OP(pfx) \
511 switch (op2) { \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
518 }
d9ba4830 519static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 520{
a7812ae4 521 TCGv_ptr tmp;
6ddbc6e4
PB
522
523 switch (op1) {
524#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
525 case 1:
a7812ae4 526 tmp = tcg_temp_new_ptr();
0ecb72a5 527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 528 PAS_OP(s)
b75263d6 529 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
530 break;
531 case 5:
a7812ae4 532 tmp = tcg_temp_new_ptr();
0ecb72a5 533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 534 PAS_OP(u)
b75263d6 535 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
536 break;
537#undef gen_pas_helper
538#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
539 case 2:
540 PAS_OP(q);
541 break;
542 case 3:
543 PAS_OP(sh);
544 break;
545 case 6:
546 PAS_OP(uq);
547 break;
548 case 7:
549 PAS_OP(uh);
550 break;
551#undef gen_pas_helper
552 }
553}
9ee6e8bb
PB
554#undef PAS_OP
555
6ddbc6e4
PB
556/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
557#define PAS_OP(pfx) \
ed89a2f1 558 switch (op1) { \
6ddbc6e4
PB
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
565 }
d9ba4830 566static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 567{
a7812ae4 568 TCGv_ptr tmp;
6ddbc6e4 569
ed89a2f1 570 switch (op2) {
6ddbc6e4
PB
571#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 0:
a7812ae4 573 tmp = tcg_temp_new_ptr();
0ecb72a5 574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 575 PAS_OP(s)
b75263d6 576 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
577 break;
578 case 4:
a7812ae4 579 tmp = tcg_temp_new_ptr();
0ecb72a5 580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 581 PAS_OP(u)
b75263d6 582 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
583 break;
584#undef gen_pas_helper
585#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 1:
587 PAS_OP(q);
588 break;
589 case 2:
590 PAS_OP(sh);
591 break;
592 case 5:
593 PAS_OP(uq);
594 break;
595 case 6:
596 PAS_OP(uh);
597 break;
598#undef gen_pas_helper
599 }
600}
9ee6e8bb
PB
601#undef PAS_OP
602
d9ba4830
PB
603static void gen_test_cc(int cc, int label)
604{
605 TCGv tmp;
606 TCGv tmp2;
d9ba4830
PB
607 int inv;
608
d9ba4830
PB
609 switch (cc) {
610 case 0: /* eq: Z */
6fbe23d5 611 tmp = load_cpu_field(ZF);
cb63669a 612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
613 break;
614 case 1: /* ne: !Z */
6fbe23d5 615 tmp = load_cpu_field(ZF);
cb63669a 616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
617 break;
618 case 2: /* cs: C */
619 tmp = load_cpu_field(CF);
cb63669a 620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
621 break;
622 case 3: /* cc: !C */
623 tmp = load_cpu_field(CF);
cb63669a 624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
625 break;
626 case 4: /* mi: N */
6fbe23d5 627 tmp = load_cpu_field(NF);
cb63669a 628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
629 break;
630 case 5: /* pl: !N */
6fbe23d5 631 tmp = load_cpu_field(NF);
cb63669a 632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
633 break;
634 case 6: /* vs: V */
635 tmp = load_cpu_field(VF);
cb63669a 636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
637 break;
638 case 7: /* vc: !V */
639 tmp = load_cpu_field(VF);
cb63669a 640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
641 break;
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
cb63669a 645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 646 tcg_temp_free_i32(tmp);
6fbe23d5 647 tmp = load_cpu_field(ZF);
cb63669a 648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
649 gen_set_label(inv);
650 break;
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
cb63669a 653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 654 tcg_temp_free_i32(tmp);
6fbe23d5 655 tmp = load_cpu_field(ZF);
cb63669a 656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
657 break;
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
6fbe23d5 660 tmp2 = load_cpu_field(NF);
d9ba4830 661 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 662 tcg_temp_free_i32(tmp2);
cb63669a 663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
664 break;
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
6fbe23d5 667 tmp2 = load_cpu_field(NF);
d9ba4830 668 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 669 tcg_temp_free_i32(tmp2);
cb63669a 670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
671 break;
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
6fbe23d5 674 tmp = load_cpu_field(ZF);
cb63669a 675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 676 tcg_temp_free_i32(tmp);
d9ba4830 677 tmp = load_cpu_field(VF);
6fbe23d5 678 tmp2 = load_cpu_field(NF);
d9ba4830 679 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 680 tcg_temp_free_i32(tmp2);
cb63669a 681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
682 gen_set_label(inv);
683 break;
684 case 13: /* le: Z || N != V */
6fbe23d5 685 tmp = load_cpu_field(ZF);
cb63669a 686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 687 tcg_temp_free_i32(tmp);
d9ba4830 688 tmp = load_cpu_field(VF);
6fbe23d5 689 tmp2 = load_cpu_field(NF);
d9ba4830 690 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 691 tcg_temp_free_i32(tmp2);
cb63669a 692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
693 break;
694 default:
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
696 abort();
697 }
7d1b0095 698 tcg_temp_free_i32(tmp);
d9ba4830 699}
2c0262af 700
b1d8e52e 701static const uint8_t table_logic_cc[16] = {
2c0262af
FB
702 1, /* and */
703 1, /* xor */
704 0, /* sub */
705 0, /* rsb */
706 0, /* add */
707 0, /* adc */
708 0, /* sbc */
709 0, /* rsc */
710 1, /* andl */
711 1, /* xorl */
712 0, /* cmp */
713 0, /* cmn */
714 1, /* orr */
715 1, /* mov */
716 1, /* bic */
717 1, /* mvn */
718};
3b46e624 719
d9ba4830
PB
720/* Set PC and Thumb state from an immediate address. */
721static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 722{
b26eefb6 723 TCGv tmp;
99c475ab 724
b26eefb6 725 s->is_jmp = DISAS_UPDATE;
d9ba4830 726 if (s->thumb != (addr & 1)) {
7d1b0095 727 tmp = tcg_temp_new_i32();
d9ba4830 728 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 730 tcg_temp_free_i32(tmp);
d9ba4830 731 }
155c3eac 732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
733}
734
735/* Set PC and Thumb state from var. var is marked as dead. */
736static inline void gen_bx(DisasContext *s, TCGv var)
737{
d9ba4830 738 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
d9ba4830
PB
742}
743
21aeb343
JR
744/* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
0ecb72a5 747static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
21aeb343
JR
748 int reg, TCGv var)
749{
750 if (reg == 15 && ENABLE_ARCH_7) {
751 gen_bx(s, var);
752 } else {
753 store_reg(s, reg, var);
754 }
755}
756
be5e7a76
DES
757/* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 761static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
be5e7a76
DES
762 int reg, TCGv var)
763{
764 if (reg == 15 && ENABLE_ARCH_5) {
765 gen_bx(s, var);
766 } else {
767 store_reg(s, reg, var);
768 }
769}
770
b0109805
PB
771static inline TCGv gen_ld8s(TCGv addr, int index)
772{
7d1b0095 773 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
774 tcg_gen_qemu_ld8s(tmp, addr, index);
775 return tmp;
776}
777static inline TCGv gen_ld8u(TCGv addr, int index)
778{
7d1b0095 779 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
780 tcg_gen_qemu_ld8u(tmp, addr, index);
781 return tmp;
782}
783static inline TCGv gen_ld16s(TCGv addr, int index)
784{
7d1b0095 785 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
786 tcg_gen_qemu_ld16s(tmp, addr, index);
787 return tmp;
788}
789static inline TCGv gen_ld16u(TCGv addr, int index)
790{
7d1b0095 791 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
792 tcg_gen_qemu_ld16u(tmp, addr, index);
793 return tmp;
794}
795static inline TCGv gen_ld32(TCGv addr, int index)
796{
7d1b0095 797 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
798 tcg_gen_qemu_ld32u(tmp, addr, index);
799 return tmp;
800}
84496233
JR
801static inline TCGv_i64 gen_ld64(TCGv addr, int index)
802{
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
805 return tmp;
806}
b0109805
PB
807static inline void gen_st8(TCGv val, TCGv addr, int index)
808{
809 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 810 tcg_temp_free_i32(val);
b0109805
PB
811}
812static inline void gen_st16(TCGv val, TCGv addr, int index)
813{
814 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 815 tcg_temp_free_i32(val);
b0109805
PB
816}
817static inline void gen_st32(TCGv val, TCGv addr, int index)
818{
819 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 820 tcg_temp_free_i32(val);
b0109805 821}
84496233
JR
822static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
823{
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
826}
b5ff1b31 827
5e3f878a
PB
828static inline void gen_set_pc_im(uint32_t val)
829{
155c3eac 830 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
831}
832
b5ff1b31
FB
833/* Force a TB lookup after an instruction that changes the CPU state. */
834static inline void gen_lookup_tb(DisasContext *s)
835{
a6445c52 836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
837 s->is_jmp = DISAS_UPDATE;
838}
839
b0109805
PB
840static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
841 TCGv var)
2c0262af 842{
1e8d4eec 843 int val, rm, shift, shiftop;
b26eefb6 844 TCGv offset;
2c0262af
FB
845
846 if (!(insn & (1 << 25))) {
847 /* immediate */
848 val = insn & 0xfff;
849 if (!(insn & (1 << 23)))
850 val = -val;
537730b9 851 if (val != 0)
b0109805 852 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
853 } else {
854 /* shift/register */
855 rm = (insn) & 0xf;
856 shift = (insn >> 7) & 0x1f;
1e8d4eec 857 shiftop = (insn >> 5) & 3;
b26eefb6 858 offset = load_reg(s, rm);
9a119ff6 859 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 860 if (!(insn & (1 << 23)))
b0109805 861 tcg_gen_sub_i32(var, var, offset);
2c0262af 862 else
b0109805 863 tcg_gen_add_i32(var, var, offset);
7d1b0095 864 tcg_temp_free_i32(offset);
2c0262af
FB
865 }
866}
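
/* Worked example for the address calculation above (illustrative values):
 * for the immediate form (bit 25 clear) with imm12 = 0x10 and the U bit
 * (bit 23) clear, the offset is negated, so 'var' becomes base - 16.  For
 * the register form (bit 25 set), Rm is read, shifted by the immediate
 * shifter (bits 5-6 select the shift type, bits 7-11 the amount), and the
 * result is added to or subtracted from 'var' according to the U bit.
 */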
867
191f9a93 868static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 869 int extra, TCGv var)
2c0262af
FB
870{
871 int val, rm;
b26eefb6 872 TCGv offset;
3b46e624 873
2c0262af
FB
874 if (insn & (1 << 22)) {
875 /* immediate */
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
878 val = -val;
18acad92 879 val += extra;
537730b9 880 if (val != 0)
b0109805 881 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
882 } else {
883 /* register */
191f9a93 884 if (extra)
b0109805 885 tcg_gen_addi_i32(var, var, extra);
2c0262af 886 rm = (insn) & 0xf;
b26eefb6 887 offset = load_reg(s, rm);
2c0262af 888 if (!(insn & (1 << 23)))
b0109805 889 tcg_gen_sub_i32(var, var, offset);
2c0262af 890 else
b0109805 891 tcg_gen_add_i32(var, var, offset);
7d1b0095 892 tcg_temp_free_i32(offset);
2c0262af
FB
893 }
894}
895
5aaebd13
PM
896static TCGv_ptr get_fpstatus_ptr(int neon)
897{
898 TCGv_ptr statusptr = tcg_temp_new_ptr();
899 int offset;
900 if (neon) {
0ecb72a5 901 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 902 } else {
0ecb72a5 903 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
904 }
905 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
906 return statusptr;
907}
908
4373f3ce
PB
909#define VFP_OP2(name) \
910static inline void gen_vfp_##name(int dp) \
911{ \
ae1857ec
PM
912 TCGv_ptr fpst = get_fpstatus_ptr(0); \
913 if (dp) { \
914 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
915 } else { \
916 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
917 } \
918 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
919}
920
4373f3ce
PB
921VFP_OP2(add)
922VFP_OP2(sub)
923VFP_OP2(mul)
924VFP_OP2(div)
925
926#undef VFP_OP2
927
605a6aed
PM
928static inline void gen_vfp_F1_mul(int dp)
929{
930 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 931 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 932 if (dp) {
ae1857ec 933 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 934 } else {
ae1857ec 935 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 936 }
ae1857ec 937 tcg_temp_free_ptr(fpst);
605a6aed
PM
938}
939
940static inline void gen_vfp_F1_neg(int dp)
941{
942 /* Like gen_vfp_neg() but put result in F1 */
943 if (dp) {
944 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
945 } else {
946 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
947 }
948}
949
4373f3ce
PB
950static inline void gen_vfp_abs(int dp)
951{
952 if (dp)
953 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
954 else
955 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
956}
957
958static inline void gen_vfp_neg(int dp)
959{
960 if (dp)
961 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
962 else
963 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
964}
965
966static inline void gen_vfp_sqrt(int dp)
967{
968 if (dp)
969 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
970 else
971 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
972}
973
974static inline void gen_vfp_cmp(int dp)
975{
976 if (dp)
977 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
978 else
979 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
980}
981
982static inline void gen_vfp_cmpe(int dp)
983{
984 if (dp)
985 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
986 else
987 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
988}
989
990static inline void gen_vfp_F1_ld0(int dp)
991{
992 if (dp)
5b340b51 993 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 994 else
5b340b51 995 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
996}
997
5500b06c
PM
998#define VFP_GEN_ITOF(name) \
999static inline void gen_vfp_##name(int dp, int neon) \
1000{ \
5aaebd13 1001 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1002 if (dp) { \
1003 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1004 } else { \
1005 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1006 } \
b7fa9214 1007 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1008}
1009
5500b06c
PM
1010VFP_GEN_ITOF(uito)
1011VFP_GEN_ITOF(sito)
1012#undef VFP_GEN_ITOF
4373f3ce 1013
5500b06c
PM
1014#define VFP_GEN_FTOI(name) \
1015static inline void gen_vfp_##name(int dp, int neon) \
1016{ \
5aaebd13 1017 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1018 if (dp) { \
1019 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1020 } else { \
1021 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1022 } \
b7fa9214 1023 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1024}
1025
5500b06c
PM
1026VFP_GEN_FTOI(toui)
1027VFP_GEN_FTOI(touiz)
1028VFP_GEN_FTOI(tosi)
1029VFP_GEN_FTOI(tosiz)
1030#undef VFP_GEN_FTOI
4373f3ce
PB
1031
1032#define VFP_GEN_FIX(name) \
5500b06c 1033static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1034{ \
b75263d6 1035 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1036 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1037 if (dp) { \
1038 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1039 } else { \
1040 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1041 } \
b75263d6 1042 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1043 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1044}
4373f3ce
PB
1045VFP_GEN_FIX(tosh)
1046VFP_GEN_FIX(tosl)
1047VFP_GEN_FIX(touh)
1048VFP_GEN_FIX(toul)
1049VFP_GEN_FIX(shto)
1050VFP_GEN_FIX(slto)
1051VFP_GEN_FIX(uhto)
1052VFP_GEN_FIX(ulto)
1053#undef VFP_GEN_FIX
9ee6e8bb 1054
312eea9f 1055static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1056{
1057 if (dp)
312eea9f 1058 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1059 else
312eea9f 1060 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1061}
1062
312eea9f 1063static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1064{
1065 if (dp)
312eea9f 1066 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1067 else
312eea9f 1068 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1069}
1070
8e96005d
FB
1071static inline long
1072vfp_reg_offset (int dp, int reg)
1073{
1074 if (dp)
1075 return offsetof(CPUARMState, vfp.regs[reg]);
1076 else if (reg & 1) {
1077 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1078 + offsetof(CPU_DoubleU, l.upper);
1079 } else {
1080 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1081 + offsetof(CPU_DoubleU, l.lower);
1082 }
1083}
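
/* Illustrative mapping for the offsets above: double register Dn lives in
 * vfp.regs[n], while single register Sn shares the slot of D(n/2) - for
 * example S11 (reg = 11, odd) maps to vfp.regs[5] + offsetof(CPU_DoubleU,
 * l.upper), i.e. the high word of D5, and S10 maps to the l.lower word.
 */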
9ee6e8bb
PB
1084
1085/* Return the offset of a 32-bit piece of a NEON register.
1086 zero is the least significant end of the register. */
1087static inline long
1088neon_reg_offset (int reg, int n)
1089{
1090 int sreg;
1091 sreg = reg * 2 + n;
1092 return vfp_reg_offset(0, sreg);
1093}
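
/* Example for neon_reg_offset(): the 32-bit pieces of a NEON register are
 * addressed as single-precision slots, so reg = 3, n = 1 gives
 * sreg = 3 * 2 + 1 = 7, the same location vfp_reg_offset(0, 7) returns.
 */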
1094
8f8e3aa4
PB
1095static TCGv neon_load_reg(int reg, int pass)
1096{
7d1b0095 1097 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1098 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1099 return tmp;
1100}
1101
1102static void neon_store_reg(int reg, int pass, TCGv var)
1103{
1104 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1105 tcg_temp_free_i32(var);
8f8e3aa4
PB
1106}
1107
a7812ae4 1108static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1109{
1110 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1111}
1112
a7812ae4 1113static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1114{
1115 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1116}
1117
4373f3ce
PB
1118#define tcg_gen_ld_f32 tcg_gen_ld_i32
1119#define tcg_gen_ld_f64 tcg_gen_ld_i64
1120#define tcg_gen_st_f32 tcg_gen_st_i32
1121#define tcg_gen_st_f64 tcg_gen_st_i64
1122
b7bcbe95
FB
1123static inline void gen_mov_F0_vreg(int dp, int reg)
1124{
1125 if (dp)
4373f3ce 1126 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1127 else
4373f3ce 1128 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1129}
1130
1131static inline void gen_mov_F1_vreg(int dp, int reg)
1132{
1133 if (dp)
4373f3ce 1134 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1135 else
4373f3ce 1136 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1137}
1138
1139static inline void gen_mov_vreg_F0(int dp, int reg)
1140{
1141 if (dp)
4373f3ce 1142 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1143 else
4373f3ce 1144 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1145}
1146
18c9b560
AZ
1147#define ARM_CP_RW_BIT (1 << 20)
1148
a7812ae4 1149static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1150{
0ecb72a5 1151 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1152}
1153
a7812ae4 1154static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1155{
0ecb72a5 1156 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1157}
1158
da6b5335 1159static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1160{
7d1b0095 1161 TCGv var = tcg_temp_new_i32();
0ecb72a5 1162 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1163 return var;
e677137d
PB
1164}
1165
da6b5335 1166static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1167{
0ecb72a5 1168 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1169 tcg_temp_free_i32(var);
e677137d
PB
1170}
1171
1172static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1173{
1174 iwmmxt_store_reg(cpu_M0, rn);
1175}
1176
1177static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1178{
1179 iwmmxt_load_reg(cpu_M0, rn);
1180}
1181
1182static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1183{
1184 iwmmxt_load_reg(cpu_V1, rn);
1185 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1186}
1187
1188static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1189{
1190 iwmmxt_load_reg(cpu_V1, rn);
1191 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1192}
1193
1194static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1195{
1196 iwmmxt_load_reg(cpu_V1, rn);
1197 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1198}
1199
1200#define IWMMXT_OP(name) \
1201static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1202{ \
1203 iwmmxt_load_reg(cpu_V1, rn); \
1204 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1205}
1206
477955bd
PM
1207#define IWMMXT_OP_ENV(name) \
1208static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1209{ \
1210 iwmmxt_load_reg(cpu_V1, rn); \
1211 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1212}
1213
1214#define IWMMXT_OP_ENV_SIZE(name) \
1215IWMMXT_OP_ENV(name##b) \
1216IWMMXT_OP_ENV(name##w) \
1217IWMMXT_OP_ENV(name##l)
e677137d 1218
477955bd 1219#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1220static inline void gen_op_iwmmxt_##name##_M0(void) \
1221{ \
477955bd 1222 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1223}
1224
1225IWMMXT_OP(maddsq)
1226IWMMXT_OP(madduq)
1227IWMMXT_OP(sadb)
1228IWMMXT_OP(sadw)
1229IWMMXT_OP(mulslw)
1230IWMMXT_OP(mulshw)
1231IWMMXT_OP(mululw)
1232IWMMXT_OP(muluhw)
1233IWMMXT_OP(macsw)
1234IWMMXT_OP(macuw)
1235
477955bd
PM
1236IWMMXT_OP_ENV_SIZE(unpackl)
1237IWMMXT_OP_ENV_SIZE(unpackh)
1238
1239IWMMXT_OP_ENV1(unpacklub)
1240IWMMXT_OP_ENV1(unpackluw)
1241IWMMXT_OP_ENV1(unpacklul)
1242IWMMXT_OP_ENV1(unpackhub)
1243IWMMXT_OP_ENV1(unpackhuw)
1244IWMMXT_OP_ENV1(unpackhul)
1245IWMMXT_OP_ENV1(unpacklsb)
1246IWMMXT_OP_ENV1(unpacklsw)
1247IWMMXT_OP_ENV1(unpacklsl)
1248IWMMXT_OP_ENV1(unpackhsb)
1249IWMMXT_OP_ENV1(unpackhsw)
1250IWMMXT_OP_ENV1(unpackhsl)
1251
1252IWMMXT_OP_ENV_SIZE(cmpeq)
1253IWMMXT_OP_ENV_SIZE(cmpgtu)
1254IWMMXT_OP_ENV_SIZE(cmpgts)
1255
1256IWMMXT_OP_ENV_SIZE(mins)
1257IWMMXT_OP_ENV_SIZE(minu)
1258IWMMXT_OP_ENV_SIZE(maxs)
1259IWMMXT_OP_ENV_SIZE(maxu)
1260
1261IWMMXT_OP_ENV_SIZE(subn)
1262IWMMXT_OP_ENV_SIZE(addn)
1263IWMMXT_OP_ENV_SIZE(subu)
1264IWMMXT_OP_ENV_SIZE(addu)
1265IWMMXT_OP_ENV_SIZE(subs)
1266IWMMXT_OP_ENV_SIZE(adds)
1267
1268IWMMXT_OP_ENV(avgb0)
1269IWMMXT_OP_ENV(avgb1)
1270IWMMXT_OP_ENV(avgw0)
1271IWMMXT_OP_ENV(avgw1)
e677137d
PB
1272
1273IWMMXT_OP(msadb)
1274
477955bd
PM
1275IWMMXT_OP_ENV(packuw)
1276IWMMXT_OP_ENV(packul)
1277IWMMXT_OP_ENV(packuq)
1278IWMMXT_OP_ENV(packsw)
1279IWMMXT_OP_ENV(packsl)
1280IWMMXT_OP_ENV(packsq)
e677137d 1281
e677137d
PB
1282static void gen_op_iwmmxt_set_mup(void)
1283{
1284 TCGv tmp;
1285 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1286 tcg_gen_ori_i32(tmp, tmp, 2);
1287 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1288}
1289
1290static void gen_op_iwmmxt_set_cup(void)
1291{
1292 TCGv tmp;
1293 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1294 tcg_gen_ori_i32(tmp, tmp, 1);
1295 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1296}
1297
1298static void gen_op_iwmmxt_setpsr_nz(void)
1299{
7d1b0095 1300 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1301 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1302 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1303}
1304
1305static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1306{
1307 iwmmxt_load_reg(cpu_V1, rn);
86831435 1308 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1309 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1310}
1311
da6b5335 1312static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1313{
1314 int rd;
1315 uint32_t offset;
da6b5335 1316 TCGv tmp;
18c9b560
AZ
1317
1318 rd = (insn >> 16) & 0xf;
da6b5335 1319 tmp = load_reg(s, rd);
18c9b560
AZ
1320
1321 offset = (insn & 0xff) << ((insn >> 7) & 2);
1322 if (insn & (1 << 24)) {
1323 /* Pre indexed */
1324 if (insn & (1 << 23))
da6b5335 1325 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1326 else
da6b5335
FN
1327 tcg_gen_addi_i32(tmp, tmp, -offset);
1328 tcg_gen_mov_i32(dest, tmp);
18c9b560 1329 if (insn & (1 << 21))
da6b5335
FN
1330 store_reg(s, rd, tmp);
1331 else
7d1b0095 1332 tcg_temp_free_i32(tmp);
18c9b560
AZ
1333 } else if (insn & (1 << 21)) {
1334 /* Post indexed */
da6b5335 1335 tcg_gen_mov_i32(dest, tmp);
18c9b560 1336 if (insn & (1 << 23))
da6b5335 1337 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1338 else
da6b5335
FN
1339 tcg_gen_addi_i32(tmp, tmp, -offset);
1340 store_reg(s, rd, tmp);
18c9b560
AZ
1341 } else if (!(insn & (1 << 23)))
1342 return 1;
1343 return 0;
1344}
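
/* Summary of the iwMMXt addressing form handled above (illustrative): the
 * offset is imm8 scaled by bit 8 of the insn (left shift of 0 or 2).  With
 * bit 24 set (pre-indexed), 'dest' receives base +/- offset per the U bit
 * (bit 23) and the base register is written back only if bit 21 is set.
 * With bit 24 clear and bit 21 set (post-indexed), 'dest' receives the
 * original base and the updated value is always written back.  With neither
 * set, a clear U bit makes the form invalid (return 1).
 */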
1345
da6b5335 1346static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1347{
1348 int rd = (insn >> 0) & 0xf;
da6b5335 1349 TCGv tmp;
18c9b560 1350
da6b5335
FN
1351 if (insn & (1 << 8)) {
1352 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1353 return 1;
da6b5335
FN
1354 } else {
1355 tmp = iwmmxt_load_creg(rd);
1356 }
1357 } else {
7d1b0095 1358 tmp = tcg_temp_new_i32();
da6b5335
FN
1359 iwmmxt_load_reg(cpu_V0, rd);
1360 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1361 }
1362 tcg_gen_andi_i32(tmp, tmp, mask);
1363 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1364 tcg_temp_free_i32(tmp);
18c9b560
AZ
1365 return 0;
1366}
1367
a1c7273b 1368/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1369 (ie. an undefined instruction). */
0ecb72a5 1370static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1371{
1372 int rd, wrd;
1373 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1374 TCGv addr;
1375 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1376
1377 if ((insn & 0x0e000e00) == 0x0c000000) {
1378 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1379 wrd = insn & 0xf;
1380 rdlo = (insn >> 12) & 0xf;
1381 rdhi = (insn >> 16) & 0xf;
1382 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1383 iwmmxt_load_reg(cpu_V0, wrd);
1384 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1385 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1386 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1387 } else { /* TMCRR */
da6b5335
FN
1388 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1389 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1390 gen_op_iwmmxt_set_mup();
1391 }
1392 return 0;
1393 }
1394
1395 wrd = (insn >> 12) & 0xf;
7d1b0095 1396 addr = tcg_temp_new_i32();
da6b5335 1397 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1398 tcg_temp_free_i32(addr);
18c9b560 1399 return 1;
da6b5335 1400 }
18c9b560
AZ
1401 if (insn & ARM_CP_RW_BIT) {
1402 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1403 tmp = tcg_temp_new_i32();
da6b5335
FN
1404 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1405 iwmmxt_store_creg(wrd, tmp);
18c9b560 1406 } else {
e677137d
PB
1407 i = 1;
1408 if (insn & (1 << 8)) {
1409 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1410 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1411 i = 0;
1412 } else { /* WLDRW wRd */
da6b5335 1413 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1414 }
1415 } else {
1416 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1417 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1418 } else { /* WLDRB */
da6b5335 1419 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1420 }
1421 }
1422 if (i) {
1423 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1424 tcg_temp_free_i32(tmp);
e677137d 1425 }
18c9b560
AZ
1426 gen_op_iwmmxt_movq_wRn_M0(wrd);
1427 }
1428 } else {
1429 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1430 tmp = iwmmxt_load_creg(wrd);
1431 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1432 } else {
1433 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1434 tmp = tcg_temp_new_i32();
e677137d
PB
1435 if (insn & (1 << 8)) {
1436 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1437 tcg_temp_free_i32(tmp);
da6b5335 1438 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1439 } else { /* WSTRW wRd */
1440 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1441 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1442 }
1443 } else {
1444 if (insn & (1 << 22)) { /* WSTRH */
1445 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1446 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1447 } else { /* WSTRB */
1448 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1449 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1450 }
1451 }
18c9b560
AZ
1452 }
1453 }
7d1b0095 1454 tcg_temp_free_i32(addr);
18c9b560
AZ
1455 return 0;
1456 }
1457
1458 if ((insn & 0x0f000000) != 0x0e000000)
1459 return 1;
1460
1461 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1462 case 0x000: /* WOR */
1463 wrd = (insn >> 12) & 0xf;
1464 rd0 = (insn >> 0) & 0xf;
1465 rd1 = (insn >> 16) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0);
1467 gen_op_iwmmxt_orq_M0_wRn(rd1);
1468 gen_op_iwmmxt_setpsr_nz();
1469 gen_op_iwmmxt_movq_wRn_M0(wrd);
1470 gen_op_iwmmxt_set_mup();
1471 gen_op_iwmmxt_set_cup();
1472 break;
1473 case 0x011: /* TMCR */
1474 if (insn & 0xf)
1475 return 1;
1476 rd = (insn >> 12) & 0xf;
1477 wrd = (insn >> 16) & 0xf;
1478 switch (wrd) {
1479 case ARM_IWMMXT_wCID:
1480 case ARM_IWMMXT_wCASF:
1481 break;
1482 case ARM_IWMMXT_wCon:
1483 gen_op_iwmmxt_set_cup();
1484 /* Fall through. */
1485 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1486 tmp = iwmmxt_load_creg(wrd);
1487 tmp2 = load_reg(s, rd);
f669df27 1488 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1489 tcg_temp_free_i32(tmp2);
da6b5335 1490 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1491 break;
1492 case ARM_IWMMXT_wCGR0:
1493 case ARM_IWMMXT_wCGR1:
1494 case ARM_IWMMXT_wCGR2:
1495 case ARM_IWMMXT_wCGR3:
1496 gen_op_iwmmxt_set_cup();
da6b5335
FN
1497 tmp = load_reg(s, rd);
1498 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1499 break;
1500 default:
1501 return 1;
1502 }
1503 break;
1504 case 0x100: /* WXOR */
1505 wrd = (insn >> 12) & 0xf;
1506 rd0 = (insn >> 0) & 0xf;
1507 rd1 = (insn >> 16) & 0xf;
1508 gen_op_iwmmxt_movq_M0_wRn(rd0);
1509 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1510 gen_op_iwmmxt_setpsr_nz();
1511 gen_op_iwmmxt_movq_wRn_M0(wrd);
1512 gen_op_iwmmxt_set_mup();
1513 gen_op_iwmmxt_set_cup();
1514 break;
1515 case 0x111: /* TMRC */
1516 if (insn & 0xf)
1517 return 1;
1518 rd = (insn >> 12) & 0xf;
1519 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1520 tmp = iwmmxt_load_creg(wrd);
1521 store_reg(s, rd, tmp);
18c9b560
AZ
1522 break;
1523 case 0x300: /* WANDN */
1524 wrd = (insn >> 12) & 0xf;
1525 rd0 = (insn >> 0) & 0xf;
1526 rd1 = (insn >> 16) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1528 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1529 gen_op_iwmmxt_andq_M0_wRn(rd1);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1534 break;
1535 case 0x200: /* WAND */
1536 wrd = (insn >> 12) & 0xf;
1537 rd0 = (insn >> 0) & 0xf;
1538 rd1 = (insn >> 16) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0);
1540 gen_op_iwmmxt_andq_M0_wRn(rd1);
1541 gen_op_iwmmxt_setpsr_nz();
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x810: case 0xa10: /* WMADD */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 0) & 0xf;
1549 rd1 = (insn >> 16) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 if (insn & (1 << 21))
1552 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1553 else
1554 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1555 gen_op_iwmmxt_movq_wRn_M0(wrd);
1556 gen_op_iwmmxt_set_mup();
1557 break;
1558 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1559 wrd = (insn >> 12) & 0xf;
1560 rd0 = (insn >> 16) & 0xf;
1561 rd1 = (insn >> 0) & 0xf;
1562 gen_op_iwmmxt_movq_M0_wRn(rd0);
1563 switch ((insn >> 22) & 3) {
1564 case 0:
1565 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1566 break;
1567 case 1:
1568 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1569 break;
1570 case 2:
1571 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1572 break;
1573 case 3:
1574 return 1;
1575 }
1576 gen_op_iwmmxt_movq_wRn_M0(wrd);
1577 gen_op_iwmmxt_set_mup();
1578 gen_op_iwmmxt_set_cup();
1579 break;
1580 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1581 wrd = (insn >> 12) & 0xf;
1582 rd0 = (insn >> 16) & 0xf;
1583 rd1 = (insn >> 0) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0);
1585 switch ((insn >> 22) & 3) {
1586 case 0:
1587 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1588 break;
1589 case 1:
1590 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1591 break;
1592 case 2:
1593 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1594 break;
1595 case 3:
1596 return 1;
1597 }
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 gen_op_iwmmxt_set_cup();
1601 break;
1602 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1603 wrd = (insn >> 12) & 0xf;
1604 rd0 = (insn >> 16) & 0xf;
1605 rd1 = (insn >> 0) & 0xf;
1606 gen_op_iwmmxt_movq_M0_wRn(rd0);
1607 if (insn & (1 << 22))
1608 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1609 else
1610 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1611 if (!(insn & (1 << 20)))
1612 gen_op_iwmmxt_addl_M0_wRn(wrd);
1613 gen_op_iwmmxt_movq_wRn_M0(wrd);
1614 gen_op_iwmmxt_set_mup();
1615 break;
1616 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 16) & 0xf;
1619 rd1 = (insn >> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1621 if (insn & (1 << 21)) {
1622 if (insn & (1 << 20))
1623 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1624 else
1625 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1626 } else {
1627 if (insn & (1 << 20))
1628 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1631 }
18c9b560
AZ
1632 gen_op_iwmmxt_movq_wRn_M0(wrd);
1633 gen_op_iwmmxt_set_mup();
1634 break;
1635 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 16) & 0xf;
1638 rd1 = (insn >> 0) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 if (insn & (1 << 21))
1641 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1642 else
1643 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1644 if (!(insn & (1 << 20))) {
e677137d
PB
1645 iwmmxt_load_reg(cpu_V1, wrd);
1646 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1647 }
1648 gen_op_iwmmxt_movq_wRn_M0(wrd);
1649 gen_op_iwmmxt_set_mup();
1650 break;
1651 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1652 wrd = (insn >> 12) & 0xf;
1653 rd0 = (insn >> 16) & 0xf;
1654 rd1 = (insn >> 0) & 0xf;
1655 gen_op_iwmmxt_movq_M0_wRn(rd0);
1656 switch ((insn >> 22) & 3) {
1657 case 0:
1658 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1659 break;
1660 case 1:
1661 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1662 break;
1663 case 2:
1664 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1665 break;
1666 case 3:
1667 return 1;
1668 }
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 gen_op_iwmmxt_set_cup();
1672 break;
1673 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1674 wrd = (insn >> 12) & 0xf;
1675 rd0 = (insn >> 16) & 0xf;
1676 rd1 = (insn >> 0) & 0xf;
1677 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1678 if (insn & (1 << 22)) {
1679 if (insn & (1 << 20))
1680 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1681 else
1682 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1683 } else {
1684 if (insn & (1 << 20))
1685 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1686 else
1687 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1688 }
18c9b560
AZ
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1692 break;
1693 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1698 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1699 tcg_gen_andi_i32(tmp, tmp, 7);
1700 iwmmxt_load_reg(cpu_V1, rd1);
1701 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1702 tcg_temp_free_i32(tmp);
18c9b560
AZ
1703 gen_op_iwmmxt_movq_wRn_M0(wrd);
1704 gen_op_iwmmxt_set_mup();
1705 break;
1706 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1707 if (((insn >> 6) & 3) == 3)
1708 return 1;
18c9b560
AZ
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
da6b5335 1711 tmp = load_reg(s, rd);
18c9b560
AZ
1712 gen_op_iwmmxt_movq_M0_wRn(wrd);
1713 switch ((insn >> 6) & 3) {
1714 case 0:
da6b5335
FN
1715 tmp2 = tcg_const_i32(0xff);
1716 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1717 break;
1718 case 1:
da6b5335
FN
1719 tmp2 = tcg_const_i32(0xffff);
1720 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1721 break;
1722 case 2:
da6b5335
FN
1723 tmp2 = tcg_const_i32(0xffffffff);
1724 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1725 break;
da6b5335
FN
1726 default:
1727 TCGV_UNUSED(tmp2);
1728 TCGV_UNUSED(tmp3);
18c9b560 1729 }
da6b5335
FN
1730 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1731 tcg_temp_free(tmp3);
1732 tcg_temp_free(tmp2);
7d1b0095 1733 tcg_temp_free_i32(tmp);
18c9b560
AZ
1734 gen_op_iwmmxt_movq_wRn_M0(wrd);
1735 gen_op_iwmmxt_set_mup();
1736 break;
1737 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1738 rd = (insn >> 12) & 0xf;
1739 wrd = (insn >> 16) & 0xf;
da6b5335 1740 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1741 return 1;
1742 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1743 tmp = tcg_temp_new_i32();
18c9b560
AZ
1744 switch ((insn >> 22) & 3) {
1745 case 0:
da6b5335
FN
1746 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1747 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1748 if (insn & 8) {
1749 tcg_gen_ext8s_i32(tmp, tmp);
1750 } else {
1751 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1752 }
1753 break;
1754 case 1:
da6b5335
FN
1755 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1756 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1757 if (insn & 8) {
1758 tcg_gen_ext16s_i32(tmp, tmp);
1759 } else {
1760 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1761 }
1762 break;
1763 case 2:
da6b5335
FN
1764 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1765 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1766 break;
18c9b560 1767 }
da6b5335 1768 store_reg(s, rd, tmp);
18c9b560
AZ
1769 break;
1770 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1771 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1772 return 1;
da6b5335 1773 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1774 switch ((insn >> 22) & 3) {
1775 case 0:
da6b5335 1776 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1777 break;
1778 case 1:
da6b5335 1779 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1780 break;
1781 case 2:
da6b5335 1782 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1783 break;
18c9b560 1784 }
da6b5335
FN
1785 tcg_gen_shli_i32(tmp, tmp, 28);
1786 gen_set_nzcv(tmp);
7d1b0095 1787 tcg_temp_free_i32(tmp);
18c9b560
AZ
1788 break;
1789 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1790 if (((insn >> 6) & 3) == 3)
1791 return 1;
18c9b560
AZ
1792 rd = (insn >> 12) & 0xf;
1793 wrd = (insn >> 16) & 0xf;
da6b5335 1794 tmp = load_reg(s, rd);
18c9b560
AZ
1795 switch ((insn >> 6) & 3) {
1796 case 0:
da6b5335 1797 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1798 break;
1799 case 1:
da6b5335 1800 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1801 break;
1802 case 2:
da6b5335 1803 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1804 break;
18c9b560 1805 }
7d1b0095 1806 tcg_temp_free_i32(tmp);
18c9b560
AZ
1807 gen_op_iwmmxt_movq_wRn_M0(wrd);
1808 gen_op_iwmmxt_set_mup();
1809 break;
1810 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1811 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1812 return 1;
da6b5335 1813 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1814 tmp2 = tcg_temp_new_i32();
da6b5335 1815 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1816 switch ((insn >> 22) & 3) {
1817 case 0:
1818 for (i = 0; i < 7; i ++) {
da6b5335
FN
1819 tcg_gen_shli_i32(tmp2, tmp2, 4);
1820 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1821 }
1822 break;
1823 case 1:
1824 for (i = 0; i < 3; i ++) {
da6b5335
FN
1825 tcg_gen_shli_i32(tmp2, tmp2, 8);
1826 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1827 }
1828 break;
1829 case 2:
da6b5335
FN
1830 tcg_gen_shli_i32(tmp2, tmp2, 16);
1831 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1832 break;
18c9b560 1833 }
da6b5335 1834 gen_set_nzcv(tmp);
7d1b0095
PM
1835 tcg_temp_free_i32(tmp2);
1836 tcg_temp_free_i32(tmp);
18c9b560
AZ
1837 break;
1838 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1839 wrd = (insn >> 12) & 0xf;
1840 rd0 = (insn >> 16) & 0xf;
1841 gen_op_iwmmxt_movq_M0_wRn(rd0);
1842 switch ((insn >> 22) & 3) {
1843 case 0:
e677137d 1844 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1845 break;
1846 case 1:
e677137d 1847 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1848 break;
1849 case 2:
e677137d 1850 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1851 break;
1852 case 3:
1853 return 1;
1854 }
1855 gen_op_iwmmxt_movq_wRn_M0(wrd);
1856 gen_op_iwmmxt_set_mup();
1857 break;
1858 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1859 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1860 return 1;
da6b5335 1861 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1862 tmp2 = tcg_temp_new_i32();
da6b5335 1863 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 for (i = 0; i < 7; i ++) {
da6b5335
FN
1867 tcg_gen_shli_i32(tmp2, tmp2, 4);
1868 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1869 }
1870 break;
1871 case 1:
1872 for (i = 0; i < 3; i ++) {
da6b5335
FN
1873 tcg_gen_shli_i32(tmp2, tmp2, 8);
1874 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1875 }
1876 break;
1877 case 2:
da6b5335
FN
1878 tcg_gen_shli_i32(tmp2, tmp2, 16);
1879 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1880 break;
18c9b560 1881 }
da6b5335 1882 gen_set_nzcv(tmp);
7d1b0095
PM
1883 tcg_temp_free_i32(tmp2);
1884 tcg_temp_free_i32(tmp);
18c9b560
AZ
1885 break;
1886 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1887 rd = (insn >> 12) & 0xf;
1888 rd0 = (insn >> 16) & 0xf;
da6b5335 1889 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1890 return 1;
1891 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1892 tmp = tcg_temp_new_i32();
18c9b560
AZ
1893 switch ((insn >> 22) & 3) {
1894 case 0:
da6b5335 1895 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1896 break;
1897 case 1:
da6b5335 1898 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1899 break;
1900 case 2:
da6b5335 1901 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1902 break;
18c9b560 1903 }
da6b5335 1904 store_reg(s, rd, tmp);
18c9b560
AZ
1905 break;
1906 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1907 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 switch ((insn >> 22) & 3) {
1913 case 0:
1914 if (insn & (1 << 21))
1915 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1916 else
1917 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1918 break;
1919 case 1:
1920 if (insn & (1 << 21))
1921 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1922 else
1923 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1924 break;
1925 case 2:
1926 if (insn & (1 << 21))
1927 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1928 else
1929 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1930 break;
1931 case 3:
1932 return 1;
1933 }
1934 gen_op_iwmmxt_movq_wRn_M0(wrd);
1935 gen_op_iwmmxt_set_mup();
1936 gen_op_iwmmxt_set_cup();
1937 break;
1938 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1939 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1944 case 0:
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_unpacklsb_M0();
1947 else
1948 gen_op_iwmmxt_unpacklub_M0();
1949 break;
1950 case 1:
1951 if (insn & (1 << 21))
1952 gen_op_iwmmxt_unpacklsw_M0();
1953 else
1954 gen_op_iwmmxt_unpackluw_M0();
1955 break;
1956 case 2:
1957 if (insn & (1 << 21))
1958 gen_op_iwmmxt_unpacklsl_M0();
1959 else
1960 gen_op_iwmmxt_unpacklul_M0();
1961 break;
1962 case 3:
1963 return 1;
1964 }
1965 gen_op_iwmmxt_movq_wRn_M0(wrd);
1966 gen_op_iwmmxt_set_mup();
1967 gen_op_iwmmxt_set_cup();
1968 break;
1969 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1970 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1971 wrd = (insn >> 12) & 0xf;
1972 rd0 = (insn >> 16) & 0xf;
1973 gen_op_iwmmxt_movq_M0_wRn(rd0);
1974 switch ((insn >> 22) & 3) {
1975 case 0:
1976 if (insn & (1 << 21))
1977 gen_op_iwmmxt_unpackhsb_M0();
1978 else
1979 gen_op_iwmmxt_unpackhub_M0();
1980 break;
1981 case 1:
1982 if (insn & (1 << 21))
1983 gen_op_iwmmxt_unpackhsw_M0();
1984 else
1985 gen_op_iwmmxt_unpackhuw_M0();
1986 break;
1987 case 2:
1988 if (insn & (1 << 21))
1989 gen_op_iwmmxt_unpackhsl_M0();
1990 else
1991 gen_op_iwmmxt_unpackhul_M0();
1992 break;
1993 case 3:
1994 return 1;
1995 }
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2001 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2002 if (((insn >> 22) & 3) == 0)
2003 return 1;
18c9b560
AZ
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2007 tmp = tcg_temp_new_i32();
da6b5335 2008 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2009 tcg_temp_free_i32(tmp);
18c9b560 2010 return 1;
da6b5335 2011 }
18c9b560 2012 switch ((insn >> 22) & 3) {
18c9b560 2013 case 1:
477955bd 2014 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2015 break;
2016 case 2:
477955bd 2017 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2018 break;
2019 case 3:
477955bd 2020 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2021 break;
2022 }
7d1b0095 2023 tcg_temp_free_i32(tmp);
18c9b560
AZ
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2027 break;
2028 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2029 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2030 if (((insn >> 22) & 3) == 0)
2031 return 1;
18c9b560
AZ
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2035 tmp = tcg_temp_new_i32();
da6b5335 2036 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2037 tcg_temp_free_i32(tmp);
18c9b560 2038 return 1;
da6b5335 2039 }
18c9b560 2040 switch ((insn >> 22) & 3) {
18c9b560 2041 case 1:
477955bd 2042 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2043 break;
2044 case 2:
477955bd 2045 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2046 break;
2047 case 3:
477955bd 2048 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2049 break;
2050 }
7d1b0095 2051 tcg_temp_free_i32(tmp);
18c9b560
AZ
2052 gen_op_iwmmxt_movq_wRn_M0(wrd);
2053 gen_op_iwmmxt_set_mup();
2054 gen_op_iwmmxt_set_cup();
2055 break;
2056 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2057 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2058 if (((insn >> 22) & 3) == 0)
2059 return 1;
18c9b560
AZ
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2063 tmp = tcg_temp_new_i32();
da6b5335 2064 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2065 tcg_temp_free_i32(tmp);
18c9b560 2066 return 1;
da6b5335 2067 }
18c9b560 2068 switch ((insn >> 22) & 3) {
18c9b560 2069 case 1:
477955bd 2070 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2071 break;
2072 case 2:
477955bd 2073 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2074 break;
2075 case 3:
477955bd 2076 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2077 break;
2078 }
7d1b0095 2079 tcg_temp_free_i32(tmp);
18c9b560
AZ
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2083 break;
2084 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2085 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2086 if (((insn >> 22) & 3) == 0)
2087 return 1;
18c9b560
AZ
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2091 tmp = tcg_temp_new_i32();
18c9b560 2092 switch ((insn >> 22) & 3) {
18c9b560 2093 case 1:
da6b5335 2094 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2095 tcg_temp_free_i32(tmp);
18c9b560 2096 return 1;
da6b5335 2097 }
477955bd 2098 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2099 break;
2100 case 2:
da6b5335 2101 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2102 tcg_temp_free_i32(tmp);
18c9b560 2103 return 1;
da6b5335 2104 }
477955bd 2105 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2106 break;
2107 case 3:
da6b5335 2108 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2109 tcg_temp_free_i32(tmp);
18c9b560 2110 return 1;
da6b5335 2111 }
477955bd 2112 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2113 break;
2114 }
7d1b0095 2115 tcg_temp_free_i32(tmp);
18c9b560
AZ
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2119 break;
2120 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2121 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2122 wrd = (insn >> 12) & 0xf;
2123 rd0 = (insn >> 16) & 0xf;
2124 rd1 = (insn >> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_minub_M0_wRn(rd1);
2132 break;
2133 case 1:
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2136 else
2137 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2138 break;
2139 case 2:
2140 if (insn & (1 << 21))
2141 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2142 else
2143 gen_op_iwmmxt_minul_M0_wRn(rd1);
2144 break;
2145 case 3:
2146 return 1;
2147 }
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2152 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
2157 switch ((insn >> 22) & 3) {
2158 case 0:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2161 else
2162 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2163 break;
2164 case 1:
2165 if (insn & (1 << 21))
2166 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2167 else
2168 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2169 break;
2170 case 2:
2171 if (insn & (1 << 21))
2172 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2173 else
2174 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2175 break;
2176 case 3:
2177 return 1;
2178 }
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2181 break;
2182 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2183 case 0x402: case 0x502: case 0x602: case 0x702:
2184 wrd = (insn >> 12) & 0xf;
2185 rd0 = (insn >> 16) & 0xf;
2186 rd1 = (insn >> 0) & 0xf;
2187 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2188 tmp = tcg_const_i32((insn >> 20) & 3);
2189 iwmmxt_load_reg(cpu_V1, rd1);
2190 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2191 tcg_temp_free(tmp);
18c9b560
AZ
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 break;
2195 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2196 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2197 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2198 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 rd1 = (insn >> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 switch ((insn >> 20) & 0xf) {
2204 case 0x0:
2205 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2206 break;
2207 case 0x1:
2208 gen_op_iwmmxt_subub_M0_wRn(rd1);
2209 break;
2210 case 0x3:
2211 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2212 break;
2213 case 0x4:
2214 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2215 break;
2216 case 0x5:
2217 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2218 break;
2219 case 0x7:
2220 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2221 break;
2222 case 0x8:
2223 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2224 break;
2225 case 0x9:
2226 gen_op_iwmmxt_subul_M0_wRn(rd1);
2227 break;
2228 case 0xb:
2229 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2230 break;
2231 default:
2232 return 1;
2233 }
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2237 break;
2238 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2239 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2240 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2241 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2245 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2246 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2247 tcg_temp_free(tmp);
18c9b560
AZ
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2251 break;
2252 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2253 case 0x418: case 0x518: case 0x618: case 0x718:
2254 case 0x818: case 0x918: case 0xa18: case 0xb18:
2255 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 switch ((insn >> 20) & 0xf) {
2261 case 0x0:
2262 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2263 break;
2264 case 0x1:
2265 gen_op_iwmmxt_addub_M0_wRn(rd1);
2266 break;
2267 case 0x3:
2268 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2269 break;
2270 case 0x4:
2271 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2272 break;
2273 case 0x5:
2274 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2275 break;
2276 case 0x7:
2277 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2278 break;
2279 case 0x8:
2280 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2281 break;
2282 case 0x9:
2283 gen_op_iwmmxt_addul_M0_wRn(rd1);
2284 break;
2285 case 0xb:
2286 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2287 break;
2288 default:
2289 return 1;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2296 case 0x408: case 0x508: case 0x608: case 0x708:
2297 case 0x808: case 0x908: case 0xa08: case 0xb08:
2298 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2299 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2300 return 1;
18c9b560
AZ
2301 wrd = (insn >> 12) & 0xf;
2302 rd0 = (insn >> 16) & 0xf;
2303 rd1 = (insn >> 0) & 0xf;
2304 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2305 switch ((insn >> 22) & 3) {
18c9b560
AZ
2306 case 1:
2307 if (insn & (1 << 21))
2308 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2309 else
2310 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2311 break;
2312 case 2:
2313 if (insn & (1 << 21))
2314 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2315 else
2316 gen_op_iwmmxt_packul_M0_wRn(rd1);
2317 break;
2318 case 3:
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2321 else
2322 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2323 break;
2324 }
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
2329 case 0x201: case 0x203: case 0x205: case 0x207:
2330 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2331 case 0x211: case 0x213: case 0x215: case 0x217:
2332 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2333 wrd = (insn >> 5) & 0xf;
2334 rd0 = (insn >> 12) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 if (rd0 == 0xf || rd1 == 0xf)
2337 return 1;
2338 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2339 tmp = load_reg(s, rd0);
2340 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2341 switch ((insn >> 16) & 0xf) {
2342 case 0x0: /* TMIA */
da6b5335 2343 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2344 break;
2345 case 0x8: /* TMIAPH */
da6b5335 2346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2347 break;
2348 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2349 if (insn & (1 << 16))
da6b5335 2350 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2351 if (insn & (1 << 17))
2352 tcg_gen_shri_i32(tmp2, tmp2, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2354 break;
2355 default:
7d1b0095
PM
2356 tcg_temp_free_i32(tmp2);
2357 tcg_temp_free_i32(tmp);
18c9b560
AZ
2358 return 1;
2359 }
7d1b0095
PM
2360 tcg_temp_free_i32(tmp2);
2361 tcg_temp_free_i32(tmp);
18c9b560
AZ
2362 gen_op_iwmmxt_movq_wRn_M0(wrd);
2363 gen_op_iwmmxt_set_mup();
2364 break;
2365 default:
2366 return 1;
2367 }
2368
2369 return 0;
2370}
2371
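/* Illustrative sketch (plain C with <stdint.h> types, not QEMU API): the field
 * decode pattern used by the iwMMXt cases above.  wRd is in bits [15:12], the
 * first source wRn in bits [19:16], the second source wRm in bits [3:0], and
 * the element size selector in bits [23:22].  Struct and function names are
 * hypothetical. */
struct iwmmxt_fields {
    unsigned wrd, rd0, rd1, size;
};

static struct iwmmxt_fields decode_iwmmxt_fields(uint32_t insn)
{
    struct iwmmxt_fields f;
    f.wrd  = (insn >> 12) & 0xf;   /* destination wRd */
    f.rd0  = (insn >> 16) & 0xf;   /* first source wRn */
    f.rd1  = (insn >> 0)  & 0xf;   /* second source wRm */
    f.size = (insn >> 22) & 3;     /* 0 = byte, 1 = half, 2 = word; 3 is rejected by most ops */
    return f;
}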
a1c7273b 2372/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2373 (i.e. an undefined instruction). */
0ecb72a5 2374static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2375{
2376 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2377 TCGv tmp, tmp2;
18c9b560
AZ
2378
2379 if ((insn & 0x0ff00f10) == 0x0e200010) {
2380 /* Multiply with Internal Accumulate Format */
2381 rd0 = (insn >> 12) & 0xf;
2382 rd1 = insn & 0xf;
2383 acc = (insn >> 5) & 7;
2384
2385 if (acc != 0)
2386 return 1;
2387
3a554c0f
FN
2388 tmp = load_reg(s, rd0);
2389 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2390 switch ((insn >> 16) & 0xf) {
2391 case 0x0: /* MIA */
3a554c0f 2392 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2393 break;
2394 case 0x8: /* MIAPH */
3a554c0f 2395 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2396 break;
2397 case 0xc: /* MIABB */
2398 case 0xd: /* MIABT */
2399 case 0xe: /* MIATB */
2400 case 0xf: /* MIATT */
18c9b560 2401 if (insn & (1 << 16))
3a554c0f 2402 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2403 if (insn & (1 << 17))
2404 tcg_gen_shri_i32(tmp2, tmp2, 16);
2405 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2406 break;
2407 default:
2408 return 1;
2409 }
7d1b0095
PM
2410 tcg_temp_free_i32(tmp2);
2411 tcg_temp_free_i32(tmp);
18c9b560
AZ
2412
2413 gen_op_iwmmxt_movq_wRn_M0(acc);
2414 return 0;
2415 }
2416
2417 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2418 /* Internal Accumulator Access Format */
2419 rdhi = (insn >> 16) & 0xf;
2420 rdlo = (insn >> 12) & 0xf;
2421 acc = insn & 7;
2422
2423 if (acc != 0)
2424 return 1;
2425
2426 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2427 iwmmxt_load_reg(cpu_V0, acc);
2428 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2429 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2430 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2431 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2432 } else { /* MAR */
3a554c0f
FN
2433 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2434 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2435 }
2436 return 0;
2437 }
2438
2439 return 1;
2440}
2441
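/* Illustrative sketch (plain C, not QEMU API): what the MRA path above does
 * with the 40-bit accumulator.  The low 32 bits go to RdLo and bits [39:32]
 * go to RdHi, hence the (1 << (40 - 32)) - 1 mask.  Names are hypothetical. */
static void split_acc40(uint64_t acc, uint32_t *rdlo, uint32_t *rdhi)
{
    *rdlo = (uint32_t)acc;                            /* low word */
    *rdhi = (uint32_t)(acc >> 32) & ((1u << 8) - 1);  /* bits [39:32] only */
}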
c1713132
AZ
2442/* Disassemble system coprocessor instruction. Return nonzero if
2443 instruction is not defined. */
0ecb72a5 2444static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
c1713132 2445{
b75263d6 2446 TCGv tmp, tmp2;
c1713132
AZ
2447 uint32_t rd = (insn >> 12) & 0xf;
2448 uint32_t cp = (insn >> 8) & 0xf;
2449 if (IS_USER(s)) {
2450 return 1;
2451 }
2452
18c9b560 2453 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2454 if (!env->cp[cp].cp_read)
2455 return 1;
8984bd2e 2456 gen_set_pc_im(s->pc);
7d1b0095 2457 tmp = tcg_temp_new_i32();
b75263d6
JR
2458 tmp2 = tcg_const_i32(insn);
2459 gen_helper_get_cp(tmp, cpu_env, tmp2);
2460 tcg_temp_free(tmp2);
8984bd2e 2461 store_reg(s, rd, tmp);
c1713132
AZ
2462 } else {
2463 if (!env->cp[cp].cp_write)
2464 return 1;
8984bd2e
PB
2465 gen_set_pc_im(s->pc);
2466 tmp = load_reg(s, rd);
b75263d6
JR
2467 tmp2 = tcg_const_i32(insn);
2468 gen_helper_set_cp(cpu_env, tmp2, tmp);
2469 tcg_temp_free(tmp2);
7d1b0095 2470 tcg_temp_free_i32(tmp);
c1713132
AZ
2471 }
2472 return 0;
2473}
2474
0ecb72a5 2475static int cp15_user_ok(CPUARMState *env, uint32_t insn)
9ee6e8bb
PB
2476{
2477 int cpn = (insn >> 16) & 0xf;
2478 int cpm = insn & 0xf;
2479 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2480
74594c9d
PM
2481 if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
2482 /* Performance monitor registers fall into three categories:
2483 * (a) always UNDEF in usermode
2484 * (b) UNDEF only if PMUSERENR.EN is 0
2485 * (c) always read OK and UNDEF on write (PMUSERENR only)
2486 */
2487 if ((cpm == 12 && (op < 6)) ||
2488 (cpm == 13 && (op < 3))) {
2489 return env->cp15.c9_pmuserenr;
2490 } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
2491 /* PMUSERENR, read only */
2492 return 1;
2493 }
2494 return 0;
2495 }
2496
9ee6e8bb
PB
2497 if (cpn == 13 && cpm == 0) {
2498 /* TLS register. */
2499 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2500 return 1;
2501 }
9ee6e8bb
PB
2502 return 0;
2503}
2504
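/* Illustrative sketch (plain C, not QEMU API): the cp15 register-key decode
 * used by cp15_user_ok above.  CRn is bits [19:16], CRm is bits [3:0], and the
 * returned key packs opc2 (insn bits [7:5]) in its low three bits with opc1
 * (insn bits [23:21]) above it.  The function name is hypothetical. */
static unsigned cp15_reg_key(uint32_t insn, unsigned *crn, unsigned *crm)
{
    *crn = (insn >> 16) & 0xf;
    *crm = insn & 0xf;
    return ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
}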
0ecb72a5 2505static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd)
3f26c122
RV
2506{
2507 TCGv tmp;
2508 int cpn = (insn >> 16) & 0xf;
2509 int cpm = insn & 0xf;
2510 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2511
2512 if (!arm_feature(env, ARM_FEATURE_V6K))
2513 return 0;
2514
2515 if (!(cpn == 13 && cpm == 0))
2516 return 0;
2517
2518 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2519 switch (op) {
2520 case 2:
c5883be2 2521 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2522 break;
2523 case 3:
c5883be2 2524 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2525 break;
2526 case 4:
c5883be2 2527 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2528 break;
2529 default:
2530 return 0;
2531 }
2532 store_reg(s, rd, tmp);
2533
2534 } else {
2535 tmp = load_reg(s, rd);
2536 switch (op) {
2537 case 2:
c5883be2 2538 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2539 break;
2540 case 3:
c5883be2 2541 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2542 break;
2543 case 4:
c5883be2 2544 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2545 break;
2546 default:
7d1b0095 2547 tcg_temp_free_i32(tmp);
3f26c122
RV
2548 return 0;
2549 }
3f26c122
RV
2550 }
2551 return 1;
2552}
2553
b5ff1b31
FB
2554/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2555 instruction is not defined. */
0ecb72a5 2556static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2557{
2558 uint32_t rd;
b75263d6 2559 TCGv tmp, tmp2;
b5ff1b31 2560
9ee6e8bb
PB
2561 /* M profile cores use memory mapped registers instead of cp15. */
2562 if (arm_feature(env, ARM_FEATURE_M))
2563 return 1;
2564
2565 if ((insn & (1 << 25)) == 0) {
2566 if (insn & (1 << 20)) {
2567 /* mrrc */
2568 return 1;
2569 }
2570 /* mcrr. Used for block cache operations, so implement as no-op. */
2571 return 0;
2572 }
2573 if ((insn & (1 << 4)) == 0) {
2574 /* cdp */
2575 return 1;
2576 }
87f19eb2
PM
2577 /* We special case a number of cp15 instructions which were used
2578 * for things which are real instructions in ARMv7. This allows
2579 * them to work in linux-user mode which doesn't provide functional
2580 * get_cp15/set_cp15 helpers, and is more efficient anyway.
cc688901 2581 */
87f19eb2
PM
2582 switch ((insn & 0x0fff0fff)) {
2583 case 0x0e070f90:
cc688901
PM
2584 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2585 * In v7, this must NOP.
2586 */
87f19eb2
PM
2587 if (IS_USER(s)) {
2588 return 1;
2589 }
cc688901
PM
2590 if (!arm_feature(env, ARM_FEATURE_V7)) {
2591 /* Wait for interrupt. */
2592 gen_set_pc_im(s->pc);
2593 s->is_jmp = DISAS_WFI;
2594 }
9332f9da 2595 return 0;
87f19eb2 2596 case 0x0e070f58:
cc688901
PM
2597 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2598 * so this is slightly over-broad.
2599 */
87f19eb2 2600 if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
cc688901
PM
2601 /* Wait for interrupt. */
2602 gen_set_pc_im(s->pc);
2603 s->is_jmp = DISAS_WFI;
2604 return 0;
2605 }
87f19eb2 2606 /* Otherwise continue to handle via helper function.
cc688901
PM
2607 * In particular, on v7 and some v6 cores this is one of
2608 * the VA-PA registers.
2609 */
87f19eb2
PM
2610 break;
2611 case 0x0e070f3d:
2612 /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
2613 if (arm_feature(env, ARM_FEATURE_V6)) {
2614 return IS_USER(s) ? 1 : 0;
2615 }
2616 break;
2617 case 0x0e070f95: /* 0,c7,c5,4 : ISB */
2618 case 0x0e070f9a: /* 0,c7,c10,4: DSB */
2619 case 0x0e070fba: /* 0,c7,c10,5: DMB */
2620 /* Barriers in both v6 and v7 */
2621 if (arm_feature(env, ARM_FEATURE_V6)) {
2622 return 0;
2623 }
2624 break;
2625 default:
2626 break;
2627 }
2628
2629 if (IS_USER(s) && !cp15_user_ok(env, insn)) {
2630 return 1;
cc688901
PM
2631 }
2632
b5ff1b31 2633 rd = (insn >> 12) & 0xf;
3f26c122
RV
2634
2635 if (cp15_tls_load_store(env, s, insn, rd))
2636 return 0;
2637
b75263d6 2638 tmp2 = tcg_const_i32(insn);
18c9b560 2639 if (insn & ARM_CP_RW_BIT) {
7d1b0095 2640 tmp = tcg_temp_new_i32();
b75263d6 2641 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2642 /* If the destination register is r15 then sets condition codes. */
2643 if (rd != 15)
8984bd2e
PB
2644 store_reg(s, rd, tmp);
2645 else
7d1b0095 2646 tcg_temp_free_i32(tmp);
b5ff1b31 2647 } else {
8984bd2e 2648 tmp = load_reg(s, rd);
b75263d6 2649 gen_helper_set_cp15(cpu_env, tmp2, tmp);
7d1b0095 2650 tcg_temp_free_i32(tmp);
a90b7318
AZ
2651 /* Normally we would always end the TB here, but Linux
2652 * arch/arm/mach-pxa/sleep.S expects two instructions following
2653 * an MMU enable to execute from cache. Imitate this behaviour. */
2654 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2655 (insn & 0x0fff0fff) != 0x0e010f10)
2656 gen_lookup_tb(s);
b5ff1b31 2657 }
b75263d6 2658 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2659 return 0;
2660}
2661
9ee6e8bb
PB
2662#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2663#define VFP_SREG(insn, bigbit, smallbit) \
2664 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2665#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2666 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2667 reg = (((insn) >> (bigbit)) & 0x0f) \
2668 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2669 } else { \
2670 if (insn & (1 << (smallbit))) \
2671 return 1; \
2672 reg = ((insn) >> (bigbit)) & 0x0f; \
2673 }} while (0)
2674
2675#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2676#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2677#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2678#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2679#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2680#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2681
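/* Illustrative sketch (plain C, not QEMU API): what VFP_SREG_D and the VFP3
 * arm of VFP_DREG_D above compute for the D-register position (bigbit 12,
 * smallbit 22).  A single-precision index is the 4-bit field doubled with the
 * extra bit as bit 0; a double-precision index keeps the 4-bit field and puts
 * the extra bit at bit 4.  Function names are hypothetical. */
static unsigned example_vfp_sreg_d(uint32_t insn)
{
    return (((insn >> 12) & 0xf) << 1) | ((insn >> 22) & 1);    /* Sd */
}

static unsigned example_vfp_dreg_d(uint32_t insn)
{
    return ((insn >> 12) & 0xf) | (((insn >> 22) & 1) << 4);    /* Dd (VFP3) */
}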
4373f3ce
PB
2682/* Move between integer and VFP cores. */
2683static TCGv gen_vfp_mrs(void)
2684{
7d1b0095 2685 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2686 tcg_gen_mov_i32(tmp, cpu_F0s);
2687 return tmp;
2688}
2689
2690static void gen_vfp_msr(TCGv tmp)
2691{
2692 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2693 tcg_temp_free_i32(tmp);
4373f3ce
PB
2694}
2695
ad69471c
PB
2696static void gen_neon_dup_u8(TCGv var, int shift)
2697{
7d1b0095 2698 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2699 if (shift)
2700 tcg_gen_shri_i32(var, var, shift);
86831435 2701 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2702 tcg_gen_shli_i32(tmp, var, 8);
2703 tcg_gen_or_i32(var, var, tmp);
2704 tcg_gen_shli_i32(tmp, var, 16);
2705 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2706 tcg_temp_free_i32(tmp);
ad69471c
PB
2707}
2708
2709static void gen_neon_dup_low16(TCGv var)
2710{
7d1b0095 2711 TCGv tmp = tcg_temp_new_i32();
86831435 2712 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2713 tcg_gen_shli_i32(tmp, var, 16);
2714 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2715 tcg_temp_free_i32(tmp);
ad69471c
PB
2716}
2717
2718static void gen_neon_dup_high16(TCGv var)
2719{
7d1b0095 2720 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2721 tcg_gen_andi_i32(var, var, 0xffff0000);
2722 tcg_gen_shri_i32(tmp, var, 16);
2723 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2724 tcg_temp_free_i32(tmp);
ad69471c
PB
2725}
2726
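/* Illustrative sketch (plain C, not QEMU API): the value-level effect of
 * gen_neon_dup_u8 and gen_neon_dup_low16 above.  Function names are
 * hypothetical. */
static uint32_t dup_u8(uint32_t v, int shift)      /* dup_u8(0xAB, 0)   -> 0xABABABAB */
{
    v = (v >> shift) & 0xff;
    return v | (v << 8) | (v << 16) | (v << 24);
}

static uint32_t dup_low16(uint32_t v)              /* dup_low16(0x1234) -> 0x12341234 */
{
    v &= 0xffff;
    return v | (v << 16);
}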
8e18cde3
PM
2727static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2728{
2729 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2730 TCGv tmp;
2731 switch (size) {
2732 case 0:
2733 tmp = gen_ld8u(addr, IS_USER(s));
2734 gen_neon_dup_u8(tmp, 0);
2735 break;
2736 case 1:
2737 tmp = gen_ld16u(addr, IS_USER(s));
2738 gen_neon_dup_low16(tmp);
2739 break;
2740 case 2:
2741 tmp = gen_ld32(addr, IS_USER(s));
2742 break;
2743 default: /* Avoid compiler warnings. */
2744 abort();
2745 }
2746 return tmp;
2747}
2748
a1c7273b 2749/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2750 (i.e. an undefined instruction). */
0ecb72a5 2751static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2752{
2753 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2754 int dp, veclen;
312eea9f 2755 TCGv addr;
4373f3ce 2756 TCGv tmp;
ad69471c 2757 TCGv tmp2;
b7bcbe95 2758
40f137e1
PB
2759 if (!arm_feature(env, ARM_FEATURE_VFP))
2760 return 1;
2761
5df8bac1 2762 if (!s->vfp_enabled) {
9ee6e8bb 2763 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2764 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2765 return 1;
2766 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2767 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2768 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2769 return 1;
2770 }
b7bcbe95
FB
2771 dp = ((insn & 0xf00) == 0xb00);
2772 switch ((insn >> 24) & 0xf) {
2773 case 0xe:
2774 if (insn & (1 << 4)) {
2775 /* single register transfer */
b7bcbe95
FB
2776 rd = (insn >> 12) & 0xf;
2777 if (dp) {
9ee6e8bb
PB
2778 int size;
2779 int pass;
2780
2781 VFP_DREG_N(rn, insn);
2782 if (insn & 0xf)
b7bcbe95 2783 return 1;
9ee6e8bb
PB
2784 if (insn & 0x00c00060
2785 && !arm_feature(env, ARM_FEATURE_NEON))
2786 return 1;
2787
2788 pass = (insn >> 21) & 1;
2789 if (insn & (1 << 22)) {
2790 size = 0;
2791 offset = ((insn >> 5) & 3) * 8;
2792 } else if (insn & (1 << 5)) {
2793 size = 1;
2794 offset = (insn & (1 << 6)) ? 16 : 0;
2795 } else {
2796 size = 2;
2797 offset = 0;
2798 }
18c9b560 2799 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2800 /* vfp->arm */
ad69471c 2801 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2802 switch (size) {
2803 case 0:
9ee6e8bb 2804 if (offset)
ad69471c 2805 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2806 if (insn & (1 << 23))
ad69471c 2807 gen_uxtb(tmp);
9ee6e8bb 2808 else
ad69471c 2809 gen_sxtb(tmp);
9ee6e8bb
PB
2810 break;
2811 case 1:
9ee6e8bb
PB
2812 if (insn & (1 << 23)) {
2813 if (offset) {
ad69471c 2814 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2815 } else {
ad69471c 2816 gen_uxth(tmp);
9ee6e8bb
PB
2817 }
2818 } else {
2819 if (offset) {
ad69471c 2820 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2821 } else {
ad69471c 2822 gen_sxth(tmp);
9ee6e8bb
PB
2823 }
2824 }
2825 break;
2826 case 2:
9ee6e8bb
PB
2827 break;
2828 }
ad69471c 2829 store_reg(s, rd, tmp);
b7bcbe95
FB
2830 } else {
2831 /* arm->vfp */
ad69471c 2832 tmp = load_reg(s, rd);
9ee6e8bb
PB
2833 if (insn & (1 << 23)) {
2834 /* VDUP */
2835 if (size == 0) {
ad69471c 2836 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2837 } else if (size == 1) {
ad69471c 2838 gen_neon_dup_low16(tmp);
9ee6e8bb 2839 }
cbbccffc 2840 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2841 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2842 tcg_gen_mov_i32(tmp2, tmp);
2843 neon_store_reg(rn, n, tmp2);
2844 }
2845 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2846 } else {
2847 /* VMOV */
2848 switch (size) {
2849 case 0:
ad69471c
PB
2850 tmp2 = neon_load_reg(rn, pass);
2851 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2852 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2853 break;
2854 case 1:
ad69471c
PB
2855 tmp2 = neon_load_reg(rn, pass);
2856 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2857 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2858 break;
2859 case 2:
9ee6e8bb
PB
2860 break;
2861 }
ad69471c 2862 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2863 }
b7bcbe95 2864 }
9ee6e8bb
PB
2865 } else { /* !dp */
2866 if ((insn & 0x6f) != 0x00)
2867 return 1;
2868 rn = VFP_SREG_N(insn);
18c9b560 2869 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2870 /* vfp->arm */
2871 if (insn & (1 << 21)) {
2872 /* system register */
40f137e1 2873 rn >>= 1;
9ee6e8bb 2874
b7bcbe95 2875 switch (rn) {
40f137e1 2876 case ARM_VFP_FPSID:
4373f3ce 2877 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2878 VFP3 restricts all id registers to privileged
2879 accesses. */
2880 if (IS_USER(s)
2881 && arm_feature(env, ARM_FEATURE_VFP3))
2882 return 1;
4373f3ce 2883 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2884 break;
40f137e1 2885 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2886 if (IS_USER(s))
2887 return 1;
4373f3ce 2888 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2889 break;
40f137e1
PB
2890 case ARM_VFP_FPINST:
2891 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2892 /* Not present in VFP3. */
2893 if (IS_USER(s)
2894 || arm_feature(env, ARM_FEATURE_VFP3))
2895 return 1;
4373f3ce 2896 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2897 break;
40f137e1 2898 case ARM_VFP_FPSCR:
601d70b9 2899 if (rd == 15) {
4373f3ce
PB
2900 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2901 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2902 } else {
7d1b0095 2903 tmp = tcg_temp_new_i32();
4373f3ce
PB
2904 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2905 }
b7bcbe95 2906 break;
9ee6e8bb
PB
2907 case ARM_VFP_MVFR0:
2908 case ARM_VFP_MVFR1:
2909 if (IS_USER(s)
06ed5d66 2910 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2911 return 1;
4373f3ce 2912 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2913 break;
b7bcbe95
FB
2914 default:
2915 return 1;
2916 }
2917 } else {
2918 gen_mov_F0_vreg(0, rn);
4373f3ce 2919 tmp = gen_vfp_mrs();
b7bcbe95
FB
2920 }
2921 if (rd == 15) {
b5ff1b31 2922 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2923 gen_set_nzcv(tmp);
7d1b0095 2924 tcg_temp_free_i32(tmp);
4373f3ce
PB
2925 } else {
2926 store_reg(s, rd, tmp);
2927 }
b7bcbe95
FB
2928 } else {
2929 /* arm->vfp */
4373f3ce 2930 tmp = load_reg(s, rd);
b7bcbe95 2931 if (insn & (1 << 21)) {
40f137e1 2932 rn >>= 1;
b7bcbe95
FB
2933 /* system register */
2934 switch (rn) {
40f137e1 2935 case ARM_VFP_FPSID:
9ee6e8bb
PB
2936 case ARM_VFP_MVFR0:
2937 case ARM_VFP_MVFR1:
b7bcbe95
FB
2938 /* Writes are ignored. */
2939 break;
40f137e1 2940 case ARM_VFP_FPSCR:
4373f3ce 2941 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2942 tcg_temp_free_i32(tmp);
b5ff1b31 2943 gen_lookup_tb(s);
b7bcbe95 2944 break;
40f137e1 2945 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2946 if (IS_USER(s))
2947 return 1;
71b3c3de
JR
2948 /* TODO: VFP subarchitecture support.
2949 * For now, keep the EN bit only */
2950 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2951 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2952 gen_lookup_tb(s);
2953 break;
2954 case ARM_VFP_FPINST:
2955 case ARM_VFP_FPINST2:
4373f3ce 2956 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2957 break;
b7bcbe95
FB
2958 default:
2959 return 1;
2960 }
2961 } else {
4373f3ce 2962 gen_vfp_msr(tmp);
b7bcbe95
FB
2963 gen_mov_vreg_F0(0, rn);
2964 }
2965 }
2966 }
2967 } else {
2968 /* data processing */
2969 /* The opcode is in bits 23, 21, 20 and 6. */
2970 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2971 if (dp) {
2972 if (op == 15) {
2973 /* rn is opcode */
2974 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2975 } else {
2976 /* rn is register number */
9ee6e8bb 2977 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2978 }
2979
04595bf6 2980 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2981 /* Integer or single precision destination. */
9ee6e8bb 2982 rd = VFP_SREG_D(insn);
b7bcbe95 2983 } else {
9ee6e8bb 2984 VFP_DREG_D(rd, insn);
b7bcbe95 2985 }
04595bf6
PM
2986 if (op == 15 &&
2987 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2988 /* VCVT from int is always from S reg regardless of dp bit.
2989 * VCVT with immediate frac_bits has same format as SREG_M
2990 */
2991 rm = VFP_SREG_M(insn);
b7bcbe95 2992 } else {
9ee6e8bb 2993 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2994 }
2995 } else {
9ee6e8bb 2996 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2997 if (op == 15 && rn == 15) {
2998 /* Double precision destination. */
9ee6e8bb
PB
2999 VFP_DREG_D(rd, insn);
3000 } else {
3001 rd = VFP_SREG_D(insn);
3002 }
04595bf6
PM
3003 /* NB that we implicitly rely on the encoding for the frac_bits
3004 * in VCVT of fixed to float being the same as that of an SREG_M
3005 */
9ee6e8bb 3006 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3007 }
3008
69d1fc22 3009 veclen = s->vec_len;
b7bcbe95
FB
3010 if (op == 15 && rn > 3)
3011 veclen = 0;
3012
3013 /* Shut up compiler warnings. */
3014 delta_m = 0;
3015 delta_d = 0;
3016 bank_mask = 0;
3b46e624 3017
b7bcbe95
FB
3018 if (veclen > 0) {
3019 if (dp)
3020 bank_mask = 0xc;
3021 else
3022 bank_mask = 0x18;
3023
3024 /* Figure out what type of vector operation this is. */
3025 if ((rd & bank_mask) == 0) {
3026 /* scalar */
3027 veclen = 0;
3028 } else {
3029 if (dp)
69d1fc22 3030 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3031 else
69d1fc22 3032 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3033
3034 if ((rm & bank_mask) == 0) {
3035 /* mixed scalar/vector */
3036 delta_m = 0;
3037 } else {
3038 /* vector */
3039 delta_m = delta_d;
3040 }
3041 }
3042 }
3043
3044 /* Load the initial operands. */
3045 if (op == 15) {
3046 switch (rn) {
3047 case 16:
3048 case 17:
3049 /* Integer source */
3050 gen_mov_F0_vreg(0, rm);
3051 break;
3052 case 8:
3053 case 9:
3054 /* Compare */
3055 gen_mov_F0_vreg(dp, rd);
3056 gen_mov_F1_vreg(dp, rm);
3057 break;
3058 case 10:
3059 case 11:
3060 /* Compare with zero */
3061 gen_mov_F0_vreg(dp, rd);
3062 gen_vfp_F1_ld0(dp);
3063 break;
9ee6e8bb
PB
3064 case 20:
3065 case 21:
3066 case 22:
3067 case 23:
644ad806
PB
3068 case 28:
3069 case 29:
3070 case 30:
3071 case 31:
9ee6e8bb
PB
3072 /* Source and destination the same. */
3073 gen_mov_F0_vreg(dp, rd);
3074 break;
6e0c0ed1
PM
3075 case 4:
3076 case 5:
3077 case 6:
3078 case 7:
3079 /* VCVTB, VCVTT: only present with the halfprec extension,
3080 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3081 */
3082 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3083 return 1;
3084 }
3085 /* Otherwise fall through */
b7bcbe95
FB
3086 default:
3087 /* One source operand. */
3088 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3089 break;
b7bcbe95
FB
3090 }
3091 } else {
3092 /* Two source operands. */
3093 gen_mov_F0_vreg(dp, rn);
3094 gen_mov_F1_vreg(dp, rm);
3095 }
3096
3097 for (;;) {
3098 /* Perform the calculation. */
3099 switch (op) {
605a6aed
PM
3100 case 0: /* VMLA: fd + (fn * fm) */
3101 /* Note that order of inputs to the add matters for NaNs */
3102 gen_vfp_F1_mul(dp);
3103 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3104 gen_vfp_add(dp);
3105 break;
605a6aed 3106 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3107 gen_vfp_mul(dp);
605a6aed
PM
3108 gen_vfp_F1_neg(dp);
3109 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3110 gen_vfp_add(dp);
3111 break;
605a6aed
PM
3112 case 2: /* VNMLS: -fd + (fn * fm) */
3113 /* Note that it isn't valid to replace (-A + B) with (B - A)
3114 * or similar plausible looking simplifications
3115 * because this will give wrong results for NaNs.
3116 */
3117 gen_vfp_F1_mul(dp);
3118 gen_mov_F0_vreg(dp, rd);
3119 gen_vfp_neg(dp);
3120 gen_vfp_add(dp);
b7bcbe95 3121 break;
605a6aed 3122 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3123 gen_vfp_mul(dp);
605a6aed
PM
3124 gen_vfp_F1_neg(dp);
3125 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3126 gen_vfp_neg(dp);
605a6aed 3127 gen_vfp_add(dp);
b7bcbe95
FB
3128 break;
3129 case 4: /* mul: fn * fm */
3130 gen_vfp_mul(dp);
3131 break;
3132 case 5: /* nmul: -(fn * fm) */
3133 gen_vfp_mul(dp);
3134 gen_vfp_neg(dp);
3135 break;
3136 case 6: /* add: fn + fm */
3137 gen_vfp_add(dp);
3138 break;
3139 case 7: /* sub: fn - fm */
3140 gen_vfp_sub(dp);
3141 break;
3142 case 8: /* div: fn / fm */
3143 gen_vfp_div(dp);
3144 break;
da97f52c
PM
3145 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3146 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3147 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3148 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3149 /* These are fused multiply-add, and must be done as one
3150 * floating point operation with no rounding between the
3151 * multiplication and addition steps.
3152 * NB that doing the negations here as separate steps is
3153 * correct: an input NaN should come out with its sign bit
3154 * flipped if it is a negated input.
3155 */
3156 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3157 return 1;
3158 }
3159 if (dp) {
3160 TCGv_ptr fpst;
3161 TCGv_i64 frd;
3162 if (op & 1) {
3163 /* VFNMS, VFMS */
3164 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3165 }
3166 frd = tcg_temp_new_i64();
3167 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3168 if (op & 2) {
3169 /* VFNMA, VFNMS */
3170 gen_helper_vfp_negd(frd, frd);
3171 }
3172 fpst = get_fpstatus_ptr(0);
3173 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3174 cpu_F1d, frd, fpst);
3175 tcg_temp_free_ptr(fpst);
3176 tcg_temp_free_i64(frd);
3177 } else {
3178 TCGv_ptr fpst;
3179 TCGv_i32 frd;
3180 if (op & 1) {
3181 /* VFNMS, VFMS */
3182 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3183 }
3184 frd = tcg_temp_new_i32();
3185 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3186 if (op & 2) {
3187 gen_helper_vfp_negs(frd, frd);
3188 }
3189 fpst = get_fpstatus_ptr(0);
3190 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3191 cpu_F1s, frd, fpst);
3192 tcg_temp_free_ptr(fpst);
3193 tcg_temp_free_i32(frd);
3194 }
3195 break;
9ee6e8bb
PB
3196 case 14: /* fconst */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
3199
3200 n = (insn << 12) & 0x80000000;
3201 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3202 if (dp) {
3203 if (i & 0x40)
3204 i |= 0x3f80;
3205 else
3206 i |= 0x4000;
3207 n |= i << 16;
4373f3ce 3208 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3209 } else {
3210 if (i & 0x40)
3211 i |= 0x780;
3212 else
3213 i |= 0x800;
3214 n |= i << 19;
5b340b51 3215 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3216 }
9ee6e8bb 3217 break;
b7bcbe95
FB
3218 case 15: /* extension space */
3219 switch (rn) {
3220 case 0: /* cpy */
3221 /* no-op */
3222 break;
3223 case 1: /* abs */
3224 gen_vfp_abs(dp);
3225 break;
3226 case 2: /* neg */
3227 gen_vfp_neg(dp);
3228 break;
3229 case 3: /* sqrt */
3230 gen_vfp_sqrt(dp);
3231 break;
60011498 3232 case 4: /* vcvtb.f32.f16 */
60011498
PB
3233 tmp = gen_vfp_mrs();
3234 tcg_gen_ext16u_i32(tmp, tmp);
3235 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3236 tcg_temp_free_i32(tmp);
60011498
PB
3237 break;
3238 case 5: /* vcvtt.f32.f16 */
60011498
PB
3239 tmp = gen_vfp_mrs();
3240 tcg_gen_shri_i32(tmp, tmp, 16);
3241 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3242 tcg_temp_free_i32(tmp);
60011498
PB
3243 break;
3244 case 6: /* vcvtb.f16.f32 */
7d1b0095 3245 tmp = tcg_temp_new_i32();
60011498
PB
3246 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3247 gen_mov_F0_vreg(0, rd);
3248 tmp2 = gen_vfp_mrs();
3249 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3250 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3251 tcg_temp_free_i32(tmp2);
60011498
PB
3252 gen_vfp_msr(tmp);
3253 break;
3254 case 7: /* vcvtt.f16.f32 */
7d1b0095 3255 tmp = tcg_temp_new_i32();
60011498
PB
3256 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3257 tcg_gen_shli_i32(tmp, tmp, 16);
3258 gen_mov_F0_vreg(0, rd);
3259 tmp2 = gen_vfp_mrs();
3260 tcg_gen_ext16u_i32(tmp2, tmp2);
3261 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3262 tcg_temp_free_i32(tmp2);
60011498
PB
3263 gen_vfp_msr(tmp);
3264 break;
b7bcbe95
FB
3265 case 8: /* cmp */
3266 gen_vfp_cmp(dp);
3267 break;
3268 case 9: /* cmpe */
3269 gen_vfp_cmpe(dp);
3270 break;
3271 case 10: /* cmpz */
3272 gen_vfp_cmp(dp);
3273 break;
3274 case 11: /* cmpez */
3275 gen_vfp_F1_ld0(dp);
3276 gen_vfp_cmpe(dp);
3277 break;
3278 case 15: /* single<->double conversion */
3279 if (dp)
4373f3ce 3280 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3281 else
4373f3ce 3282 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3283 break;
3284 case 16: /* fuito */
5500b06c 3285 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3286 break;
3287 case 17: /* fsito */
5500b06c 3288 gen_vfp_sito(dp, 0);
b7bcbe95 3289 break;
9ee6e8bb
PB
3290 case 20: /* fshto */
3291 if (!arm_feature(env, ARM_FEATURE_VFP3))
3292 return 1;
5500b06c 3293 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3294 break;
3295 case 21: /* fslto */
3296 if (!arm_feature(env, ARM_FEATURE_VFP3))
3297 return 1;
5500b06c 3298 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3299 break;
3300 case 22: /* fuhto */
3301 if (!arm_feature(env, ARM_FEATURE_VFP3))
3302 return 1;
5500b06c 3303 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3304 break;
3305 case 23: /* fulto */
3306 if (!arm_feature(env, ARM_FEATURE_VFP3))
3307 return 1;
5500b06c 3308 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3309 break;
b7bcbe95 3310 case 24: /* ftoui */
5500b06c 3311 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3312 break;
3313 case 25: /* ftouiz */
5500b06c 3314 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3315 break;
3316 case 26: /* ftosi */
5500b06c 3317 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3318 break;
3319 case 27: /* ftosiz */
5500b06c 3320 gen_vfp_tosiz(dp, 0);
b7bcbe95 3321 break;
9ee6e8bb
PB
3322 case 28: /* ftosh */
3323 if (!arm_feature(env, ARM_FEATURE_VFP3))
3324 return 1;
5500b06c 3325 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3326 break;
3327 case 29: /* ftosl */
3328 if (!arm_feature(env, ARM_FEATURE_VFP3))
3329 return 1;
5500b06c 3330 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3331 break;
3332 case 30: /* ftouh */
3333 if (!arm_feature(env, ARM_FEATURE_VFP3))
3334 return 1;
5500b06c 3335 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3336 break;
3337 case 31: /* ftoul */
3338 if (!arm_feature(env, ARM_FEATURE_VFP3))
3339 return 1;
5500b06c 3340 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3341 break;
b7bcbe95 3342 default: /* undefined */
b7bcbe95
FB
3343 return 1;
3344 }
3345 break;
3346 default: /* undefined */
b7bcbe95
FB
3347 return 1;
3348 }
3349
3350 /* Write back the result. */
3351 if (op == 15 && (rn >= 8 && rn <= 11))
3352 ; /* Comparison, do nothing. */
04595bf6
PM
3353 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3354 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3355 gen_mov_vreg_F0(0, rd);
3356 else if (op == 15 && rn == 15)
3357 /* conversion */
3358 gen_mov_vreg_F0(!dp, rd);
3359 else
3360 gen_mov_vreg_F0(dp, rd);
3361
3362 /* break out of the loop if we have finished */
3363 if (veclen == 0)
3364 break;
3365
3366 if (op == 15 && delta_m == 0) {
3367 /* single source one-many */
3368 while (veclen--) {
3369 rd = ((rd + delta_d) & (bank_mask - 1))
3370 | (rd & bank_mask);
3371 gen_mov_vreg_F0(dp, rd);
3372 }
3373 break;
3374 }
3375 /* Setup the next operands. */
3376 veclen--;
3377 rd = ((rd + delta_d) & (bank_mask - 1))
3378 | (rd & bank_mask);
3379
3380 if (op == 15) {
3381 /* One source operand. */
3382 rm = ((rm + delta_m) & (bank_mask - 1))
3383 | (rm & bank_mask);
3384 gen_mov_F0_vreg(dp, rm);
3385 } else {
3386 /* Two source operands. */
3387 rn = ((rn + delta_d) & (bank_mask - 1))
3388 | (rn & bank_mask);
3389 gen_mov_F0_vreg(dp, rn);
3390 if (delta_m) {
3391 rm = ((rm + delta_m) & (bank_mask - 1))
3392 | (rm & bank_mask);
3393 gen_mov_F1_vreg(dp, rm);
3394 }
3395 }
3396 }
3397 }
3398 break;
3399 case 0xc:
3400 case 0xd:
8387da81 3401 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3402 /* two-register transfer */
3403 rn = (insn >> 16) & 0xf;
3404 rd = (insn >> 12) & 0xf;
3405 if (dp) {
9ee6e8bb
PB
3406 VFP_DREG_M(rm, insn);
3407 } else {
3408 rm = VFP_SREG_M(insn);
3409 }
b7bcbe95 3410
18c9b560 3411 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3412 /* vfp->arm */
3413 if (dp) {
4373f3ce
PB
3414 gen_mov_F0_vreg(0, rm * 2);
3415 tmp = gen_vfp_mrs();
3416 store_reg(s, rd, tmp);
3417 gen_mov_F0_vreg(0, rm * 2 + 1);
3418 tmp = gen_vfp_mrs();
3419 store_reg(s, rn, tmp);
b7bcbe95
FB
3420 } else {
3421 gen_mov_F0_vreg(0, rm);
4373f3ce 3422 tmp = gen_vfp_mrs();
8387da81 3423 store_reg(s, rd, tmp);
b7bcbe95 3424 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3425 tmp = gen_vfp_mrs();
8387da81 3426 store_reg(s, rn, tmp);
b7bcbe95
FB
3427 }
3428 } else {
3429 /* arm->vfp */
3430 if (dp) {
4373f3ce
PB
3431 tmp = load_reg(s, rd);
3432 gen_vfp_msr(tmp);
3433 gen_mov_vreg_F0(0, rm * 2);
3434 tmp = load_reg(s, rn);
3435 gen_vfp_msr(tmp);
3436 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3437 } else {
8387da81 3438 tmp = load_reg(s, rd);
4373f3ce 3439 gen_vfp_msr(tmp);
b7bcbe95 3440 gen_mov_vreg_F0(0, rm);
8387da81 3441 tmp = load_reg(s, rn);
4373f3ce 3442 gen_vfp_msr(tmp);
b7bcbe95
FB
3443 gen_mov_vreg_F0(0, rm + 1);
3444 }
3445 }
3446 } else {
3447 /* Load/store */
3448 rn = (insn >> 16) & 0xf;
3449 if (dp)
9ee6e8bb 3450 VFP_DREG_D(rd, insn);
b7bcbe95 3451 else
9ee6e8bb 3452 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3453 if ((insn & 0x01200000) == 0x01000000) {
3454 /* Single load/store */
3455 offset = (insn & 0xff) << 2;
3456 if ((insn & (1 << 23)) == 0)
3457 offset = -offset;
934814f1
PM
3458 if (s->thumb && rn == 15) {
3459 /* This is actually UNPREDICTABLE */
3460 addr = tcg_temp_new_i32();
3461 tcg_gen_movi_i32(addr, s->pc & ~2);
3462 } else {
3463 addr = load_reg(s, rn);
3464 }
312eea9f 3465 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3466 if (insn & (1 << 20)) {
312eea9f 3467 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3468 gen_mov_vreg_F0(dp, rd);
3469 } else {
3470 gen_mov_F0_vreg(dp, rd);
312eea9f 3471 gen_vfp_st(s, dp, addr);
b7bcbe95 3472 }
7d1b0095 3473 tcg_temp_free_i32(addr);
b7bcbe95
FB
3474 } else {
3475 /* load/store multiple */
934814f1 3476 int w = insn & (1 << 21);
b7bcbe95
FB
3477 if (dp)
3478 n = (insn >> 1) & 0x7f;
3479 else
3480 n = insn & 0xff;
3481
934814f1
PM
3482 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3483 /* P == U , W == 1 => UNDEF */
3484 return 1;
3485 }
3486 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3487 /* UNPREDICTABLE cases for bad immediates: we choose to
3488 * UNDEF to avoid generating huge numbers of TCG ops
3489 */
3490 return 1;
3491 }
3492 if (rn == 15 && w) {
3493 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3494 return 1;
3495 }
3496
3497 if (s->thumb && rn == 15) {
3498 /* This is actually UNPREDICTABLE */
3499 addr = tcg_temp_new_i32();
3500 tcg_gen_movi_i32(addr, s->pc & ~2);
3501 } else {
3502 addr = load_reg(s, rn);
3503 }
b7bcbe95 3504 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3505 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3506
3507 if (dp)
3508 offset = 8;
3509 else
3510 offset = 4;
3511 for (i = 0; i < n; i++) {
18c9b560 3512 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3513 /* load */
312eea9f 3514 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3515 gen_mov_vreg_F0(dp, rd + i);
3516 } else {
3517 /* store */
3518 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3519 gen_vfp_st(s, dp, addr);
b7bcbe95 3520 }
312eea9f 3521 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3522 }
934814f1 3523 if (w) {
b7bcbe95
FB
3524 /* writeback */
3525 if (insn & (1 << 24))
3526 offset = -offset * n;
3527 else if (dp && (insn & 1))
3528 offset = 4;
3529 else
3530 offset = 0;
3531
3532 if (offset != 0)
3533 tcg_gen_addi_i32(addr, addr, offset);
3534 store_reg(s, rn, addr);
3535 } else {
7d1b0095 3536 tcg_temp_free_i32(addr);
b7bcbe95
FB
3537 }
3538 }
3539 }
3540 break;
3541 default:
3542 /* Should never happen. */
3543 return 1;
3544 }
3545 return 0;
3546}
3547
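/* Illustrative sketch (plain C, not QEMU API): the single-precision constant
 * built by the fconst arm of case 14 above.  The sign comes from insn bit 19,
 * and the 7-bit immediate is widened into the exponent/fraction fields; the
 * double-precision arm builds the top word of the double the same way with
 * wider fields.  The function name is hypothetical. */
static uint32_t example_vfp_fconst32(uint32_t insn)
{
    uint32_t n = (insn << 12) & 0x80000000;
    uint32_t i = ((insn >> 12) & 0x70) | (insn & 0xf);
    if (i & 0x40) {
        i |= 0x780;
    } else {
        i |= 0x800;
    }
    return n | (i << 19);
}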
6e256c93 3548static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3549{
6e256c93
FB
3550 TranslationBlock *tb;
3551
3552 tb = s->tb;
3553 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3554 tcg_gen_goto_tb(n);
8984bd2e 3555 gen_set_pc_im(dest);
4b4a72e5 3556 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3557 } else {
8984bd2e 3558 gen_set_pc_im(dest);
57fec1fe 3559 tcg_gen_exit_tb(0);
6e256c93 3560 }
c53be334
FB
3561}
3562
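/* Illustrative sketch (plain C, not QEMU API): gen_goto_tb above only chains
 * the TB directly when the destination stays on the same guest page.  The
 * page mask here is a hypothetical 4 KiB one; the real value comes from
 * TARGET_PAGE_MASK. */
#define EXAMPLE_PAGE_MASK (~(uint32_t)0xfff)

static int example_same_page(uint32_t tb_pc, uint32_t dest)
{
    return (tb_pc & EXAMPLE_PAGE_MASK) == (dest & EXAMPLE_PAGE_MASK);
}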
8aaca4c0
FB
3563static inline void gen_jmp (DisasContext *s, uint32_t dest)
3564{
551bd27f 3565 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3566 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3567 if (s->thumb)
d9ba4830
PB
3568 dest |= 1;
3569 gen_bx_im(s, dest);
8aaca4c0 3570 } else {
6e256c93 3571 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3572 s->is_jmp = DISAS_TB_JUMP;
3573 }
3574}
3575
d9ba4830 3576static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3577{
ee097184 3578 if (x)
d9ba4830 3579 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3580 else
d9ba4830 3581 gen_sxth(t0);
ee097184 3582 if (y)
d9ba4830 3583 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3584 else
3585 gen_sxth(t1);
3586 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3587}
3588
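/* Illustrative sketch (plain C, not QEMU API): the value-level effect of
 * gen_mulxy above.  x and y pick the top (1) or bottom (0) halfword of each
 * operand, which is sign-extended before the 32-bit multiply.  The function
 * name is hypothetical. */
static int32_t example_mulxy(uint32_t t0, uint32_t t1, int x, int y)
{
    int32_t a = (int16_t)(x ? (t0 >> 16) : t0);
    int32_t b = (int16_t)(y ? (t1 >> 16) : t1);
    return a * b;
}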
3589/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3590static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3591 uint32_t mask;
3592
3593 mask = 0;
3594 if (flags & (1 << 0))
3595 mask |= 0xff;
3596 if (flags & (1 << 1))
3597 mask |= 0xff00;
3598 if (flags & (1 << 2))
3599 mask |= 0xff0000;
3600 if (flags & (1 << 3))
3601 mask |= 0xff000000;
9ee6e8bb 3602
2ae23e75 3603 /* Mask out undefined bits. */
9ee6e8bb 3604 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3605 if (!arm_feature(env, ARM_FEATURE_V4T))
3606 mask &= ~CPSR_T;
3607 if (!arm_feature(env, ARM_FEATURE_V5))
3608 mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3609 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3610 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3611 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3612 mask &= ~CPSR_IT;
9ee6e8bb 3613 /* Mask out execution state bits. */
2ae23e75 3614 if (!spsr)
e160c51c 3615 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3616 /* Mask out privileged bits. */
3617 if (IS_USER(s))
9ee6e8bb 3618 mask &= CPSR_USER;
b5ff1b31
FB
3619 return mask;
3620}
3621
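/* Illustrative sketch (plain C, not QEMU API): the raw byte mask built from
 * the 4-bit MSR field mask (c/x/s/f) at the top of msr_mask above, before the
 * feature-dependent bits are cleared.  The function name is hypothetical. */
static uint32_t example_msr_byte_mask(int flags)
{
    uint32_t mask = 0;
    if (flags & (1 << 0)) mask |= 0x000000ff;   /* c: control field */
    if (flags & (1 << 1)) mask |= 0x0000ff00;   /* x: extension field */
    if (flags & (1 << 2)) mask |= 0x00ff0000;   /* s: status field */
    if (flags & (1 << 3)) mask |= 0xff000000;   /* f: flags field */
    return mask;
}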
2fbac54b
FN
3622/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3623static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3624{
d9ba4830 3625 TCGv tmp;
b5ff1b31
FB
3626 if (spsr) {
3627 /* ??? This is also undefined in system mode. */
3628 if (IS_USER(s))
3629 return 1;
d9ba4830
PB
3630
3631 tmp = load_cpu_field(spsr);
3632 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3633 tcg_gen_andi_i32(t0, t0, mask);
3634 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3635 store_cpu_field(tmp, spsr);
b5ff1b31 3636 } else {
2fbac54b 3637 gen_set_cpsr(t0, mask);
b5ff1b31 3638 }
7d1b0095 3639 tcg_temp_free_i32(t0);
b5ff1b31
FB
3640 gen_lookup_tb(s);
3641 return 0;
3642}
3643
2fbac54b
FN
3644/* Returns nonzero if access to the PSR is not permitted. */
3645static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3646{
3647 TCGv tmp;
7d1b0095 3648 tmp = tcg_temp_new_i32();
2fbac54b
FN
3649 tcg_gen_movi_i32(tmp, val);
3650 return gen_set_psr(s, mask, spsr, tmp);
3651}
3652
e9bb4aa9
JR
3653/* Generate an old-style exception return. Marks pc as dead. */
3654static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3655{
d9ba4830 3656 TCGv tmp;
e9bb4aa9 3657 store_reg(s, 15, pc);
d9ba4830
PB
3658 tmp = load_cpu_field(spsr);
3659 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3660 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3661 s->is_jmp = DISAS_UPDATE;
3662}
3663
b0109805
PB
3664/* Generate a v6 exception return. Marks both values as dead. */
3665static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3666{
b0109805 3667 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3668 tcg_temp_free_i32(cpsr);
b0109805 3669 store_reg(s, 15, pc);
9ee6e8bb
PB
3670 s->is_jmp = DISAS_UPDATE;
3671}
3b46e624 3672
9ee6e8bb
PB
3673static inline void
3674gen_set_condexec (DisasContext *s)
3675{
3676 if (s->condexec_mask) {
8f01245e 3677 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3678 TCGv tmp = tcg_temp_new_i32();
8f01245e 3679 tcg_gen_movi_i32(tmp, val);
d9ba4830 3680 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3681 }
3682}
3b46e624 3683
bc4a0de0
PM
3684static void gen_exception_insn(DisasContext *s, int offset, int excp)
3685{
3686 gen_set_condexec(s);
3687 gen_set_pc_im(s->pc - offset);
3688 gen_exception(excp);
3689 s->is_jmp = DISAS_JUMP;
3690}
3691
9ee6e8bb
PB
3692static void gen_nop_hint(DisasContext *s, int val)
3693{
3694 switch (val) {
3695 case 3: /* wfi */
8984bd2e 3696 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3697 s->is_jmp = DISAS_WFI;
3698 break;
3699 case 2: /* wfe */
3700 case 4: /* sev */
3701 /* TODO: Implement SEV and WFE. May help SMP performance. */
3702 default: /* nop */
3703 break;
3704 }
3705}
99c475ab 3706
ad69471c 3707#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3708
62698be3 3709static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3710{
3711 switch (size) {
dd8fbd78
FN
3712 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3713 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3714 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3715 default: abort();
9ee6e8bb 3716 }
9ee6e8bb
PB
3717}
3718
dd8fbd78 3719static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3720{
3721 switch (size) {
dd8fbd78
FN
3722 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3723 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3724 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3725 default: return;
3726 }
3727}
3728
3729/* 32-bit pairwise ops end up the same as the elementwise versions. */
3730#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3731#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3732#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3733#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3734
ad69471c
PB
3735#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3736 switch ((size << 1) | u) { \
3737 case 0: \
dd8fbd78 3738 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3739 break; \
3740 case 1: \
dd8fbd78 3741 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3742 break; \
3743 case 2: \
dd8fbd78 3744 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3745 break; \
3746 case 3: \
dd8fbd78 3747 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3748 break; \
3749 case 4: \
dd8fbd78 3750 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3751 break; \
3752 case 5: \
dd8fbd78 3753 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3754 break; \
3755 default: return 1; \
3756 }} while (0)
9ee6e8bb
PB
3757
3758#define GEN_NEON_INTEGER_OP(name) do { \
3759 switch ((size << 1) | u) { \
ad69471c 3760 case 0: \
dd8fbd78 3761 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3762 break; \
3763 case 1: \
dd8fbd78 3764 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3765 break; \
3766 case 2: \
dd8fbd78 3767 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3768 break; \
3769 case 3: \
dd8fbd78 3770 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3771 break; \
3772 case 4: \
dd8fbd78 3773 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3774 break; \
3775 case 5: \
dd8fbd78 3776 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3777 break; \
9ee6e8bb
PB
3778 default: return 1; \
3779 }} while (0)
3780
dd8fbd78 3781static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3782{
7d1b0095 3783 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3784 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3785 return tmp;
9ee6e8bb
PB
3786}
3787
dd8fbd78 3788static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3789{
dd8fbd78 3790 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3791 tcg_temp_free_i32(var);
9ee6e8bb
PB
3792}
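/* Note (a sketch of intent inferred from the uses below): vfp.scratch[]
 * is plain spill space in CPUARMState. The Neon decoder stores into it
 * when a pairwise operation has rd == rm, so that results written in
 * earlier passes cannot clobber source elements that are still needed.
 */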
3793
dd8fbd78 3794static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3795{
dd8fbd78 3796 TCGv tmp;
9ee6e8bb 3797 if (size == 1) {
0fad6efc
PM
3798 tmp = neon_load_reg(reg & 7, reg >> 4);
3799 if (reg & 8) {
dd8fbd78 3800 gen_neon_dup_high16(tmp);
0fad6efc
PM
3801 } else {
3802 gen_neon_dup_low16(tmp);
dd8fbd78 3803 }
0fad6efc
PM
3804 } else {
3805 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3806 }
dd8fbd78 3807 return tmp;
9ee6e8bb
PB
3808}
3809
02acedf9 3810static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3811{
02acedf9 3812 TCGv tmp, tmp2;
600b828c 3813 if (!q && size == 2) {
02acedf9
PM
3814 return 1;
3815 }
3816 tmp = tcg_const_i32(rd);
3817 tmp2 = tcg_const_i32(rm);
3818 if (q) {
3819 switch (size) {
3820 case 0:
02da0b2d 3821 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3822 break;
3823 case 1:
02da0b2d 3824 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3825 break;
3826 case 2:
02da0b2d 3827 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3828 break;
3829 default:
3830 abort();
3831 }
3832 } else {
3833 switch (size) {
3834 case 0:
02da0b2d 3835 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3836 break;
3837 case 1:
02da0b2d 3838 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3839 break;
3840 default:
3841 abort();
3842 }
3843 }
3844 tcg_temp_free_i32(tmp);
3845 tcg_temp_free_i32(tmp2);
3846 return 0;
19457615
FN
3847}
3848
d68a6f3a 3849static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3850{
3851 TCGv tmp, tmp2;
600b828c 3852 if (!q && size == 2) {
d68a6f3a
PM
3853 return 1;
3854 }
3855 tmp = tcg_const_i32(rd);
3856 tmp2 = tcg_const_i32(rm);
3857 if (q) {
3858 switch (size) {
3859 case 0:
02da0b2d 3860 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3861 break;
3862 case 1:
02da0b2d 3863 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3864 break;
3865 case 2:
02da0b2d 3866 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3867 break;
3868 default:
3869 abort();
3870 }
3871 } else {
3872 switch (size) {
3873 case 0:
02da0b2d 3874 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3875 break;
3876 case 1:
02da0b2d 3877 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3878 break;
3879 default:
3880 abort();
3881 }
3882 }
3883 tcg_temp_free_i32(tmp);
3884 tcg_temp_free_i32(tmp2);
3885 return 0;
19457615
FN
3886}
3887
19457615
FN
3888static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3889{
3890 TCGv rd, tmp;
3891
7d1b0095
PM
3892 rd = tcg_temp_new_i32();
3893 tmp = tcg_temp_new_i32();
19457615
FN
3894
3895 tcg_gen_shli_i32(rd, t0, 8);
3896 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3897 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3898 tcg_gen_or_i32(rd, rd, tmp);
3899
3900 tcg_gen_shri_i32(t1, t1, 8);
3901 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3902 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3903 tcg_gen_or_i32(t1, t1, tmp);
3904 tcg_gen_mov_i32(t0, rd);
3905
7d1b0095
PM
3906 tcg_temp_free_i32(tmp);
3907 tcg_temp_free_i32(rd);
19457615
FN
3908}
3909
3910static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3911{
3912 TCGv rd, tmp;
3913
7d1b0095
PM
3914 rd = tcg_temp_new_i32();
3915 tmp = tcg_temp_new_i32();
19457615
FN
3916
3917 tcg_gen_shli_i32(rd, t0, 16);
3918 tcg_gen_andi_i32(tmp, t1, 0xffff);
3919 tcg_gen_or_i32(rd, rd, tmp);
3920 tcg_gen_shri_i32(t1, t1, 16);
3921 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3922 tcg_gen_or_i32(t1, t1, tmp);
3923 tcg_gen_mov_i32(t0, rd);
3924
7d1b0095
PM
3925 tcg_temp_free_i32(tmp);
3926 tcg_temp_free_i32(rd);
19457615
FN
3927}
3928
3929
9ee6e8bb
PB
3930static struct {
3931 int nregs;
3932 int interleave;
3933 int spacing;
3934} neon_ls_element_type[11] = {
3935 {4, 4, 1},
3936 {4, 4, 2},
3937 {4, 1, 1},
3938 {4, 2, 1},
3939 {3, 3, 1},
3940 {3, 3, 2},
3941 {3, 1, 1},
3942 {1, 1, 1},
3943 {2, 2, 1},
3944 {2, 2, 2},
3945 {2, 1, 1}
3946};
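/* How the table above is indexed (a sketch): the decoder below uses the
 * op field, bits [11:8] of the instruction, directly as the index, so
 * e.g. op == 0 describes a 4-register interleave-4 VLD4/VST4 and
 * op == 7 a single-register VLD1/VST1; op values above 10 are rejected
 * by the "op > 10" check below.
 */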
3947
3948/* Translate a NEON load/store element instruction. Return nonzero if the
3949 instruction is invalid. */
0ecb72a5 3950static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3951{
3952 int rd, rn, rm;
3953 int op;
3954 int nregs;
3955 int interleave;
84496233 3956 int spacing;
9ee6e8bb
PB
3957 int stride;
3958 int size;
3959 int reg;
3960 int pass;
3961 int load;
3962 int shift;
9ee6e8bb 3963 int n;
1b2b1e54 3964 TCGv addr;
b0109805 3965 TCGv tmp;
8f8e3aa4 3966 TCGv tmp2;
84496233 3967 TCGv_i64 tmp64;
9ee6e8bb 3968
5df8bac1 3969 if (!s->vfp_enabled)
9ee6e8bb
PB
3970 return 1;
3971 VFP_DREG_D(rd, insn);
3972 rn = (insn >> 16) & 0xf;
3973 rm = insn & 0xf;
3974 load = (insn & (1 << 21)) != 0;
3975 if ((insn & (1 << 23)) == 0) {
3976 /* Load store all elements. */
3977 op = (insn >> 8) & 0xf;
3978 size = (insn >> 6) & 3;
84496233 3979 if (op > 10)
9ee6e8bb 3980 return 1;
f2dd89d0
PM
3981 /* Catch UNDEF cases for bad values of align field */
3982 switch (op & 0xc) {
3983 case 4:
3984 if (((insn >> 5) & 1) == 1) {
3985 return 1;
3986 }
3987 break;
3988 case 8:
3989 if (((insn >> 4) & 3) == 3) {
3990 return 1;
3991 }
3992 break;
3993 default:
3994 break;
3995 }
9ee6e8bb
PB
3996 nregs = neon_ls_element_type[op].nregs;
3997 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3998 spacing = neon_ls_element_type[op].spacing;
3999 if (size == 3 && (interleave | spacing) != 1)
4000 return 1;
e318a60b 4001 addr = tcg_temp_new_i32();
dcc65026 4002 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4003 stride = (1 << size) * interleave;
4004 for (reg = 0; reg < nregs; reg++) {
4005 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4006 load_reg_var(s, addr, rn);
4007 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4008 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4009 load_reg_var(s, addr, rn);
4010 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4011 }
84496233
JR
4012 if (size == 3) {
4013 if (load) {
4014 tmp64 = gen_ld64(addr, IS_USER(s));
4015 neon_store_reg64(tmp64, rd);
4016 tcg_temp_free_i64(tmp64);
4017 } else {
4018 tmp64 = tcg_temp_new_i64();
4019 neon_load_reg64(tmp64, rd);
4020 gen_st64(tmp64, addr, IS_USER(s));
4021 }
4022 tcg_gen_addi_i32(addr, addr, stride);
4023 } else {
4024 for (pass = 0; pass < 2; pass++) {
4025 if (size == 2) {
4026 if (load) {
4027 tmp = gen_ld32(addr, IS_USER(s));
4028 neon_store_reg(rd, pass, tmp);
4029 } else {
4030 tmp = neon_load_reg(rd, pass);
4031 gen_st32(tmp, addr, IS_USER(s));
4032 }
1b2b1e54 4033 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4034 } else if (size == 1) {
4035 if (load) {
4036 tmp = gen_ld16u(addr, IS_USER(s));
4037 tcg_gen_addi_i32(addr, addr, stride);
4038 tmp2 = gen_ld16u(addr, IS_USER(s));
4039 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4040 tcg_gen_shli_i32(tmp2, tmp2, 16);
4041 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4042 tcg_temp_free_i32(tmp2);
84496233
JR
4043 neon_store_reg(rd, pass, tmp);
4044 } else {
4045 tmp = neon_load_reg(rd, pass);
7d1b0095 4046 tmp2 = tcg_temp_new_i32();
84496233
JR
4047 tcg_gen_shri_i32(tmp2, tmp, 16);
4048 gen_st16(tmp, addr, IS_USER(s));
4049 tcg_gen_addi_i32(addr, addr, stride);
4050 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 4051 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4052 }
84496233
JR
4053 } else /* size == 0 */ {
4054 if (load) {
4055 TCGV_UNUSED(tmp2);
4056 for (n = 0; n < 4; n++) {
4057 tmp = gen_ld8u(addr, IS_USER(s));
4058 tcg_gen_addi_i32(addr, addr, stride);
4059 if (n == 0) {
4060 tmp2 = tmp;
4061 } else {
41ba8341
PB
4062 tcg_gen_shli_i32(tmp, tmp, n * 8);
4063 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4064 tcg_temp_free_i32(tmp);
84496233 4065 }
9ee6e8bb 4066 }
84496233
JR
4067 neon_store_reg(rd, pass, tmp2);
4068 } else {
4069 tmp2 = neon_load_reg(rd, pass);
4070 for (n = 0; n < 4; n++) {
7d1b0095 4071 tmp = tcg_temp_new_i32();
84496233
JR
4072 if (n == 0) {
4073 tcg_gen_mov_i32(tmp, tmp2);
4074 } else {
4075 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4076 }
4077 gen_st8(tmp, addr, IS_USER(s));
4078 tcg_gen_addi_i32(addr, addr, stride);
4079 }
7d1b0095 4080 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4081 }
4082 }
4083 }
4084 }
84496233 4085 rd += spacing;
9ee6e8bb 4086 }
e318a60b 4087 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4088 stride = nregs * 8;
4089 } else {
4090 size = (insn >> 10) & 3;
4091 if (size == 3) {
4092 /* Load single element to all lanes. */
8e18cde3
PM
4093 int a = (insn >> 4) & 1;
4094 if (!load) {
9ee6e8bb 4095 return 1;
8e18cde3 4096 }
9ee6e8bb
PB
4097 size = (insn >> 6) & 3;
4098 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4099
4100 if (size == 3) {
4101 if (nregs != 4 || a == 0) {
9ee6e8bb 4102 return 1;
99c475ab 4103 }
8e18cde3
PM
4104 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4105 size = 2;
4106 }
4107 if (nregs == 1 && a == 1 && size == 0) {
4108 return 1;
4109 }
4110 if (nregs == 3 && a == 1) {
4111 return 1;
4112 }
e318a60b 4113 addr = tcg_temp_new_i32();
8e18cde3
PM
4114 load_reg_var(s, addr, rn);
4115 if (nregs == 1) {
4116 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4117 tmp = gen_load_and_replicate(s, addr, size);
4118 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4119 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4120 if (insn & (1 << 5)) {
4121 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4122 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4123 }
4124 tcg_temp_free_i32(tmp);
4125 } else {
4126 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4127 stride = (insn & (1 << 5)) ? 2 : 1;
4128 for (reg = 0; reg < nregs; reg++) {
4129 tmp = gen_load_and_replicate(s, addr, size);
4130 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4131 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4132 tcg_temp_free_i32(tmp);
4133 tcg_gen_addi_i32(addr, addr, 1 << size);
4134 rd += stride;
4135 }
9ee6e8bb 4136 }
e318a60b 4137 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4138 stride = (1 << size) * nregs;
4139 } else {
4140 /* Single element. */
93262b16 4141 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4142 pass = (insn >> 7) & 1;
4143 switch (size) {
4144 case 0:
4145 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4146 stride = 1;
4147 break;
4148 case 1:
4149 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4150 stride = (insn & (1 << 5)) ? 2 : 1;
4151 break;
4152 case 2:
4153 shift = 0;
9ee6e8bb
PB
4154 stride = (insn & (1 << 6)) ? 2 : 1;
4155 break;
4156 default:
4157 abort();
4158 }
4159 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4160 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4161 switch (nregs) {
4162 case 1:
4163 if (((idx & (1 << size)) != 0) ||
4164 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4165 return 1;
4166 }
4167 break;
4168 case 3:
4169 if ((idx & 1) != 0) {
4170 return 1;
4171 }
4172 /* fall through */
4173 case 2:
4174 if (size == 2 && (idx & 2) != 0) {
4175 return 1;
4176 }
4177 break;
4178 case 4:
4179 if ((size == 2) && ((idx & 3) == 3)) {
4180 return 1;
4181 }
4182 break;
4183 default:
4184 abort();
4185 }
4186 if ((rd + stride * (nregs - 1)) > 31) {
4187 /* Attempts to write off the end of the register file
4188 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4189 * the neon_load_reg() would write off the end of the array.
4190 */
4191 return 1;
4192 }
e318a60b 4193 addr = tcg_temp_new_i32();
dcc65026 4194 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4195 for (reg = 0; reg < nregs; reg++) {
4196 if (load) {
9ee6e8bb
PB
4197 switch (size) {
4198 case 0:
1b2b1e54 4199 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4200 break;
4201 case 1:
1b2b1e54 4202 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4203 break;
4204 case 2:
1b2b1e54 4205 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4206 break;
a50f5b91
PB
4207 default: /* Avoid compiler warnings. */
4208 abort();
9ee6e8bb
PB
4209 }
4210 if (size != 2) {
8f8e3aa4
PB
4211 tmp2 = neon_load_reg(rd, pass);
4212 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4213 tcg_temp_free_i32(tmp2);
9ee6e8bb 4214 }
8f8e3aa4 4215 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4216 } else { /* Store */
8f8e3aa4
PB
4217 tmp = neon_load_reg(rd, pass);
4218 if (shift)
4219 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4220 switch (size) {
4221 case 0:
1b2b1e54 4222 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4223 break;
4224 case 1:
1b2b1e54 4225 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4226 break;
4227 case 2:
1b2b1e54 4228 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4229 break;
99c475ab 4230 }
99c475ab 4231 }
9ee6e8bb 4232 rd += stride;
1b2b1e54 4233 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4234 }
e318a60b 4235 tcg_temp_free_i32(addr);
9ee6e8bb 4236 stride = nregs * (1 << size);
99c475ab 4237 }
9ee6e8bb
PB
4238 }
4239 if (rm != 15) {
b26eefb6
PB
4240 TCGv base;
4241
4242 base = load_reg(s, rn);
9ee6e8bb 4243 if (rm == 13) {
b26eefb6 4244 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4245 } else {
b26eefb6
PB
4246 TCGv index;
4247 index = load_reg(s, rm);
4248 tcg_gen_add_i32(base, base, index);
7d1b0095 4249 tcg_temp_free_i32(index);
9ee6e8bb 4250 }
b26eefb6 4251 store_reg(s, rn, base);
9ee6e8bb
PB
4252 }
4253 return 0;
4254}
3b46e624 4255
8f8e3aa4
PB
4256/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4257static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4258{
4259 tcg_gen_and_i32(t, t, c);
f669df27 4260 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4261 tcg_gen_or_i32(dest, t, f);
4262}
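/* Note: VBSL, VBIT and VBIF in the 3-register decoder below all reduce
 * to this helper; only the choice of which operand supplies the select
 * mask c differs (the destination register for VBSL, the rm operand for
 * VBIT and VBIF).
 */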
4263
a7812ae4 4264static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4265{
4266 switch (size) {
4267 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4268 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4269 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4270 default: abort();
4271 }
4272}
4273
a7812ae4 4274static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4275{
4276 switch (size) {
02da0b2d
PM
4277 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4278 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4279 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4280 default: abort();
4281 }
4282}
4283
a7812ae4 4284static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4285{
4286 switch (size) {
02da0b2d
PM
4287 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4288 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4289 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4290 default: abort();
4291 }
4292}
4293
af1bbf30
JR
4294static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4295{
4296 switch (size) {
02da0b2d
PM
4297 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4298 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4299 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4300 default: abort();
4301 }
4302}
4303
ad69471c
PB
4304static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4305 int q, int u)
4306{
4307 if (q) {
4308 if (u) {
4309 switch (size) {
4310 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4311 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4312 default: abort();
4313 }
4314 } else {
4315 switch (size) {
4316 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4317 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4318 default: abort();
4319 }
4320 }
4321 } else {
4322 if (u) {
4323 switch (size) {
b408a9b0
CL
4324 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4325 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4326 default: abort();
4327 }
4328 } else {
4329 switch (size) {
4330 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4331 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4332 default: abort();
4333 }
4334 }
4335 }
4336}
4337
a7812ae4 4338static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4339{
4340 if (u) {
4341 switch (size) {
4342 case 0: gen_helper_neon_widen_u8(dest, src); break;
4343 case 1: gen_helper_neon_widen_u16(dest, src); break;
4344 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4345 default: abort();
4346 }
4347 } else {
4348 switch (size) {
4349 case 0: gen_helper_neon_widen_s8(dest, src); break;
4350 case 1: gen_helper_neon_widen_s16(dest, src); break;
4351 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4352 default: abort();
4353 }
4354 }
7d1b0095 4355 tcg_temp_free_i32(src);
ad69471c
PB
4356}
4357
4358static inline void gen_neon_addl(int size)
4359{
4360 switch (size) {
4361 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4362 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4363 case 2: tcg_gen_add_i64(CPU_V001); break;
4364 default: abort();
4365 }
4366}
4367
4368static inline void gen_neon_subl(int size)
4369{
4370 switch (size) {
4371 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4372 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4373 case 2: tcg_gen_sub_i64(CPU_V001); break;
4374 default: abort();
4375 }
4376}
4377
a7812ae4 4378static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4379{
4380 switch (size) {
4381 case 0: gen_helper_neon_negl_u16(var, var); break;
4382 case 1: gen_helper_neon_negl_u32(var, var); break;
4383 case 2: gen_helper_neon_negl_u64(var, var); break;
4384 default: abort();
4385 }
4386}
4387
a7812ae4 4388static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4389{
4390 switch (size) {
02da0b2d
PM
4391 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4392 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4393 default: abort();
4394 }
4395}
4396
a7812ae4 4397static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4398{
a7812ae4 4399 TCGv_i64 tmp;
ad69471c
PB
4400
4401 switch ((size << 1) | u) {
4402 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4403 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4404 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4405 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4406 case 4:
4407 tmp = gen_muls_i64_i32(a, b);
4408 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4409 tcg_temp_free_i64(tmp);
ad69471c
PB
4410 break;
4411 case 5:
4412 tmp = gen_mulu_i64_i32(a, b);
4413 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4414 tcg_temp_free_i64(tmp);
ad69471c
PB
4415 break;
4416 default: abort();
4417 }
c6067f04
CL
4418
4419 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4420 Don't forget to clean them now. */
4421 if (size < 2) {
7d1b0095
PM
4422 tcg_temp_free_i32(a);
4423 tcg_temp_free_i32(b);
c6067f04 4424 }
ad69471c
PB
4425}
4426
c33171c7
PM
4427static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4428{
4429 if (op) {
4430 if (u) {
4431 gen_neon_unarrow_sats(size, dest, src);
4432 } else {
4433 gen_neon_narrow(size, dest, src);
4434 }
4435 } else {
4436 if (u) {
4437 gen_neon_narrow_satu(size, dest, src);
4438 } else {
4439 gen_neon_narrow_sats(size, dest, src);
4440 }
4441 }
4442}
4443
62698be3
PM
4444/* Symbolic constants for op fields for Neon 3-register same-length.
4445 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4446 * table A7-9.
4447 */
4448#define NEON_3R_VHADD 0
4449#define NEON_3R_VQADD 1
4450#define NEON_3R_VRHADD 2
4451#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4452#define NEON_3R_VHSUB 4
4453#define NEON_3R_VQSUB 5
4454#define NEON_3R_VCGT 6
4455#define NEON_3R_VCGE 7
4456#define NEON_3R_VSHL 8
4457#define NEON_3R_VQSHL 9
4458#define NEON_3R_VRSHL 10
4459#define NEON_3R_VQRSHL 11
4460#define NEON_3R_VMAX 12
4461#define NEON_3R_VMIN 13
4462#define NEON_3R_VABD 14
4463#define NEON_3R_VABA 15
4464#define NEON_3R_VADD_VSUB 16
4465#define NEON_3R_VTST_VCEQ 17
4466#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4467#define NEON_3R_VMUL 19
4468#define NEON_3R_VPMAX 20
4469#define NEON_3R_VPMIN 21
4470#define NEON_3R_VQDMULH_VQRDMULH 22
4471#define NEON_3R_VPADD 23
da97f52c 4472#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4473#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4474#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4475#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4476#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4477#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4478#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4479
4480static const uint8_t neon_3r_sizes[] = {
4481 [NEON_3R_VHADD] = 0x7,
4482 [NEON_3R_VQADD] = 0xf,
4483 [NEON_3R_VRHADD] = 0x7,
4484 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4485 [NEON_3R_VHSUB] = 0x7,
4486 [NEON_3R_VQSUB] = 0xf,
4487 [NEON_3R_VCGT] = 0x7,
4488 [NEON_3R_VCGE] = 0x7,
4489 [NEON_3R_VSHL] = 0xf,
4490 [NEON_3R_VQSHL] = 0xf,
4491 [NEON_3R_VRSHL] = 0xf,
4492 [NEON_3R_VQRSHL] = 0xf,
4493 [NEON_3R_VMAX] = 0x7,
4494 [NEON_3R_VMIN] = 0x7,
4495 [NEON_3R_VABD] = 0x7,
4496 [NEON_3R_VABA] = 0x7,
4497 [NEON_3R_VADD_VSUB] = 0xf,
4498 [NEON_3R_VTST_VCEQ] = 0x7,
4499 [NEON_3R_VML] = 0x7,
4500 [NEON_3R_VMUL] = 0x7,
4501 [NEON_3R_VPMAX] = 0x7,
4502 [NEON_3R_VPMIN] = 0x7,
4503 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4504 [NEON_3R_VPADD] = 0x7,
da97f52c 4505 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4506 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4507 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4508 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4509 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4510 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4511 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4512};
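/* How this table is consulted (a sketch of the check further down):
 *
 *     if ((neon_3r_sizes[op] & (1 << size)) == 0) {
 *         return 1;    (UNDEF: size not allowed for this op)
 *     }
 *
 * e.g. NEON_3R_VQADD (0xf) accepts sizes 0-3, while the float ops (0x5)
 * only pass for size 0 or 2, where bit 1 of size encodes the op instead.
 */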
4513
600b828c
PM
4514/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4515 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4516 * table A7-13.
4517 */
4518#define NEON_2RM_VREV64 0
4519#define NEON_2RM_VREV32 1
4520#define NEON_2RM_VREV16 2
4521#define NEON_2RM_VPADDL 4
4522#define NEON_2RM_VPADDL_U 5
4523#define NEON_2RM_VCLS 8
4524#define NEON_2RM_VCLZ 9
4525#define NEON_2RM_VCNT 10
4526#define NEON_2RM_VMVN 11
4527#define NEON_2RM_VPADAL 12
4528#define NEON_2RM_VPADAL_U 13
4529#define NEON_2RM_VQABS 14
4530#define NEON_2RM_VQNEG 15
4531#define NEON_2RM_VCGT0 16
4532#define NEON_2RM_VCGE0 17
4533#define NEON_2RM_VCEQ0 18
4534#define NEON_2RM_VCLE0 19
4535#define NEON_2RM_VCLT0 20
4536#define NEON_2RM_VABS 22
4537#define NEON_2RM_VNEG 23
4538#define NEON_2RM_VCGT0_F 24
4539#define NEON_2RM_VCGE0_F 25
4540#define NEON_2RM_VCEQ0_F 26
4541#define NEON_2RM_VCLE0_F 27
4542#define NEON_2RM_VCLT0_F 28
4543#define NEON_2RM_VABS_F 30
4544#define NEON_2RM_VNEG_F 31
4545#define NEON_2RM_VSWP 32
4546#define NEON_2RM_VTRN 33
4547#define NEON_2RM_VUZP 34
4548#define NEON_2RM_VZIP 35
4549#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4550#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4551#define NEON_2RM_VSHLL 38
4552#define NEON_2RM_VCVT_F16_F32 44
4553#define NEON_2RM_VCVT_F32_F16 46
4554#define NEON_2RM_VRECPE 56
4555#define NEON_2RM_VRSQRTE 57
4556#define NEON_2RM_VRECPE_F 58
4557#define NEON_2RM_VRSQRTE_F 59
4558#define NEON_2RM_VCVT_FS 60
4559#define NEON_2RM_VCVT_FU 61
4560#define NEON_2RM_VCVT_SF 62
4561#define NEON_2RM_VCVT_UF 63
4562
4563static int neon_2rm_is_float_op(int op)
4564{
4565 /* Return true if this neon 2reg-misc op is float-to-float */
4566 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4567 op >= NEON_2RM_VRECPE_F);
4568}
4569
4570/* Each entry in this array has bit n set if the insn allows
4571 * size value n (otherwise it will UNDEF). Since unallocated
4572 * op values will have no bits set they always UNDEF.
4573 */
4574static const uint8_t neon_2rm_sizes[] = {
4575 [NEON_2RM_VREV64] = 0x7,
4576 [NEON_2RM_VREV32] = 0x3,
4577 [NEON_2RM_VREV16] = 0x1,
4578 [NEON_2RM_VPADDL] = 0x7,
4579 [NEON_2RM_VPADDL_U] = 0x7,
4580 [NEON_2RM_VCLS] = 0x7,
4581 [NEON_2RM_VCLZ] = 0x7,
4582 [NEON_2RM_VCNT] = 0x1,
4583 [NEON_2RM_VMVN] = 0x1,
4584 [NEON_2RM_VPADAL] = 0x7,
4585 [NEON_2RM_VPADAL_U] = 0x7,
4586 [NEON_2RM_VQABS] = 0x7,
4587 [NEON_2RM_VQNEG] = 0x7,
4588 [NEON_2RM_VCGT0] = 0x7,
4589 [NEON_2RM_VCGE0] = 0x7,
4590 [NEON_2RM_VCEQ0] = 0x7,
4591 [NEON_2RM_VCLE0] = 0x7,
4592 [NEON_2RM_VCLT0] = 0x7,
4593 [NEON_2RM_VABS] = 0x7,
4594 [NEON_2RM_VNEG] = 0x7,
4595 [NEON_2RM_VCGT0_F] = 0x4,
4596 [NEON_2RM_VCGE0_F] = 0x4,
4597 [NEON_2RM_VCEQ0_F] = 0x4,
4598 [NEON_2RM_VCLE0_F] = 0x4,
4599 [NEON_2RM_VCLT0_F] = 0x4,
4600 [NEON_2RM_VABS_F] = 0x4,
4601 [NEON_2RM_VNEG_F] = 0x4,
4602 [NEON_2RM_VSWP] = 0x1,
4603 [NEON_2RM_VTRN] = 0x7,
4604 [NEON_2RM_VUZP] = 0x7,
4605 [NEON_2RM_VZIP] = 0x7,
4606 [NEON_2RM_VMOVN] = 0x7,
4607 [NEON_2RM_VQMOVN] = 0x7,
4608 [NEON_2RM_VSHLL] = 0x7,
4609 [NEON_2RM_VCVT_F16_F32] = 0x2,
4610 [NEON_2RM_VCVT_F32_F16] = 0x2,
4611 [NEON_2RM_VRECPE] = 0x4,
4612 [NEON_2RM_VRSQRTE] = 0x4,
4613 [NEON_2RM_VRECPE_F] = 0x4,
4614 [NEON_2RM_VRSQRTE_F] = 0x4,
4615 [NEON_2RM_VCVT_FS] = 0x4,
4616 [NEON_2RM_VCVT_FU] = 0x4,
4617 [NEON_2RM_VCVT_SF] = 0x4,
4618 [NEON_2RM_VCVT_UF] = 0x4,
4619};
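/* Note: this table is presumably consulted the same way as neon_3r_sizes
 * above (neon_2rm_sizes[op] & (1 << size)) in the 2-register misc decode
 * later in the file; an unallocated op value has an implicit 0 entry and
 * therefore always UNDEFs, as the comment above the array says.
 */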
4620
9ee6e8bb
PB
4621/* Translate a NEON data processing instruction. Return nonzero if the
4622 instruction is invalid.
ad69471c
PB
4623 We process data in a mixture of 32-bit and 64-bit chunks.
4624 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4625
0ecb72a5 4626static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4627{
4628 int op;
4629 int q;
4630 int rd, rn, rm;
4631 int size;
4632 int shift;
4633 int pass;
4634 int count;
4635 int pairwise;
4636 int u;
ca9a32e4 4637 uint32_t imm, mask;
b75263d6 4638 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4639 TCGv_i64 tmp64;
9ee6e8bb 4640
5df8bac1 4641 if (!s->vfp_enabled)
9ee6e8bb
PB
4642 return 1;
4643 q = (insn & (1 << 6)) != 0;
4644 u = (insn >> 24) & 1;
4645 VFP_DREG_D(rd, insn);
4646 VFP_DREG_N(rn, insn);
4647 VFP_DREG_M(rm, insn);
4648 size = (insn >> 20) & 3;
4649 if ((insn & (1 << 23)) == 0) {
4650 /* Three register same length. */
4651 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4652 /* Catch invalid op and bad size combinations: UNDEF */
4653 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4654 return 1;
4655 }
25f84f79
PM
4656 /* All insns of this form UNDEF for either this condition or the
4657 * superset of cases "Q==1"; we catch the latter later.
4658 */
4659 if (q && ((rd | rn | rm) & 1)) {
4660 return 1;
4661 }
62698be3
PM
4662 if (size == 3 && op != NEON_3R_LOGIC) {
4663 /* 64-bit element instructions. */
9ee6e8bb 4664 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4665 neon_load_reg64(cpu_V0, rn + pass);
4666 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4667 switch (op) {
62698be3 4668 case NEON_3R_VQADD:
9ee6e8bb 4669 if (u) {
02da0b2d
PM
4670 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4671 cpu_V0, cpu_V1);
2c0262af 4672 } else {
02da0b2d
PM
4673 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4674 cpu_V0, cpu_V1);
2c0262af 4675 }
9ee6e8bb 4676 break;
62698be3 4677 case NEON_3R_VQSUB:
9ee6e8bb 4678 if (u) {
02da0b2d
PM
4679 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4680 cpu_V0, cpu_V1);
ad69471c 4681 } else {
02da0b2d
PM
4682 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4683 cpu_V0, cpu_V1);
ad69471c
PB
4684 }
4685 break;
62698be3 4686 case NEON_3R_VSHL:
ad69471c
PB
4687 if (u) {
4688 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4689 } else {
4690 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4691 }
4692 break;
62698be3 4693 case NEON_3R_VQSHL:
ad69471c 4694 if (u) {
02da0b2d
PM
4695 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4696 cpu_V1, cpu_V0);
ad69471c 4697 } else {
02da0b2d
PM
4698 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4699 cpu_V1, cpu_V0);
ad69471c
PB
4700 }
4701 break;
62698be3 4702 case NEON_3R_VRSHL:
ad69471c
PB
4703 if (u) {
4704 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4705 } else {
ad69471c
PB
4706 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4707 }
4708 break;
62698be3 4709 case NEON_3R_VQRSHL:
ad69471c 4710 if (u) {
02da0b2d
PM
4711 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4712 cpu_V1, cpu_V0);
ad69471c 4713 } else {
02da0b2d
PM
4714 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4715 cpu_V1, cpu_V0);
1e8d4eec 4716 }
9ee6e8bb 4717 break;
62698be3 4718 case NEON_3R_VADD_VSUB:
9ee6e8bb 4719 if (u) {
ad69471c 4720 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4721 } else {
ad69471c 4722 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4723 }
4724 break;
4725 default:
4726 abort();
2c0262af 4727 }
ad69471c 4728 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4729 }
9ee6e8bb 4730 return 0;
2c0262af 4731 }
25f84f79 4732 pairwise = 0;
9ee6e8bb 4733 switch (op) {
62698be3
PM
4734 case NEON_3R_VSHL:
4735 case NEON_3R_VQSHL:
4736 case NEON_3R_VRSHL:
4737 case NEON_3R_VQRSHL:
9ee6e8bb 4738 {
ad69471c
PB
4739 int rtmp;
4740 /* Shift instruction operands are reversed. */
4741 rtmp = rn;
9ee6e8bb 4742 rn = rm;
ad69471c 4743 rm = rtmp;
9ee6e8bb 4744 }
2c0262af 4745 break;
25f84f79
PM
4746 case NEON_3R_VPADD:
4747 if (u) {
4748 return 1;
4749 }
4750 /* Fall through */
62698be3
PM
4751 case NEON_3R_VPMAX:
4752 case NEON_3R_VPMIN:
9ee6e8bb 4753 pairwise = 1;
2c0262af 4754 break;
25f84f79
PM
4755 case NEON_3R_FLOAT_ARITH:
4756 pairwise = (u && size < 2); /* if VPADD (float) */
4757 break;
4758 case NEON_3R_FLOAT_MINMAX:
4759 pairwise = u; /* if VPMIN/VPMAX (float) */
4760 break;
4761 case NEON_3R_FLOAT_CMP:
4762 if (!u && size) {
4763 /* no encoding for U=0 C=1x */
4764 return 1;
4765 }
4766 break;
4767 case NEON_3R_FLOAT_ACMP:
4768 if (!u) {
4769 return 1;
4770 }
4771 break;
4772 case NEON_3R_VRECPS_VRSQRTS:
4773 if (u) {
4774 return 1;
4775 }
2c0262af 4776 break;
25f84f79
PM
4777 case NEON_3R_VMUL:
4778 if (u && (size != 0)) {
4779 /* UNDEF on invalid size for polynomial subcase */
4780 return 1;
4781 }
2c0262af 4782 break;
da97f52c
PM
4783 case NEON_3R_VFM:
4784 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4785 return 1;
4786 }
4787 break;
9ee6e8bb 4788 default:
2c0262af 4789 break;
9ee6e8bb 4790 }
dd8fbd78 4791
25f84f79
PM
4792 if (pairwise && q) {
4793 /* All the pairwise insns UNDEF if Q is set */
4794 return 1;
4795 }
4796
9ee6e8bb
PB
4797 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4798
4799 if (pairwise) {
4800 /* Pairwise. */
a5a14945
JR
4801 if (pass < 1) {
4802 tmp = neon_load_reg(rn, 0);
4803 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4804 } else {
a5a14945
JR
4805 tmp = neon_load_reg(rm, 0);
4806 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4807 }
4808 } else {
4809 /* Elementwise. */
dd8fbd78
FN
4810 tmp = neon_load_reg(rn, pass);
4811 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4812 }
4813 switch (op) {
62698be3 4814 case NEON_3R_VHADD:
9ee6e8bb
PB
4815 GEN_NEON_INTEGER_OP(hadd);
4816 break;
62698be3 4817 case NEON_3R_VQADD:
02da0b2d 4818 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4819 break;
62698be3 4820 case NEON_3R_VRHADD:
9ee6e8bb 4821 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4822 break;
62698be3 4823 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4824 switch ((u << 2) | size) {
4825 case 0: /* VAND */
dd8fbd78 4826 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4827 break;
4828 case 1: /* BIC */
f669df27 4829 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4830 break;
4831 case 2: /* VORR */
dd8fbd78 4832 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4833 break;
4834 case 3: /* VORN */
f669df27 4835 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4836 break;
4837 case 4: /* VEOR */
dd8fbd78 4838 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4839 break;
4840 case 5: /* VBSL */
dd8fbd78
FN
4841 tmp3 = neon_load_reg(rd, pass);
4842 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4843 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4844 break;
4845 case 6: /* VBIT */
dd8fbd78
FN
4846 tmp3 = neon_load_reg(rd, pass);
4847 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4848 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4849 break;
4850 case 7: /* VBIF */
dd8fbd78
FN
4851 tmp3 = neon_load_reg(rd, pass);
4852 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4853 tcg_temp_free_i32(tmp3);
9ee6e8bb 4854 break;
2c0262af
FB
4855 }
4856 break;
62698be3 4857 case NEON_3R_VHSUB:
9ee6e8bb
PB
4858 GEN_NEON_INTEGER_OP(hsub);
4859 break;
62698be3 4860 case NEON_3R_VQSUB:
02da0b2d 4861 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4862 break;
62698be3 4863 case NEON_3R_VCGT:
9ee6e8bb
PB
4864 GEN_NEON_INTEGER_OP(cgt);
4865 break;
62698be3 4866 case NEON_3R_VCGE:
9ee6e8bb
PB
4867 GEN_NEON_INTEGER_OP(cge);
4868 break;
62698be3 4869 case NEON_3R_VSHL:
ad69471c 4870 GEN_NEON_INTEGER_OP(shl);
2c0262af 4871 break;
62698be3 4872 case NEON_3R_VQSHL:
02da0b2d 4873 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4874 break;
62698be3 4875 case NEON_3R_VRSHL:
ad69471c 4876 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4877 break;
62698be3 4878 case NEON_3R_VQRSHL:
02da0b2d 4879 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4880 break;
62698be3 4881 case NEON_3R_VMAX:
9ee6e8bb
PB
4882 GEN_NEON_INTEGER_OP(max);
4883 break;
62698be3 4884 case NEON_3R_VMIN:
9ee6e8bb
PB
4885 GEN_NEON_INTEGER_OP(min);
4886 break;
62698be3 4887 case NEON_3R_VABD:
9ee6e8bb
PB
4888 GEN_NEON_INTEGER_OP(abd);
4889 break;
62698be3 4890 case NEON_3R_VABA:
9ee6e8bb 4891 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4892 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4893 tmp2 = neon_load_reg(rd, pass);
4894 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4895 break;
62698be3 4896 case NEON_3R_VADD_VSUB:
9ee6e8bb 4897 if (!u) { /* VADD */
62698be3 4898 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4899 } else { /* VSUB */
4900 switch (size) {
dd8fbd78
FN
4901 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4902 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4903 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4904 default: abort();
9ee6e8bb
PB
4905 }
4906 }
4907 break;
62698be3 4908 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4909 if (!u) { /* VTST */
4910 switch (size) {
dd8fbd78
FN
4911 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4912 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4913 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4914 default: abort();
9ee6e8bb
PB
4915 }
4916 } else { /* VCEQ */
4917 switch (size) {
dd8fbd78
FN
4918 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4919 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4920 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4921 default: abort();
9ee6e8bb
PB
4922 }
4923 }
4924 break;
62698be3 4925 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4926 switch (size) {
dd8fbd78
FN
4927 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4928 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4929 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4930 default: abort();
9ee6e8bb 4931 }
7d1b0095 4932 tcg_temp_free_i32(tmp2);
dd8fbd78 4933 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4934 if (u) { /* VMLS */
dd8fbd78 4935 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4936 } else { /* VMLA */
dd8fbd78 4937 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4938 }
4939 break;
62698be3 4940 case NEON_3R_VMUL:
9ee6e8bb 4941 if (u) { /* polynomial */
dd8fbd78 4942 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4943 } else { /* Integer */
4944 switch (size) {
dd8fbd78
FN
4945 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4946 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4947 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4948 default: abort();
9ee6e8bb
PB
4949 }
4950 }
4951 break;
62698be3 4952 case NEON_3R_VPMAX:
9ee6e8bb
PB
4953 GEN_NEON_INTEGER_OP(pmax);
4954 break;
62698be3 4955 case NEON_3R_VPMIN:
9ee6e8bb
PB
4956 GEN_NEON_INTEGER_OP(pmin);
4957 break;
62698be3 4958 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4959 if (!u) { /* VQDMULH */
4960 switch (size) {
02da0b2d
PM
4961 case 1:
4962 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4963 break;
4964 case 2:
4965 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4966 break;
62698be3 4967 default: abort();
9ee6e8bb 4968 }
62698be3 4969 } else { /* VQRDMULH */
9ee6e8bb 4970 switch (size) {
02da0b2d
PM
4971 case 1:
4972 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4973 break;
4974 case 2:
4975 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4976 break;
62698be3 4977 default: abort();
9ee6e8bb
PB
4978 }
4979 }
4980 break;
62698be3 4981 case NEON_3R_VPADD:
9ee6e8bb 4982 switch (size) {
dd8fbd78
FN
4983 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4984 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4985 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4986 default: abort();
9ee6e8bb
PB
4987 }
4988 break;
62698be3 4989 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4990 {
4991 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4992 switch ((u << 2) | size) {
4993 case 0: /* VADD */
aa47cfdd
PM
4994 case 4: /* VPADD */
4995 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4996 break;
4997 case 2: /* VSUB */
aa47cfdd 4998 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4999 break;
5000 case 6: /* VABD */
aa47cfdd 5001 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5002 break;
5003 default:
62698be3 5004 abort();
9ee6e8bb 5005 }
aa47cfdd 5006 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5007 break;
aa47cfdd 5008 }
62698be3 5009 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5010 {
5011 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5012 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5013 if (!u) {
7d1b0095 5014 tcg_temp_free_i32(tmp2);
dd8fbd78 5015 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5016 if (size == 0) {
aa47cfdd 5017 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5018 } else {
aa47cfdd 5019 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5020 }
5021 }
aa47cfdd 5022 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5023 break;
aa47cfdd 5024 }
62698be3 5025 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5026 {
5027 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5028 if (!u) {
aa47cfdd 5029 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5030 } else {
aa47cfdd
PM
5031 if (size == 0) {
5032 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5033 } else {
5034 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5035 }
b5ff1b31 5036 }
aa47cfdd 5037 tcg_temp_free_ptr(fpstatus);
2c0262af 5038 break;
aa47cfdd 5039 }
62698be3 5040 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5041 {
5042 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5043 if (size == 0) {
5044 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5045 } else {
5046 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5047 }
5048 tcg_temp_free_ptr(fpstatus);
2c0262af 5049 break;
aa47cfdd 5050 }
62698be3 5051 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5052 {
5053 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5054 if (size == 0) {
5055 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
5056 } else {
5057 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
5058 }
5059 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5060 break;
aa47cfdd 5061 }
62698be3 5062 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 5063 if (size == 0)
dd8fbd78 5064 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 5065 else
dd8fbd78 5066 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 5067 break;
da97f52c
PM
5068 case NEON_3R_VFM:
5069 {
5070 /* VFMA, VFMS: fused multiply-add */
5071 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5072 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5073 if (size) {
5074 /* VFMS */
5075 gen_helper_vfp_negs(tmp, tmp);
5076 }
5077 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5078 tcg_temp_free_i32(tmp3);
5079 tcg_temp_free_ptr(fpstatus);
5080 break;
5081 }
9ee6e8bb
PB
5082 default:
5083 abort();
2c0262af 5084 }
7d1b0095 5085 tcg_temp_free_i32(tmp2);
dd8fbd78 5086
9ee6e8bb
PB
5087 /* Save the result. For elementwise operations we can put it
5088 straight into the destination register. For pairwise operations
5089 we have to be careful to avoid clobbering the source operands. */
5090 if (pairwise && rd == rm) {
dd8fbd78 5091 neon_store_scratch(pass, tmp);
9ee6e8bb 5092 } else {
dd8fbd78 5093 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5094 }
5095
5096 } /* for pass */
5097 if (pairwise && rd == rm) {
5098 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5099 tmp = neon_load_scratch(pass);
5100 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5101 }
5102 }
ad69471c 5103 /* End of 3 register same size operations. */
9ee6e8bb
PB
5104 } else if (insn & (1 << 4)) {
5105 if ((insn & 0x00380080) != 0) {
5106 /* Two registers and shift. */
5107 op = (insn >> 8) & 0xf;
5108 if (insn & (1 << 7)) {
cc13115b
PM
5109 /* 64-bit shift. */
5110 if (op > 7) {
5111 return 1;
5112 }
9ee6e8bb
PB
5113 size = 3;
5114 } else {
5115 size = 2;
5116 while ((insn & (1 << (size + 19))) == 0)
5117 size--;
5118 }
5119 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5120 /* To avoid excessive duplication of ops we implement shift
5121 by immediate using the variable shift operations. */
5122 if (op < 8) {
5123 /* Shift by immediate:
5124 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5125 if (q && ((rd | rm) & 1)) {
5126 return 1;
5127 }
5128 if (!u && (op == 4 || op == 6)) {
5129 return 1;
5130 }
9ee6e8bb
PB
5131 /* Right shifts are encoded as N - shift, where N is the
5132 element size in bits. */
5133 if (op <= 4)
5134 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5135 if (size == 3) {
5136 count = q + 1;
5137 } else {
5138 count = q ? 4: 2;
5139 }
5140 switch (size) {
5141 case 0:
5142 imm = (uint8_t) shift;
5143 imm |= imm << 8;
5144 imm |= imm << 16;
5145 break;
5146 case 1:
5147 imm = (uint16_t) shift;
5148 imm |= imm << 16;
5149 break;
5150 case 2:
5151 case 3:
5152 imm = shift;
5153 break;
5154 default:
5155 abort();
5156 }
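                /* Worked example (a sketch): for VSHR.U8 #3 the size scan
                 * above stops at size == 0, the extracted shift field is 5,
                 * and the "shift - (1 << (size + 3))" adjustment makes
                 * shift == -3; case 0 above then replicates
                 * (uint8_t)-3 == 0xfd into every byte lane of imm, and the
                 * variable-shift helpers used below interpret the negative
                 * per-lane count as a right shift by 3.
                 */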
5157
5158 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5159 if (size == 3) {
5160 neon_load_reg64(cpu_V0, rm + pass);
5161 tcg_gen_movi_i64(cpu_V1, imm);
5162 switch (op) {
5163 case 0: /* VSHR */
5164 case 1: /* VSRA */
5165 if (u)
5166 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5167 else
ad69471c 5168 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5169 break;
ad69471c
PB
5170 case 2: /* VRSHR */
5171 case 3: /* VRSRA */
5172 if (u)
5173 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5174 else
ad69471c 5175 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5176 break;
ad69471c 5177 case 4: /* VSRI */
ad69471c
PB
5178 case 5: /* VSHL, VSLI */
5179 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5180 break;
0322b26e 5181 case 6: /* VQSHLU */
02da0b2d
PM
5182 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5183 cpu_V0, cpu_V1);
ad69471c 5184 break;
0322b26e
PM
5185 case 7: /* VQSHL */
5186 if (u) {
02da0b2d 5187 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5188 cpu_V0, cpu_V1);
5189 } else {
02da0b2d 5190 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5191 cpu_V0, cpu_V1);
5192 }
9ee6e8bb 5193 break;
9ee6e8bb 5194 }
ad69471c
PB
5195 if (op == 1 || op == 3) {
5196 /* Accumulate. */
5371cb81 5197 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5198 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5199 } else if (op == 4 || (op == 5 && u)) {
5200 /* Insert */
923e6509
CL
5201 neon_load_reg64(cpu_V1, rd + pass);
5202 uint64_t mask;
5203 if (shift < -63 || shift > 63) {
5204 mask = 0;
5205 } else {
5206 if (op == 4) {
5207 mask = 0xffffffffffffffffull >> -shift;
5208 } else {
5209 mask = 0xffffffffffffffffull << shift;
5210 }
5211 }
5212 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5213 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5214 }
5215 neon_store_reg64(cpu_V0, rd + pass);
5216 } else { /* size < 3 */
5217 /* Operands in tmp and tmp2. */
dd8fbd78 5218 tmp = neon_load_reg(rm, pass);
7d1b0095 5219 tmp2 = tcg_temp_new_i32();
dd8fbd78 5220 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5221 switch (op) {
5222 case 0: /* VSHR */
5223 case 1: /* VSRA */
5224 GEN_NEON_INTEGER_OP(shl);
5225 break;
5226 case 2: /* VRSHR */
5227 case 3: /* VRSRA */
5228 GEN_NEON_INTEGER_OP(rshl);
5229 break;
5230 case 4: /* VSRI */
ad69471c
PB
5231 case 5: /* VSHL, VSLI */
5232 switch (size) {
dd8fbd78
FN
5233 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5234 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5235 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5236 default: abort();
ad69471c
PB
5237 }
5238 break;
0322b26e 5239 case 6: /* VQSHLU */
ad69471c 5240 switch (size) {
0322b26e 5241 case 0:
02da0b2d
PM
5242 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5243 tmp, tmp2);
0322b26e
PM
5244 break;
5245 case 1:
02da0b2d
PM
5246 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5247 tmp, tmp2);
0322b26e
PM
5248 break;
5249 case 2:
02da0b2d
PM
5250 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5251 tmp, tmp2);
0322b26e
PM
5252 break;
5253 default:
cc13115b 5254 abort();
ad69471c
PB
5255 }
5256 break;
0322b26e 5257 case 7: /* VQSHL */
02da0b2d 5258 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5259 break;
ad69471c 5260 }
7d1b0095 5261 tcg_temp_free_i32(tmp2);
ad69471c
PB
5262
5263 if (op == 1 || op == 3) {
5264 /* Accumulate. */
dd8fbd78 5265 tmp2 = neon_load_reg(rd, pass);
5371cb81 5266 gen_neon_add(size, tmp, tmp2);
7d1b0095 5267 tcg_temp_free_i32(tmp2);
ad69471c
PB
5268 } else if (op == 4 || (op == 5 && u)) {
5269 /* Insert */
5270 switch (size) {
5271 case 0:
5272 if (op == 4)
ca9a32e4 5273 mask = 0xff >> -shift;
ad69471c 5274 else
ca9a32e4
JR
5275 mask = (uint8_t)(0xff << shift);
5276 mask |= mask << 8;
5277 mask |= mask << 16;
ad69471c
PB
5278 break;
5279 case 1:
5280 if (op == 4)
ca9a32e4 5281 mask = 0xffff >> -shift;
ad69471c 5282 else
ca9a32e4
JR
5283 mask = (uint16_t)(0xffff << shift);
5284 mask |= mask << 16;
ad69471c
PB
5285 break;
5286 case 2:
ca9a32e4
JR
5287 if (shift < -31 || shift > 31) {
5288 mask = 0;
5289 } else {
5290 if (op == 4)
5291 mask = 0xffffffffu >> -shift;
5292 else
5293 mask = 0xffffffffu << shift;
5294 }
ad69471c
PB
5295 break;
5296 default:
5297 abort();
5298 }
dd8fbd78 5299 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5300 tcg_gen_andi_i32(tmp, tmp, mask);
5301 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5302 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5303 tcg_temp_free_i32(tmp2);
ad69471c 5304 }
dd8fbd78 5305 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5306 }
5307 } /* for pass */
5308 } else if (op < 10) {
ad69471c 5309 /* Shift by immediate and narrow:
9ee6e8bb 5310 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5311 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5312 if (rm & 1) {
5313 return 1;
5314 }
9ee6e8bb
PB
5315 shift = shift - (1 << (size + 3));
5316 size++;
92cdfaeb 5317 if (size == 3) {
a7812ae4 5318 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5319 neon_load_reg64(cpu_V0, rm);
5320 neon_load_reg64(cpu_V1, rm + 1);
5321 for (pass = 0; pass < 2; pass++) {
5322 TCGv_i64 in;
5323 if (pass == 0) {
5324 in = cpu_V0;
5325 } else {
5326 in = cpu_V1;
5327 }
ad69471c 5328 if (q) {
0b36f4cd 5329 if (input_unsigned) {
92cdfaeb 5330 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5331 } else {
92cdfaeb 5332 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5333 }
ad69471c 5334 } else {
0b36f4cd 5335 if (input_unsigned) {
92cdfaeb 5336 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5337 } else {
92cdfaeb 5338 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5339 }
ad69471c 5340 }
7d1b0095 5341 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5342 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5343 neon_store_reg(rd, pass, tmp);
5344 } /* for pass */
5345 tcg_temp_free_i64(tmp64);
5346 } else {
5347 if (size == 1) {
5348 imm = (uint16_t)shift;
5349 imm |= imm << 16;
2c0262af 5350 } else {
92cdfaeb
PM
5351 /* size == 2 */
5352 imm = (uint32_t)shift;
5353 }
5354 tmp2 = tcg_const_i32(imm);
5355 tmp4 = neon_load_reg(rm + 1, 0);
5356 tmp5 = neon_load_reg(rm + 1, 1);
5357 for (pass = 0; pass < 2; pass++) {
5358 if (pass == 0) {
5359 tmp = neon_load_reg(rm, 0);
5360 } else {
5361 tmp = tmp4;
5362 }
0b36f4cd
CL
5363 gen_neon_shift_narrow(size, tmp, tmp2, q,
5364 input_unsigned);
92cdfaeb
PM
5365 if (pass == 0) {
5366 tmp3 = neon_load_reg(rm, 1);
5367 } else {
5368 tmp3 = tmp5;
5369 }
0b36f4cd
CL
5370 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5371 input_unsigned);
36aa55dc 5372 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5373 tcg_temp_free_i32(tmp);
5374 tcg_temp_free_i32(tmp3);
5375 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5376 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5377 neon_store_reg(rd, pass, tmp);
5378 } /* for pass */
c6067f04 5379 tcg_temp_free_i32(tmp2);
b75263d6 5380 }
9ee6e8bb 5381 } else if (op == 10) {
cc13115b
PM
5382 /* VSHLL, VMOVL */
5383 if (q || (rd & 1)) {
9ee6e8bb 5384 return 1;
cc13115b 5385 }
ad69471c
PB
5386 tmp = neon_load_reg(rm, 0);
5387 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5388 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5389 if (pass == 1)
5390 tmp = tmp2;
5391
5392 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5393
9ee6e8bb
PB
5394 if (shift != 0) {
5395 /* The shift is less than the width of the source
ad69471c
PB
5396 type, so we can just shift the whole register. */
5397 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5398 /* Widen the result of the shift: we need to clear
5399 * the potential overflow bits resulting from
5400 * left bits of the narrow input appearing as
5401 * right bits of the left neighbour narrow
5402 * input. */
ad69471c
PB
5403 if (size < 2 || !u) {
5404 uint64_t imm64;
5405 if (size == 0) {
5406 imm = (0xffu >> (8 - shift));
5407 imm |= imm << 16;
acdf01ef 5408 } else if (size == 1) {
ad69471c 5409 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5410 } else {
5411 /* size == 2 */
5412 imm = 0xffffffff >> (32 - shift);
5413 }
5414 if (size < 2) {
5415 imm64 = imm | (((uint64_t)imm) << 32);
5416 } else {
5417 imm64 = imm;
9ee6e8bb 5418 }
acdf01ef 5419 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5420 }
5421 }
ad69471c 5422 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5423 }
f73534a5 5424 } else if (op >= 14) {
9ee6e8bb 5425 /* VCVT fixed-point. */
cc13115b
PM
5426 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5427 return 1;
5428 }
f73534a5
PM
5429 /* We have already masked out the must-be-1 top bit of imm6,
5430 * hence this 32-shift where the ARM ARM has 64-imm6.
5431 */
5432 shift = 32 - shift;
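            /* Example (a sketch): a conversion with 16 fraction bits is
             * encoded with imm6 = 0b110000; after the must-be-1 top bit is
             * masked off the extracted shift is 16, and the line above
             * recovers 32 - 16 = 16 as the fraction-bit count handed to the
             * gen_vfp_ulto/slto/toul/tosl helpers below.
             */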
9ee6e8bb 5433 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5434 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5435 if (!(op & 1)) {
9ee6e8bb 5436 if (u)
5500b06c 5437 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5438 else
5500b06c 5439 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5440 } else {
5441 if (u)
5500b06c 5442 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5443 else
5500b06c 5444 gen_vfp_tosl(0, shift, 1);
2c0262af 5445 }
4373f3ce 5446 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5447 }
5448 } else {
9ee6e8bb
PB
5449 return 1;
5450 }
5451 } else { /* (insn & 0x00380080) == 0 */
5452 int invert;
7d80fee5
PM
5453 if (q && (rd & 1)) {
5454 return 1;
5455 }
9ee6e8bb
PB
5456
5457 op = (insn >> 8) & 0xf;
5458 /* One register and immediate. */
5459 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5460 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5461 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5462 * We choose to not special-case this and will behave as if a
5463 * valid constant encoding of 0 had been given.
5464 */
9ee6e8bb
PB
5465 switch (op) {
5466 case 0: case 1:
5467 /* no-op */
5468 break;
5469 case 2: case 3:
5470 imm <<= 8;
5471 break;
5472 case 4: case 5:
5473 imm <<= 16;
5474 break;
5475 case 6: case 7:
5476 imm <<= 24;
5477 break;
5478 case 8: case 9:
5479 imm |= imm << 16;
5480 break;
5481 case 10: case 11:
5482 imm = (imm << 8) | (imm << 24);
5483 break;
5484 case 12:
8e31209e 5485 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5486 break;
5487 case 13:
5488 imm = (imm << 16) | 0xffff;
5489 break;
5490 case 14:
5491 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5492 if (invert)
5493 imm = ~imm;
5494 break;
5495 case 15:
7d80fee5
PM
5496 if (invert) {
5497 return 1;
5498 }
9ee6e8bb
PB
5499 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5500 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5501 break;
5502 }
5503 if (invert)
5504 imm = ~imm;
5505
9ee6e8bb
PB
5506 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5507 if (op & 1 && op < 12) {
ad69471c 5508 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5509 if (invert) {
5510 /* The immediate value has already been inverted, so
5511 BIC becomes AND. */
ad69471c 5512 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5513 } else {
ad69471c 5514 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5515 }
9ee6e8bb 5516 } else {
ad69471c 5517 /* VMOV, VMVN. */
7d1b0095 5518 tmp = tcg_temp_new_i32();
9ee6e8bb 5519 if (op == 14 && invert) {
a5a14945 5520 int n;
ad69471c
PB
5521 uint32_t val;
5522 val = 0;
9ee6e8bb
PB
5523 for (n = 0; n < 4; n++) {
5524 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5525 val |= 0xff << (n * 8);
9ee6e8bb 5526 }
ad69471c
PB
5527 tcg_gen_movi_i32(tmp, val);
5528 } else {
5529 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5530 }
9ee6e8bb 5531 }
ad69471c 5532 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5533 }
5534 }
e4b3861d 5535 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5536 if (size != 3) {
5537 op = (insn >> 8) & 0xf;
5538 if ((insn & (1 << 6)) == 0) {
5539 /* Three registers of different lengths. */
5540 int src1_wide;
5541 int src2_wide;
5542 int prewiden;
695272dc
PM
5543 /* undefreq: bit 0 : UNDEF if size != 0
5544 * bit 1 : UNDEF if size == 0
5545 * bit 2 : UNDEF if U == 1
5546 * Note that [1:0] set implies 'always UNDEF'
5547 */
5548 int undefreq;
5549 /* prewiden, src1_wide, src2_wide, undefreq */
5550 static const int neon_3reg_wide[16][4] = {
5551 {1, 0, 0, 0}, /* VADDL */
5552 {1, 1, 0, 0}, /* VADDW */
5553 {1, 0, 0, 0}, /* VSUBL */
5554 {1, 1, 0, 0}, /* VSUBW */
5555 {0, 1, 1, 0}, /* VADDHN */
5556 {0, 0, 0, 0}, /* VABAL */
5557 {0, 1, 1, 0}, /* VSUBHN */
5558 {0, 0, 0, 0}, /* VABDL */
5559 {0, 0, 0, 0}, /* VMLAL */
5560 {0, 0, 0, 6}, /* VQDMLAL */
5561 {0, 0, 0, 0}, /* VMLSL */
5562 {0, 0, 0, 6}, /* VQDMLSL */
5563 {0, 0, 0, 0}, /* Integer VMULL */
5564 {0, 0, 0, 2}, /* VQDMULL */
5565 {0, 0, 0, 5}, /* Polynomial VMULL */
5566 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5567 };
5568
5569 prewiden = neon_3reg_wide[op][0];
5570 src1_wide = neon_3reg_wide[op][1];
5571 src2_wide = neon_3reg_wide[op][2];
695272dc 5572 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5573
695272dc
PM
5574 if (((undefreq & 1) && (size != 0)) ||
5575 ((undefreq & 2) && (size == 0)) ||
5576 ((undefreq & 4) && u)) {
5577 return 1;
5578 }
5579 if ((src1_wide && (rn & 1)) ||
5580 (src2_wide && (rm & 1)) ||
5581 (!src2_wide && (rd & 1))) {
ad69471c 5582 return 1;
695272dc 5583 }
ad69471c 5584
9ee6e8bb
PB
5585 /* Avoid overlapping operands. Wide source operands are
5586 always aligned so will never overlap with wide
5587 destinations in problematic ways. */
8f8e3aa4 5588 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5589 tmp = neon_load_reg(rm, 1);
5590 neon_store_scratch(2, tmp);
8f8e3aa4 5591 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5592 tmp = neon_load_reg(rn, 1);
5593 neon_store_scratch(2, tmp);
9ee6e8bb 5594 }
a50f5b91 5595 TCGV_UNUSED(tmp3);
9ee6e8bb 5596 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5597 if (src1_wide) {
5598 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5599 TCGV_UNUSED(tmp);
9ee6e8bb 5600 } else {
ad69471c 5601 if (pass == 1 && rd == rn) {
dd8fbd78 5602 tmp = neon_load_scratch(2);
9ee6e8bb 5603 } else {
ad69471c
PB
5604 tmp = neon_load_reg(rn, pass);
5605 }
5606 if (prewiden) {
5607 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5608 }
5609 }
ad69471c
PB
5610 if (src2_wide) {
5611 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5612 TCGV_UNUSED(tmp2);
9ee6e8bb 5613 } else {
ad69471c 5614 if (pass == 1 && rd == rm) {
dd8fbd78 5615 tmp2 = neon_load_scratch(2);
9ee6e8bb 5616 } else {
ad69471c
PB
5617 tmp2 = neon_load_reg(rm, pass);
5618 }
5619 if (prewiden) {
5620 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5621 }
9ee6e8bb
PB
5622 }
5623 switch (op) {
5624 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5625 gen_neon_addl(size);
9ee6e8bb 5626 break;
79b0e534 5627 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5628 gen_neon_subl(size);
9ee6e8bb
PB
5629 break;
5630 case 5: case 7: /* VABAL, VABDL */
5631 switch ((size << 1) | u) {
ad69471c
PB
5632 case 0:
5633 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5634 break;
5635 case 1:
5636 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5637 break;
5638 case 2:
5639 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5640 break;
5641 case 3:
5642 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5643 break;
5644 case 4:
5645 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5646 break;
5647 case 5:
5648 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5649 break;
9ee6e8bb
PB
5650 default: abort();
5651 }
7d1b0095
PM
5652 tcg_temp_free_i32(tmp2);
5653 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5654 break;
5655 case 8: case 9: case 10: case 11: case 12: case 13:
5656 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5657 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5658 break;
5659 case 14: /* Polynomial VMULL */
e5ca24cb 5660 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5661 tcg_temp_free_i32(tmp2);
5662 tcg_temp_free_i32(tmp);
e5ca24cb 5663 break;
695272dc
PM
5664 default: /* 15 is RESERVED: caught earlier */
5665 abort();
9ee6e8bb 5666 }
ebcd88ce
PM
5667 if (op == 13) {
5668 /* VQDMULL */
5669 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5670 neon_store_reg64(cpu_V0, rd + pass);
5671 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5672 /* Accumulate. */
ebcd88ce 5673 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5674 switch (op) {
4dc064e6
PM
5675 case 10: /* VMLSL */
5676 gen_neon_negl(cpu_V0, size);
5677 /* Fall through */
5678 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5679 gen_neon_addl(size);
9ee6e8bb
PB
5680 break;
5681 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5682 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5683 if (op == 11) {
5684 gen_neon_negl(cpu_V0, size);
5685 }
ad69471c
PB
5686 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5687 break;
9ee6e8bb
PB
5688 default:
5689 abort();
5690 }
ad69471c 5691 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5692 } else if (op == 4 || op == 6) {
5693 /* Narrowing operation. */
7d1b0095 5694 tmp = tcg_temp_new_i32();
79b0e534 5695 if (!u) {
9ee6e8bb 5696 switch (size) {
ad69471c
PB
5697 case 0:
5698 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5699 break;
5700 case 1:
5701 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5702 break;
5703 case 2:
5704 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5705 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5706 break;
9ee6e8bb
PB
5707 default: abort();
5708 }
5709 } else {
5710 switch (size) {
ad69471c
PB
5711 case 0:
5712 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5713 break;
5714 case 1:
5715 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5716 break;
5717 case 2:
5718 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5719 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5720 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5721 break;
9ee6e8bb
PB
5722 default: abort();
5723 }
5724 }
ad69471c
PB
5725 if (pass == 0) {
5726 tmp3 = tmp;
5727 } else {
5728 neon_store_reg(rd, 0, tmp3);
5729 neon_store_reg(rd, 1, tmp);
5730 }
9ee6e8bb
PB
5731 } else {
5732 /* Write back the result. */
ad69471c 5733 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5734 }
5735 }
5736 } else {
3e3326df
PM
5737 /* Two registers and a scalar. NB that for ops of this form
5738 * the ARM ARM labels bit 24 as Q, but it is in our variable
5739 * 'u', not 'q'.
5740 */
5741 if (size == 0) {
5742 return 1;
5743 }
9ee6e8bb 5744 switch (op) {
9ee6e8bb 5745 case 1: /* Float VMLA scalar */
9ee6e8bb 5746 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5747 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5748 if (size == 1) {
5749 return 1;
5750 }
5751 /* fall through */
5752 case 0: /* Integer VMLA scalar */
5753 case 4: /* Integer VMLS scalar */
5754 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5755 case 12: /* VQDMULH scalar */
5756 case 13: /* VQRDMULH scalar */
3e3326df
PM
5757 if (u && ((rd | rn) & 1)) {
5758 return 1;
5759 }
dd8fbd78
FN
5760 tmp = neon_get_scalar(size, rm);
5761 neon_store_scratch(0, tmp);
9ee6e8bb 5762 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5763 tmp = neon_load_scratch(0);
5764 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5765 if (op == 12) {
5766 if (size == 1) {
02da0b2d 5767 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5768 } else {
02da0b2d 5769 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5770 }
5771 } else if (op == 13) {
5772 if (size == 1) {
02da0b2d 5773 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5774 } else {
02da0b2d 5775 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5776 }
5777 } else if (op & 1) {
aa47cfdd
PM
5778 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5779 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5780 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5781 } else {
5782 switch (size) {
dd8fbd78
FN
5783 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5784 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5785 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5786 default: abort();
9ee6e8bb
PB
5787 }
5788 }
7d1b0095 5789 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5790 if (op < 8) {
5791 /* Accumulate. */
dd8fbd78 5792 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5793 switch (op) {
5794 case 0:
dd8fbd78 5795 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5796 break;
5797 case 1:
aa47cfdd
PM
5798 {
5799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5800 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5801 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5802 break;
aa47cfdd 5803 }
9ee6e8bb 5804 case 4:
dd8fbd78 5805 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5806 break;
5807 case 5:
aa47cfdd
PM
5808 {
5809 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5810 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5811 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5812 break;
aa47cfdd 5813 }
9ee6e8bb
PB
5814 default:
5815 abort();
5816 }
7d1b0095 5817 tcg_temp_free_i32(tmp2);
9ee6e8bb 5818 }
dd8fbd78 5819 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5820 }
5821 break;
9ee6e8bb 5822 case 3: /* VQDMLAL scalar */
9ee6e8bb 5823 case 7: /* VQDMLSL scalar */
9ee6e8bb 5824 case 11: /* VQDMULL scalar */
3e3326df 5825 if (u == 1) {
ad69471c 5826 return 1;
3e3326df
PM
5827 }
5828 /* fall through */
5829 case 2: /* VMLAL scalar */
5830 case 6: /* VMLSL scalar */
5831 case 10: /* VMULL scalar */
5832 if (rd & 1) {
5833 return 1;
5834 }
dd8fbd78 5835 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5836 /* We need a copy of tmp2 because gen_neon_mull
5837 * deletes it during pass 0. */
7d1b0095 5838 tmp4 = tcg_temp_new_i32();
c6067f04 5839 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5840 tmp3 = neon_load_reg(rn, 1);
ad69471c 5841
9ee6e8bb 5842 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5843 if (pass == 0) {
5844 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5845 } else {
dd8fbd78 5846 tmp = tmp3;
c6067f04 5847 tmp2 = tmp4;
9ee6e8bb 5848 }
ad69471c 5849 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5850 if (op != 11) {
5851 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5852 }
9ee6e8bb 5853 switch (op) {
4dc064e6
PM
5854 case 6:
5855 gen_neon_negl(cpu_V0, size);
5856 /* Fall through */
5857 case 2:
ad69471c 5858 gen_neon_addl(size);
9ee6e8bb
PB
5859 break;
5860 case 3: case 7:
ad69471c 5861 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5862 if (op == 7) {
5863 gen_neon_negl(cpu_V0, size);
5864 }
ad69471c 5865 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5866 break;
5867 case 10:
5868 /* no-op */
5869 break;
5870 case 11:
ad69471c 5871 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5872 break;
5873 default:
5874 abort();
5875 }
ad69471c 5876 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5877 }
dd8fbd78 5878
dd8fbd78 5879
9ee6e8bb
PB
5880 break;
5881 default: /* 14 and 15 are RESERVED */
5882 return 1;
5883 }
5884 }
5885 } else { /* size == 3 */
5886 if (!u) {
5887 /* Extract. */
9ee6e8bb 5888 imm = (insn >> 8) & 0xf;
ad69471c
PB
5889
5890 if (imm > 7 && !q)
5891 return 1;
5892
52579ea1
PM
5893 if (q && ((rd | rn | rm) & 1)) {
5894 return 1;
5895 }
5896
ad69471c
PB
5897 if (imm == 0) {
5898 neon_load_reg64(cpu_V0, rn);
5899 if (q) {
5900 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5901 }
ad69471c
PB
5902 } else if (imm == 8) {
5903 neon_load_reg64(cpu_V0, rn + 1);
5904 if (q) {
5905 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5906 }
ad69471c 5907 } else if (q) {
a7812ae4 5908 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5909 if (imm < 8) {
5910 neon_load_reg64(cpu_V0, rn);
a7812ae4 5911 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5912 } else {
5913 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5914 neon_load_reg64(tmp64, rm);
ad69471c
PB
5915 }
5916 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5917 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5918 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5919 if (imm < 8) {
5920 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5921 } else {
ad69471c
PB
5922 neon_load_reg64(cpu_V1, rm + 1);
5923 imm -= 8;
9ee6e8bb 5924 }
ad69471c 5925 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5926 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5927 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5928 tcg_temp_free_i64(tmp64);
ad69471c 5929 } else {
a7812ae4 5930 /* BUGFIX */
ad69471c 5931 neon_load_reg64(cpu_V0, rn);
a7812ae4 5932 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5933 neon_load_reg64(cpu_V1, rm);
a7812ae4 5934 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5935 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5936 }
5937 neon_store_reg64(cpu_V0, rd);
5938 if (q) {
5939 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5940 }
5941 } else if ((insn & (1 << 11)) == 0) {
5942 /* Two register misc. */
5943 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5944 size = (insn >> 18) & 3;
600b828c
PM
5945 /* UNDEF for unknown op values and bad op-size combinations */
5946 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5947 return 1;
5948 }
fc2a9b37
PM
5949 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5950 q && ((rm | rd) & 1)) {
5951 return 1;
5952 }
9ee6e8bb 5953 switch (op) {
600b828c 5954 case NEON_2RM_VREV64:
9ee6e8bb 5955 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5956 tmp = neon_load_reg(rm, pass * 2);
5957 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5958 switch (size) {
dd8fbd78
FN
5959 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5960 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5961 case 2: /* no-op */ break;
5962 default: abort();
5963 }
dd8fbd78 5964 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5965 if (size == 2) {
dd8fbd78 5966 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5967 } else {
9ee6e8bb 5968 switch (size) {
dd8fbd78
FN
5969 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5970 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5971 default: abort();
5972 }
dd8fbd78 5973 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5974 }
5975 }
5976 break;
600b828c
PM
5977 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5978 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5979 for (pass = 0; pass < q + 1; pass++) {
5980 tmp = neon_load_reg(rm, pass * 2);
5981 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5982 tmp = neon_load_reg(rm, pass * 2 + 1);
5983 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5984 switch (size) {
5985 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5986 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5987 case 2: tcg_gen_add_i64(CPU_V001); break;
5988 default: abort();
5989 }
600b828c 5990 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5991 /* Accumulate. */
ad69471c
PB
5992 neon_load_reg64(cpu_V1, rd + pass);
5993 gen_neon_addl(size);
9ee6e8bb 5994 }
ad69471c 5995 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5996 }
5997 break;
600b828c 5998 case NEON_2RM_VTRN:
9ee6e8bb 5999 if (size == 2) {
a5a14945 6000 int n;
9ee6e8bb 6001 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6002 tmp = neon_load_reg(rm, n);
6003 tmp2 = neon_load_reg(rd, n + 1);
6004 neon_store_reg(rm, n, tmp2);
6005 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6006 }
6007 } else {
6008 goto elementwise;
6009 }
6010 break;
600b828c 6011 case NEON_2RM_VUZP:
02acedf9 6012 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6013 return 1;
9ee6e8bb
PB
6014 }
6015 break;
600b828c 6016 case NEON_2RM_VZIP:
d68a6f3a 6017 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6018 return 1;
9ee6e8bb
PB
6019 }
6020 break;
600b828c
PM
6021 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6022 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6023 if (rm & 1) {
6024 return 1;
6025 }
a50f5b91 6026 TCGV_UNUSED(tmp2);
9ee6e8bb 6027 for (pass = 0; pass < 2; pass++) {
ad69471c 6028 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6029 tmp = tcg_temp_new_i32();
600b828c
PM
6030 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6031 tmp, cpu_V0);
ad69471c
PB
6032 if (pass == 0) {
6033 tmp2 = tmp;
6034 } else {
6035 neon_store_reg(rd, 0, tmp2);
6036 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6037 }
9ee6e8bb
PB
6038 }
6039 break;
600b828c 6040 case NEON_2RM_VSHLL:
fc2a9b37 6041 if (q || (rd & 1)) {
9ee6e8bb 6042 return 1;
600b828c 6043 }
ad69471c
PB
6044 tmp = neon_load_reg(rm, 0);
6045 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6046 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6047 if (pass == 1)
6048 tmp = tmp2;
6049 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6050 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6051 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6052 }
6053 break;
600b828c 6054 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6055 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6056 q || (rm & 1)) {
6057 return 1;
6058 }
7d1b0095
PM
6059 tmp = tcg_temp_new_i32();
6060 tmp2 = tcg_temp_new_i32();
60011498 6061 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6062 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6063 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6064 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6065 tcg_gen_shli_i32(tmp2, tmp2, 16);
6066 tcg_gen_or_i32(tmp2, tmp2, tmp);
6067 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6068 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6069 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6070 neon_store_reg(rd, 0, tmp2);
7d1b0095 6071 tmp2 = tcg_temp_new_i32();
2d981da7 6072 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6073 tcg_gen_shli_i32(tmp2, tmp2, 16);
6074 tcg_gen_or_i32(tmp2, tmp2, tmp);
6075 neon_store_reg(rd, 1, tmp2);
7d1b0095 6076 tcg_temp_free_i32(tmp);
60011498 6077 break;
600b828c 6078 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6079 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6080 q || (rd & 1)) {
6081 return 1;
6082 }
7d1b0095 6083 tmp3 = tcg_temp_new_i32();
60011498
PB
6084 tmp = neon_load_reg(rm, 0);
6085 tmp2 = neon_load_reg(rm, 1);
6086 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6087 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6088 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6089 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6090 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6091 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6092 tcg_temp_free_i32(tmp);
60011498 6093 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6094 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6095 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6096 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6097 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6098 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6099 tcg_temp_free_i32(tmp2);
6100 tcg_temp_free_i32(tmp3);
60011498 6101 break;
9ee6e8bb
PB
6102 default:
6103 elementwise:
6104 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6105 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6106 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6107 neon_reg_offset(rm, pass));
dd8fbd78 6108 TCGV_UNUSED(tmp);
9ee6e8bb 6109 } else {
dd8fbd78 6110 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6111 }
6112 switch (op) {
600b828c 6113 case NEON_2RM_VREV32:
9ee6e8bb 6114 switch (size) {
dd8fbd78
FN
6115 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6116 case 1: gen_swap_half(tmp); break;
600b828c 6117 default: abort();
9ee6e8bb
PB
6118 }
6119 break;
600b828c 6120 case NEON_2RM_VREV16:
dd8fbd78 6121 gen_rev16(tmp);
9ee6e8bb 6122 break;
600b828c 6123 case NEON_2RM_VCLS:
9ee6e8bb 6124 switch (size) {
dd8fbd78
FN
6125 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6126 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6127 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6128 default: abort();
9ee6e8bb
PB
6129 }
6130 break;
600b828c 6131 case NEON_2RM_VCLZ:
9ee6e8bb 6132 switch (size) {
dd8fbd78
FN
6133 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6134 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6135 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6136 default: abort();
9ee6e8bb
PB
6137 }
6138 break;
600b828c 6139 case NEON_2RM_VCNT:
dd8fbd78 6140 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6141 break;
600b828c 6142 case NEON_2RM_VMVN:
dd8fbd78 6143 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6144 break;
600b828c 6145 case NEON_2RM_VQABS:
9ee6e8bb 6146 switch (size) {
02da0b2d
PM
6147 case 0:
6148 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6149 break;
6150 case 1:
6151 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6152 break;
6153 case 2:
6154 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6155 break;
600b828c 6156 default: abort();
9ee6e8bb
PB
6157 }
6158 break;
600b828c 6159 case NEON_2RM_VQNEG:
9ee6e8bb 6160 switch (size) {
02da0b2d
PM
6161 case 0:
6162 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6163 break;
6164 case 1:
6165 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6166 break;
6167 case 2:
6168 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6169 break;
600b828c 6170 default: abort();
9ee6e8bb
PB
6171 }
6172 break;
600b828c 6173 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6174 tmp2 = tcg_const_i32(0);
9ee6e8bb 6175 switch(size) {
dd8fbd78
FN
6176 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6177 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6178 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6179 default: abort();
9ee6e8bb 6180 }
dd8fbd78 6181 tcg_temp_free(tmp2);
600b828c 6182 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6183 tcg_gen_not_i32(tmp, tmp);
600b828c 6184 }
9ee6e8bb 6185 break;
600b828c 6186 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6187 tmp2 = tcg_const_i32(0);
9ee6e8bb 6188 switch(size) {
dd8fbd78
FN
6189 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6190 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6191 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6192 default: abort();
9ee6e8bb 6193 }
dd8fbd78 6194 tcg_temp_free(tmp2);
600b828c 6195 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6196 tcg_gen_not_i32(tmp, tmp);
600b828c 6197 }
9ee6e8bb 6198 break;
600b828c 6199 case NEON_2RM_VCEQ0:
dd8fbd78 6200 tmp2 = tcg_const_i32(0);
9ee6e8bb 6201 switch(size) {
dd8fbd78
FN
6202 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6203 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6204 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6205 default: abort();
9ee6e8bb 6206 }
dd8fbd78 6207 tcg_temp_free(tmp2);
9ee6e8bb 6208 break;
600b828c 6209 case NEON_2RM_VABS:
9ee6e8bb 6210 switch(size) {
dd8fbd78
FN
6211 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6212 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6213 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6214 default: abort();
9ee6e8bb
PB
6215 }
6216 break;
600b828c 6217 case NEON_2RM_VNEG:
dd8fbd78
FN
6218 tmp2 = tcg_const_i32(0);
6219 gen_neon_rsb(size, tmp, tmp2);
6220 tcg_temp_free(tmp2);
9ee6e8bb 6221 break;
600b828c 6222 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6223 {
6224 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6225 tmp2 = tcg_const_i32(0);
aa47cfdd 6226 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6227 tcg_temp_free(tmp2);
aa47cfdd 6228 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6229 break;
aa47cfdd 6230 }
600b828c 6231 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6232 {
6233 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6234 tmp2 = tcg_const_i32(0);
aa47cfdd 6235 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6236 tcg_temp_free(tmp2);
aa47cfdd 6237 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6238 break;
aa47cfdd 6239 }
600b828c 6240 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6241 {
6242 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6243 tmp2 = tcg_const_i32(0);
aa47cfdd 6244 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6245 tcg_temp_free(tmp2);
aa47cfdd 6246 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6247 break;
aa47cfdd 6248 }
600b828c 6249 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6250 {
6251 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6252 tmp2 = tcg_const_i32(0);
aa47cfdd 6253 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6254 tcg_temp_free(tmp2);
aa47cfdd 6255 tcg_temp_free_ptr(fpstatus);
0e326109 6256 break;
aa47cfdd 6257 }
600b828c 6258 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6259 {
6260 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6261 tmp2 = tcg_const_i32(0);
aa47cfdd 6262 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6263 tcg_temp_free(tmp2);
aa47cfdd 6264 tcg_temp_free_ptr(fpstatus);
0e326109 6265 break;
aa47cfdd 6266 }
600b828c 6267 case NEON_2RM_VABS_F:
4373f3ce 6268 gen_vfp_abs(0);
9ee6e8bb 6269 break;
600b828c 6270 case NEON_2RM_VNEG_F:
4373f3ce 6271 gen_vfp_neg(0);
9ee6e8bb 6272 break;
600b828c 6273 case NEON_2RM_VSWP:
dd8fbd78
FN
6274 tmp2 = neon_load_reg(rd, pass);
6275 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6276 break;
600b828c 6277 case NEON_2RM_VTRN:
dd8fbd78 6278 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6279 switch (size) {
dd8fbd78
FN
6280 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6281 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6282 default: abort();
9ee6e8bb 6283 }
dd8fbd78 6284 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6285 break;
600b828c 6286 case NEON_2RM_VRECPE:
dd8fbd78 6287 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6288 break;
600b828c 6289 case NEON_2RM_VRSQRTE:
dd8fbd78 6290 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6291 break;
600b828c 6292 case NEON_2RM_VRECPE_F:
4373f3ce 6293 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6294 break;
600b828c 6295 case NEON_2RM_VRSQRTE_F:
4373f3ce 6296 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6297 break;
600b828c 6298 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6299 gen_vfp_sito(0, 1);
9ee6e8bb 6300 break;
600b828c 6301 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6302 gen_vfp_uito(0, 1);
9ee6e8bb 6303 break;
600b828c 6304 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6305 gen_vfp_tosiz(0, 1);
9ee6e8bb 6306 break;
600b828c 6307 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6308 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6309 break;
6310 default:
600b828c
PM
6311 /* Reserved op values were caught by the
6312 * neon_2rm_sizes[] check earlier.
6313 */
6314 abort();
9ee6e8bb 6315 }
600b828c 6316 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6317 tcg_gen_st_f32(cpu_F0s, cpu_env,
6318 neon_reg_offset(rd, pass));
9ee6e8bb 6319 } else {
dd8fbd78 6320 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6321 }
6322 }
6323 break;
6324 }
6325 } else if ((insn & (1 << 10)) == 0) {
6326 /* VTBL, VTBX. */
56907d77
PM
6327 int n = ((insn >> 8) & 3) + 1;
6328 if ((rn + n) > 32) {
6329 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6330 * helper function running off the end of the register file.
6331 */
6332 return 1;
6333 }
6334 n <<= 3;
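            /* n is now the table length in bytes (8 per register). Bit 6 of
             * the insn selects VTBX, where out-of-range indices keep the
             * existing destination bytes (loaded into tmp below), rather than
             * VTBL, where they are zeroed (tmp forced to 0). */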
9ee6e8bb 6335 if (insn & (1 << 6)) {
8f8e3aa4 6336 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6337 } else {
7d1b0095 6338 tmp = tcg_temp_new_i32();
8f8e3aa4 6339 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6340 }
8f8e3aa4 6341 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6342 tmp4 = tcg_const_i32(rn);
6343 tmp5 = tcg_const_i32(n);
6344 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6345 tcg_temp_free_i32(tmp);
9ee6e8bb 6346 if (insn & (1 << 6)) {
8f8e3aa4 6347 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6348 } else {
7d1b0095 6349 tmp = tcg_temp_new_i32();
8f8e3aa4 6350 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6351 }
8f8e3aa4 6352 tmp3 = neon_load_reg(rm, 1);
b75263d6 6353 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6354 tcg_temp_free_i32(tmp5);
6355 tcg_temp_free_i32(tmp4);
8f8e3aa4 6356 neon_store_reg(rd, 0, tmp2);
3018f259 6357 neon_store_reg(rd, 1, tmp3);
7d1b0095 6358 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6359 } else if ((insn & 0x380) == 0) {
6360 /* VDUP */
133da6aa
JR
6361 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6362 return 1;
6363 }
9ee6e8bb 6364 if (insn & (1 << 19)) {
dd8fbd78 6365 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6366 } else {
dd8fbd78 6367 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6368 }
6369 if (insn & (1 << 16)) {
dd8fbd78 6370 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6371 } else if (insn & (1 << 17)) {
6372 if ((insn >> 18) & 1)
dd8fbd78 6373 gen_neon_dup_high16(tmp);
9ee6e8bb 6374 else
dd8fbd78 6375 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6376 }
6377 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6378 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6379 tcg_gen_mov_i32(tmp2, tmp);
6380 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6381 }
7d1b0095 6382 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6383 } else {
6384 return 1;
6385 }
6386 }
6387 }
6388 return 0;
6389}
6390
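/* Illustrative sketch in plain host C (helper and main() are invented for
 * the example): the "one register and immediate" case above expands the
 * 8-bit immediate into a 32-bit lane value according to op, exactly as the
 * switch in that case does. The op == 14 with invert form builds a per-byte
 * mask instead, and op == 15 with invert is UNDEF in the decoder above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t neon_modimm_lane(int op, uint32_t imm8, int invert, int pass)
{
    uint32_t imm = imm8;

    if (op == 14 && invert) {
        /* Each low immediate bit selects 0xff or 0x00 for one byte of the
           32-bit lane; pass selects which half of the 64-bit element. */
        uint32_t val = 0;
        int n;
        for (n = 0; n < 4; n++) {
            if (imm8 & (1u << (n + (pass & 1) * 4))) {
                val |= 0xffu << (n * 8);
            }
        }
        return val;
    }

    switch (op) {
    case 0: case 1: break;                            /* byte 0 */
    case 2: case 3: imm <<= 8; break;                 /* byte 1 */
    case 4: case 5: imm <<= 16; break;                /* byte 2 */
    case 6: case 7: imm <<= 24; break;                /* byte 3 */
    case 8: case 9: imm |= imm << 16; break;          /* byte 0 of each half */
    case 10: case 11: imm = (imm << 8) | (imm << 24); break;
    case 12: imm = (imm << 8) | 0xff; break;          /* ones below */
    case 13: imm = (imm << 16) | 0xffff; break;       /* ones below */
    case 14: imm |= (imm << 8) | (imm << 16) | (imm << 24); break;
    case 15:                                          /* VFP-style float */
        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
        break;
    }
    if (invert) {
        imm = ~imm;            /* VMVN/VBIC forms use the inverted value */
    }
    return imm;
}

int main(void)
{
    /* op = 10 replicates 0xab into bytes 1 and 3: prints ab00ab00 */
    printf("%08x\n", neon_modimm_lane(10, 0xab, 0, 0));
    return 0;
}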
0ecb72a5 6391static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn)
fe1479c3
PB
6392{
6393 int crn = (insn >> 16) & 0xf;
6394 int crm = insn & 0xf;
6395 int op1 = (insn >> 21) & 7;
6396 int op2 = (insn >> 5) & 7;
6397 int rt = (insn >> 12) & 0xf;
6398 TCGv tmp;
6399
ca27c052
PM
6400 /* Minimal set of debug registers, since we don't support debug */
6401 if (op1 == 0 && crn == 0 && op2 == 0) {
6402 switch (crm) {
6403 case 0:
6404 /* DBGDIDR: just RAZ. In particular this means the
6405 * "debug architecture version" bits will read as
6406 * a reserved value, which should cause Linux to
6407 * not try to use the debug hardware.
6408 */
6409 tmp = tcg_const_i32(0);
6410 store_reg(s, rt, tmp);
6411 return 0;
6412 case 1:
6413 case 2:
6414 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6415 * don't implement memory mapped debug components
6416 */
6417 if (ENABLE_ARCH_7) {
6418 tmp = tcg_const_i32(0);
6419 store_reg(s, rt, tmp);
6420 return 0;
6421 }
6422 break;
6423 default:
6424 break;
6425 }
6426 }
6427
fe1479c3
PB
6428 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6429 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6430 /* TEECR */
6431 if (IS_USER(s))
6432 return 1;
6433 tmp = load_cpu_field(teecr);
6434 store_reg(s, rt, tmp);
6435 return 0;
6436 }
6437 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6438 /* TEEHBR */
6439 if (IS_USER(s) && (env->teecr & 1))
6440 return 1;
6441 tmp = load_cpu_field(teehbr);
6442 store_reg(s, rt, tmp);
6443 return 0;
6444 }
6445 }
fe1479c3
PB
6446 return 1;
6447}
6448
0ecb72a5 6449static int disas_cp14_write(CPUARMState * env, DisasContext *s, uint32_t insn)
fe1479c3
PB
6450{
6451 int crn = (insn >> 16) & 0xf;
6452 int crm = insn & 0xf;
6453 int op1 = (insn >> 21) & 7;
6454 int op2 = (insn >> 5) & 7;
6455 int rt = (insn >> 12) & 0xf;
6456 TCGv tmp;
6457
6458 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6459 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6460 /* TEECR */
6461 if (IS_USER(s))
6462 return 1;
6463 tmp = load_reg(s, rt);
6464 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6465 tcg_temp_free_i32(tmp);
fe1479c3
PB
6466 return 0;
6467 }
6468 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6469 /* TEEHBR */
6470 if (IS_USER(s) && (env->teecr & 1))
6471 return 1;
6472 tmp = load_reg(s, rt);
6473 store_cpu_field(tmp, teehbr);
6474 return 0;
6475 }
6476 }
fe1479c3
PB
6477 return 1;
6478}
6479
0ecb72a5 6480static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
6481{
6482 int cpnum;
6483
6484 cpnum = (insn >> 8) & 0xf;
6485 if (arm_feature(env, ARM_FEATURE_XSCALE)
6486 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6487 return 1;
6488
6489 switch (cpnum) {
6490 case 0:
6491 case 1:
6492 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6493 return disas_iwmmxt_insn(env, s, insn);
6494 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6495 return disas_dsp_insn(env, s, insn);
6496 }
6497 return 1;
6498 case 10:
6499 case 11:
6500 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6501 case 14:
6502 /* Coprocessors 7-15 are architecturally reserved by ARM.
6503 Unfortunately Intel decided to ignore this. */
6504 if (arm_feature(env, ARM_FEATURE_XSCALE))
6505 goto board;
6506 if (insn & (1 << 20))
6507 return disas_cp14_read(env, s, insn);
6508 else
6509 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6510 case 15:
6511 return disas_cp15_insn (env, s, insn);
6512 default:
fe1479c3 6513 board:
9ee6e8bb
PB
6514 /* Unknown coprocessor. See if the board has hooked it. */
6515 return disas_cp_insn (env, s, insn);
6516 }
6517}
6518
5e3f878a
PB
6519
6520/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6521static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6522{
6523 TCGv tmp;
7d1b0095 6524 tmp = tcg_temp_new_i32();
5e3f878a
PB
6525 tcg_gen_trunc_i64_i32(tmp, val);
6526 store_reg(s, rlow, tmp);
7d1b0095 6527 tmp = tcg_temp_new_i32();
5e3f878a
PB
6528 tcg_gen_shri_i64(val, val, 32);
6529 tcg_gen_trunc_i64_i32(tmp, val);
6530 store_reg(s, rhigh, tmp);
6531}
6532
6533/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6534static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6535{
a7812ae4 6536 TCGv_i64 tmp;
5e3f878a
PB
6537 TCGv tmp2;
6538
36aa55dc 6539 /* Load value and extend to 64 bits. */
a7812ae4 6540 tmp = tcg_temp_new_i64();
5e3f878a
PB
6541 tmp2 = load_reg(s, rlow);
6542 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6543 tcg_temp_free_i32(tmp2);
5e3f878a 6544 tcg_gen_add_i64(val, val, tmp);
b75263d6 6545 tcg_temp_free_i64(tmp);
5e3f878a
PB
6546}
6547
6548/* load and add a 64-bit value from a register pair. */
a7812ae4 6549static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6550{
a7812ae4 6551 TCGv_i64 tmp;
36aa55dc
PB
6552 TCGv tmpl;
6553 TCGv tmph;
5e3f878a
PB
6554
6555 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6556 tmpl = load_reg(s, rlow);
6557 tmph = load_reg(s, rhigh);
a7812ae4 6558 tmp = tcg_temp_new_i64();
36aa55dc 6559 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6560 tcg_temp_free_i32(tmpl);
6561 tcg_temp_free_i32(tmph);
5e3f878a 6562 tcg_gen_add_i64(val, val, tmp);
b75263d6 6563 tcg_temp_free_i64(tmp);
5e3f878a
PB
6564}
6565
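/* Illustrative sketch in plain host C (function names invented for the
 * example): gen_storeq_reg() and gen_addq() above treat a pair of 32-bit
 * guest registers as one 64-bit value, low word in rlow and high word in
 * rhigh. The packing and unpacking they generate is equivalent to:
 */
#include <stdint.h>

static uint64_t pack_reg_pair(uint32_t lo, uint32_t hi)
{
    /* gen_addq: tcg_gen_concat_i32_i64(tmp, tmpl, tmph) */
    return ((uint64_t)hi << 32) | lo;
}

static void unpack_reg_pair(uint64_t val, uint32_t *lo, uint32_t *hi)
{
    /* gen_storeq_reg: truncate for rlow, shift right 32 and truncate for rhigh */
    *lo = (uint32_t)val;
    *hi = (uint32_t)(val >> 32);
}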
6566/* Set N and Z flags from a 64-bit value. */
a7812ae4 6567static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6568{
7d1b0095 6569 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6570 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6571 gen_logic_CC(tmp);
7d1b0095 6572 tcg_temp_free_i32(tmp);
5e3f878a
PB
6573}
6574
426f5abc
PB
6575/* Load/Store exclusive instructions are implemented by remembering
6576 the value/address loaded, and seeing if these are the same
6577 when the store is performed. This should be sufficient to implement
6578 the architecturally mandated semantics, and avoids having to monitor
6579 regular stores.
6580
6581 In system emulation mode only one CPU will be running at once, so
6582 this sequence is effectively atomic. In user emulation mode we
6583 throw an exception and handle the atomic operation elsewhere. */
6584static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6585 TCGv addr, int size)
6586{
6587 TCGv tmp;
6588
6589 switch (size) {
6590 case 0:
6591 tmp = gen_ld8u(addr, IS_USER(s));
6592 break;
6593 case 1:
6594 tmp = gen_ld16u(addr, IS_USER(s));
6595 break;
6596 case 2:
6597 case 3:
6598 tmp = gen_ld32(addr, IS_USER(s));
6599 break;
6600 default:
6601 abort();
6602 }
6603 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6604 store_reg(s, rt, tmp);
6605 if (size == 3) {
7d1b0095 6606 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6607 tcg_gen_addi_i32(tmp2, addr, 4);
6608 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6609 tcg_temp_free_i32(tmp2);
426f5abc
PB
6610 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6611 store_reg(s, rt2, tmp);
6612 }
6613 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6614}
6615
6616static void gen_clrex(DisasContext *s)
6617{
6618 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6619}
6620
6621#ifdef CONFIG_USER_ONLY
6622static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6623 TCGv addr, int size)
6624{
6625 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6626 tcg_gen_movi_i32(cpu_exclusive_info,
6627 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6628 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6629}
6630#else
6631static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6632 TCGv addr, int size)
6633{
6634 TCGv tmp;
6635 int done_label;
6636 int fail_label;
6637
6638 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6639 [addr] = {Rt};
6640 {Rd} = 0;
6641 } else {
6642 {Rd} = 1;
6643 } */
6644 fail_label = gen_new_label();
6645 done_label = gen_new_label();
6646 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6647 switch (size) {
6648 case 0:
6649 tmp = gen_ld8u(addr, IS_USER(s));
6650 break;
6651 case 1:
6652 tmp = gen_ld16u(addr, IS_USER(s));
6653 break;
6654 case 2:
6655 case 3:
6656 tmp = gen_ld32(addr, IS_USER(s));
6657 break;
6658 default:
6659 abort();
6660 }
6661 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6662 tcg_temp_free_i32(tmp);
426f5abc 6663 if (size == 3) {
7d1b0095 6664 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6665 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6666 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6667 tcg_temp_free_i32(tmp2);
426f5abc 6668 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6669 tcg_temp_free_i32(tmp);
426f5abc
PB
6670 }
6671 tmp = load_reg(s, rt);
6672 switch (size) {
6673 case 0:
6674 gen_st8(tmp, addr, IS_USER(s));
6675 break;
6676 case 1:
6677 gen_st16(tmp, addr, IS_USER(s));
6678 break;
6679 case 2:
6680 case 3:
6681 gen_st32(tmp, addr, IS_USER(s));
6682 break;
6683 default:
6684 abort();
6685 }
6686 if (size == 3) {
6687 tcg_gen_addi_i32(addr, addr, 4);
6688 tmp = load_reg(s, rt2);
6689 gen_st32(tmp, addr, IS_USER(s));
6690 }
6691 tcg_gen_movi_i32(cpu_R[rd], 0);
6692 tcg_gen_br(done_label);
6693 gen_set_label(fail_label);
6694 tcg_gen_movi_i32(cpu_R[rd], 1);
6695 gen_set_label(done_label);
6696 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6697}
6698#endif
6699
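/* Illustrative sketch in plain host C (names invented for the example): the
 * code generated by gen_store_exclusive() above performs the check described
 * in its comment. For a word-sized access, and with only one CPU running as
 * noted above, the behaviour it models is:
 */
#include <stdint.h>

static int strex_model(uint32_t *memory, uint32_t addr,
                       uint32_t exclusive_addr, uint32_t exclusive_val,
                       uint32_t new_val)
{
    if (addr == exclusive_addr && memory[addr / 4] == exclusive_val) {
        memory[addr / 4] = new_val;   /* [addr] = Rt */
        return 0;                     /* Rd = 0: store succeeded */
    }
    return 1;                         /* Rd = 1: store failed */
}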
0ecb72a5 6700static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6701{
6702 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6703 TCGv tmp;
3670669c 6704 TCGv tmp2;
6ddbc6e4 6705 TCGv tmp3;
b0109805 6706 TCGv addr;
a7812ae4 6707 TCGv_i64 tmp64;
9ee6e8bb 6708
d8fd2954 6709 insn = arm_ldl_code(s->pc, s->bswap_code);
9ee6e8bb
PB
6710 s->pc += 4;
6711
6712 /* M variants do not implement ARM mode. */
6713 if (IS_M(env))
6714 goto illegal_op;
6715 cond = insn >> 28;
6716 if (cond == 0xf){
be5e7a76
DES
6717 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6718 * choose to UNDEF. In ARMv5 and above the space is used
6719 * for miscellaneous unconditional instructions.
6720 */
6721 ARCH(5);
6722
9ee6e8bb
PB
6723 /* Unconditional instructions. */
6724 if (((insn >> 25) & 7) == 1) {
6725 /* NEON Data processing. */
6726 if (!arm_feature(env, ARM_FEATURE_NEON))
6727 goto illegal_op;
6728
6729 if (disas_neon_data_insn(env, s, insn))
6730 goto illegal_op;
6731 return;
6732 }
6733 if ((insn & 0x0f100000) == 0x04000000) {
6734 /* NEON load/store. */
6735 if (!arm_feature(env, ARM_FEATURE_NEON))
6736 goto illegal_op;
6737
6738 if (disas_neon_ls_insn(env, s, insn))
6739 goto illegal_op;
6740 return;
6741 }
3d185e5d
PM
6742 if (((insn & 0x0f30f000) == 0x0510f000) ||
6743 ((insn & 0x0f30f010) == 0x0710f000)) {
6744 if ((insn & (1 << 22)) == 0) {
6745 /* PLDW; v7MP */
6746 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6747 goto illegal_op;
6748 }
6749 }
6750 /* Otherwise PLD; v5TE+ */
be5e7a76 6751 ARCH(5TE);
3d185e5d
PM
6752 return;
6753 }
6754 if (((insn & 0x0f70f000) == 0x0450f000) ||
6755 ((insn & 0x0f70f010) == 0x0650f000)) {
6756 ARCH(7);
6757 return; /* PLI; V7 */
6758 }
6759 if (((insn & 0x0f700000) == 0x04100000) ||
6760 ((insn & 0x0f700010) == 0x06100000)) {
6761 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6762 goto illegal_op;
6763 }
6764 return; /* v7MP: Unallocated memory hint: must NOP */
6765 }
6766
6767 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6768 ARCH(6);
6769 /* setend */
10962fd5
PM
6770 if (((insn >> 9) & 1) != s->bswap_code) {
6771 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6772 goto illegal_op;
6773 }
6774 return;
6775 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6776 switch ((insn >> 4) & 0xf) {
6777 case 1: /* clrex */
6778 ARCH(6K);
426f5abc 6779 gen_clrex(s);
9ee6e8bb
PB
6780 return;
6781 case 4: /* dsb */
6782 case 5: /* dmb */
6783 case 6: /* isb */
6784 ARCH(7);
6785 /* We don't emulate caches so these are a no-op. */
6786 return;
6787 default:
6788 goto illegal_op;
6789 }
6790 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6791 /* srs */
c67b6b71 6792 int32_t offset;
9ee6e8bb
PB
6793 if (IS_USER(s))
6794 goto illegal_op;
6795 ARCH(6);
6796 op1 = (insn & 0x1f);
7d1b0095 6797 addr = tcg_temp_new_i32();
39ea3d4e
PM
6798 tmp = tcg_const_i32(op1);
6799 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6800 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6801 i = (insn >> 23) & 3;
6802 switch (i) {
6803 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6804 case 1: offset = 0; break; /* IA */
6805 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6806 case 3: offset = 4; break; /* IB */
6807 default: abort();
6808 }
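            /* These offsets place addr on the lower of the two words about to
             * be stored (LR, then SPSR at addr + 4), matching the DA/IA/DB/IB
             * addressing modes; the second switch below computes the
             * writeback value for the banked SP. */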
6809 if (offset)
b0109805
PB
6810 tcg_gen_addi_i32(addr, addr, offset);
6811 tmp = load_reg(s, 14);
6812 gen_st32(tmp, addr, 0);
c67b6b71 6813 tmp = load_cpu_field(spsr);
b0109805
PB
6814 tcg_gen_addi_i32(addr, addr, 4);
6815 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6816 if (insn & (1 << 21)) {
6817 /* Base writeback. */
6818 switch (i) {
6819 case 0: offset = -8; break;
c67b6b71
FN
6820 case 1: offset = 4; break;
6821 case 2: offset = -4; break;
9ee6e8bb
PB
6822 case 3: offset = 0; break;
6823 default: abort();
6824 }
6825 if (offset)
c67b6b71 6826 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6827 tmp = tcg_const_i32(op1);
6828 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6829 tcg_temp_free_i32(tmp);
7d1b0095 6830 tcg_temp_free_i32(addr);
b0109805 6831 } else {
7d1b0095 6832 tcg_temp_free_i32(addr);
9ee6e8bb 6833 }
a990f58f 6834 return;
ea825eee 6835 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6836 /* rfe */
c67b6b71 6837 int32_t offset;
9ee6e8bb
PB
6838 if (IS_USER(s))
6839 goto illegal_op;
6840 ARCH(6);
6841 rn = (insn >> 16) & 0xf;
b0109805 6842 addr = load_reg(s, rn);
9ee6e8bb
PB
6843 i = (insn >> 23) & 3;
6844 switch (i) {
b0109805 6845 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6846 case 1: offset = 0; break; /* IA */
6847 case 2: offset = -8; break; /* DB */
b0109805 6848 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6849 default: abort();
6850 }
6851 if (offset)
b0109805
PB
6852 tcg_gen_addi_i32(addr, addr, offset);
6853 /* Load PC into tmp and CPSR into tmp2. */
6854 tmp = gen_ld32(addr, 0);
6855 tcg_gen_addi_i32(addr, addr, 4);
6856 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6857 if (insn & (1 << 21)) {
6858 /* Base writeback. */
6859 switch (i) {
b0109805 6860 case 0: offset = -8; break;
c67b6b71
FN
6861 case 1: offset = 4; break;
6862 case 2: offset = -4; break;
b0109805 6863 case 3: offset = 0; break;
9ee6e8bb
PB
6864 default: abort();
6865 }
6866 if (offset)
b0109805
PB
6867 tcg_gen_addi_i32(addr, addr, offset);
6868 store_reg(s, rn, addr);
6869 } else {
7d1b0095 6870 tcg_temp_free_i32(addr);
9ee6e8bb 6871 }
b0109805 6872 gen_rfe(s, tmp, tmp2);
c67b6b71 6873 return;
9ee6e8bb
PB
6874 } else if ((insn & 0x0e000000) == 0x0a000000) {
6875 /* branch link and change to thumb (blx <offset>) */
6876 int32_t offset;
6877
6878 val = (uint32_t)s->pc;
7d1b0095 6879 tmp = tcg_temp_new_i32();
d9ba4830
PB
6880 tcg_gen_movi_i32(tmp, val);
6881 store_reg(s, 14, tmp);
9ee6e8bb
PB
6882 /* Sign-extend the 24-bit offset */
6883 offset = (((int32_t)insn) << 8) >> 8;
6884 /* offset * 4 + bit24 * 2 + (thumb bit) */
6885 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6886 /* pipeline offset */
6887 val += 4;
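            /* val is now the Thumb entry point: this insn's address plus 8
             * (pipeline offset) plus the signed offset * 4, with bit 1 taken
             * from the H bit (insn bit 24) and bit 0 set so gen_bx_im()
             * switches to Thumb state. */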
be5e7a76 6888 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6889 gen_bx_im(s, val);
9ee6e8bb
PB
6890 return;
6891 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6892 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6893 /* iWMMXt register transfer. */
6894 if (env->cp15.c15_cpar & (1 << 1))
6895 if (!disas_iwmmxt_insn(env, s, insn))
6896 return;
6897 }
6898 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6899 /* Coprocessor double register transfer. */
be5e7a76 6900 ARCH(5TE);
9ee6e8bb
PB
6901 } else if ((insn & 0x0f000010) == 0x0e000010) {
6902 /* Additional coprocessor register transfer. */
7997d92f 6903 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6904 uint32_t mask;
6905 uint32_t val;
6906 /* cps (privileged) */
6907 if (IS_USER(s))
6908 return;
6909 mask = val = 0;
6910 if (insn & (1 << 19)) {
6911 if (insn & (1 << 8))
6912 mask |= CPSR_A;
6913 if (insn & (1 << 7))
6914 mask |= CPSR_I;
6915 if (insn & (1 << 6))
6916 mask |= CPSR_F;
6917 if (insn & (1 << 18))
6918 val |= mask;
6919 }
7997d92f 6920 if (insn & (1 << 17)) {
9ee6e8bb
PB
6921 mask |= CPSR_M;
6922 val |= (insn & 0x1f);
6923 }
6924 if (mask) {
2fbac54b 6925 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6926 }
6927 return;
6928 }
6929 goto illegal_op;
6930 }
6931 if (cond != 0xe) {
6932 /* if the condition is not "always", generate a conditional jump to
6933 the next instruction */
6934 s->condlabel = gen_new_label();
d9ba4830 6935 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6936 s->condjmp = 1;
6937 }
6938 if ((insn & 0x0f900000) == 0x03000000) {
6939 if ((insn & (1 << 21)) == 0) {
6940 ARCH(6T2);
6941 rd = (insn >> 12) & 0xf;
6942 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6943 if ((insn & (1 << 22)) == 0) {
6944 /* MOVW */
7d1b0095 6945 tmp = tcg_temp_new_i32();
5e3f878a 6946 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6947 } else {
6948 /* MOVT */
5e3f878a 6949 tmp = load_reg(s, rd);
86831435 6950 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6951 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6952 }
5e3f878a 6953 store_reg(s, rd, tmp);
9ee6e8bb
PB
6954 } else {
6955 if (((insn >> 12) & 0xf) != 0xf)
6956 goto illegal_op;
6957 if (((insn >> 16) & 0xf) == 0) {
6958 gen_nop_hint(s, insn & 0xff);
6959 } else {
6960 /* CPSR = immediate */
6961 val = insn & 0xff;
6962 shift = ((insn >> 8) & 0xf) * 2;
6963 if (shift)
6964 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6965 i = ((insn & (1 << 22)) != 0);
2fbac54b 6966 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6967 goto illegal_op;
6968 }
6969 }
6970 } else if ((insn & 0x0f900000) == 0x01000000
6971 && (insn & 0x00000090) != 0x00000090) {
6972 /* miscellaneous instructions */
6973 op1 = (insn >> 21) & 3;
6974 sh = (insn >> 4) & 0xf;
6975 rm = insn & 0xf;
6976 switch (sh) {
6977 case 0x0: /* move program status register */
6978 if (op1 & 1) {
6979 /* PSR = reg */
2fbac54b 6980 tmp = load_reg(s, rm);
9ee6e8bb 6981 i = ((op1 & 2) != 0);
2fbac54b 6982 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6983 goto illegal_op;
6984 } else {
6985 /* reg = PSR */
6986 rd = (insn >> 12) & 0xf;
6987 if (op1 & 2) {
6988 if (IS_USER(s))
6989 goto illegal_op;
d9ba4830 6990 tmp = load_cpu_field(spsr);
9ee6e8bb 6991 } else {
7d1b0095 6992 tmp = tcg_temp_new_i32();
d9ba4830 6993 gen_helper_cpsr_read(tmp);
9ee6e8bb 6994 }
d9ba4830 6995 store_reg(s, rd, tmp);
9ee6e8bb
PB
6996 }
6997 break;
6998 case 0x1:
6999 if (op1 == 1) {
7000 /* branch/exchange thumb (bx). */
be5e7a76 7001 ARCH(4T);
d9ba4830
PB
7002 tmp = load_reg(s, rm);
7003 gen_bx(s, tmp);
9ee6e8bb
PB
7004 } else if (op1 == 3) {
7005 /* clz */
be5e7a76 7006 ARCH(5);
9ee6e8bb 7007 rd = (insn >> 12) & 0xf;
1497c961
PB
7008 tmp = load_reg(s, rm);
7009 gen_helper_clz(tmp, tmp);
7010 store_reg(s, rd, tmp);
9ee6e8bb
PB
7011 } else {
7012 goto illegal_op;
7013 }
7014 break;
7015 case 0x2:
7016 if (op1 == 1) {
7017 ARCH(5J); /* bxj */
7018 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7019 tmp = load_reg(s, rm);
7020 gen_bx(s, tmp);
9ee6e8bb
PB
7021 } else {
7022 goto illegal_op;
7023 }
7024 break;
7025 case 0x3:
7026 if (op1 != 1)
7027 goto illegal_op;
7028
be5e7a76 7029 ARCH(5);
9ee6e8bb 7030 /* branch link/exchange thumb (blx) */
d9ba4830 7031 tmp = load_reg(s, rm);
7d1b0095 7032 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7033 tcg_gen_movi_i32(tmp2, s->pc);
7034 store_reg(s, 14, tmp2);
7035 gen_bx(s, tmp);
9ee6e8bb
PB
7036 break;
7037 case 0x5: /* saturating add/subtract */
be5e7a76 7038 ARCH(5TE);
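            /* QADD/QSUB/QDADD/QDSUB: op1 bit 1 first doubles the Rn operand
             * with saturation, op1 bit 0 selects a saturating subtract
             * instead of an add. */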
9ee6e8bb
PB
7039 rd = (insn >> 12) & 0xf;
7040 rn = (insn >> 16) & 0xf;
b40d0353 7041 tmp = load_reg(s, rm);
5e3f878a 7042 tmp2 = load_reg(s, rn);
9ee6e8bb 7043 if (op1 & 2)
5e3f878a 7044 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 7045 if (op1 & 1)
5e3f878a 7046 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 7047 else
5e3f878a 7048 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 7049 tcg_temp_free_i32(tmp2);
5e3f878a 7050 store_reg(s, rd, tmp);
9ee6e8bb 7051 break;
49e14940
AL
7052 case 7:
7053 /* SMC instruction (op1 == 3)
7054 and undefined instructions (op1 == 0 || op1 == 2)
7055 will trap */
7056 if (op1 != 1) {
7057 goto illegal_op;
7058 }
7059 /* bkpt */
be5e7a76 7060 ARCH(5);
bc4a0de0 7061 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7062 break;
7063 case 0x8: /* signed multiply */
7064 case 0xa:
7065 case 0xc:
7066 case 0xe:
be5e7a76 7067 ARCH(5TE);
9ee6e8bb
PB
7068 rs = (insn >> 8) & 0xf;
7069 rn = (insn >> 12) & 0xf;
7070 rd = (insn >> 16) & 0xf;
7071 if (op1 == 1) {
7072 /* (32 * 16) >> 16 */
5e3f878a
PB
7073 tmp = load_reg(s, rm);
7074 tmp2 = load_reg(s, rs);
9ee6e8bb 7075 if (sh & 4)
5e3f878a 7076 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7077 else
5e3f878a 7078 gen_sxth(tmp2);
a7812ae4
PB
7079 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7080 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7081 tmp = tcg_temp_new_i32();
a7812ae4 7082 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7083 tcg_temp_free_i64(tmp64);
9ee6e8bb 7084 if ((sh & 2) == 0) {
5e3f878a
PB
7085 tmp2 = load_reg(s, rn);
7086 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7087 tcg_temp_free_i32(tmp2);
9ee6e8bb 7088 }
5e3f878a 7089 store_reg(s, rd, tmp);
9ee6e8bb
PB
7090 } else {
7091 /* 16 * 16 */
5e3f878a
PB
7092 tmp = load_reg(s, rm);
7093 tmp2 = load_reg(s, rs);
7094 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7095 tcg_temp_free_i32(tmp2);
9ee6e8bb 7096 if (op1 == 2) {
a7812ae4
PB
7097 tmp64 = tcg_temp_new_i64();
7098 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7099 tcg_temp_free_i32(tmp);
a7812ae4
PB
7100 gen_addq(s, tmp64, rn, rd);
7101 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7102 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7103 } else {
7104 if (op1 == 0) {
5e3f878a
PB
7105 tmp2 = load_reg(s, rn);
7106 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7107 tcg_temp_free_i32(tmp2);
9ee6e8bb 7108 }
5e3f878a 7109 store_reg(s, rd, tmp);
9ee6e8bb
PB
7110 }
7111 }
7112 break;
7113 default:
7114 goto illegal_op;
7115 }
7116 } else if (((insn & 0x0e000000) == 0 &&
7117 (insn & 0x00000090) != 0x90) ||
7118 ((insn & 0x0e000000) == (1 << 25))) {
7119 int set_cc, logic_cc, shiftop;
7120
7121 op1 = (insn >> 21) & 0xf;
7122 set_cc = (insn >> 20) & 1;
7123 logic_cc = table_logic_cc[op1] & set_cc;
7124
7125 /* data processing instruction */
7126 if (insn & (1 << 25)) {
7127 /* immediate operand */
7128 val = insn & 0xff;
7129 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7130 if (shift) {
9ee6e8bb 7131 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7132 }
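                /* Standard ARM modified immediate: an 8-bit value rotated
                 * right by twice the 4-bit rotate field, e.g. imm8 = 0xff
                 * with rotate field 0xf gives 0xff ror 30 = 0x000003fc. */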
7d1b0095 7133 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7134 tcg_gen_movi_i32(tmp2, val);
7135 if (logic_cc && shift) {
7136 gen_set_CF_bit31(tmp2);
7137 }
9ee6e8bb
PB
7138 } else {
7139 /* register */
7140 rm = (insn) & 0xf;
e9bb4aa9 7141 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7142 shiftop = (insn >> 5) & 3;
7143 if (!(insn & (1 << 4))) {
7144 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7145 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7146 } else {
7147 rs = (insn >> 8) & 0xf;
8984bd2e 7148 tmp = load_reg(s, rs);
e9bb4aa9 7149 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7150 }
7151 }
7152 if (op1 != 0x0f && op1 != 0x0d) {
7153 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7154 tmp = load_reg(s, rn);
7155 } else {
7156 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7157 }
7158 rd = (insn >> 12) & 0xf;
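            /* op1 is the standard data-processing opcode:
             * 0 AND, 1 EOR, 2 SUB, 3 RSB, 4 ADD, 5 ADC, 6 SBC, 7 RSC,
             * 8 TST, 9 TEQ, a CMP, b CMN, c ORR, d MOV, e BIC, f MVN. */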
7159 switch(op1) {
7160 case 0x00:
e9bb4aa9
JR
7161 tcg_gen_and_i32(tmp, tmp, tmp2);
7162 if (logic_cc) {
7163 gen_logic_CC(tmp);
7164 }
21aeb343 7165 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7166 break;
7167 case 0x01:
e9bb4aa9
JR
7168 tcg_gen_xor_i32(tmp, tmp, tmp2);
7169 if (logic_cc) {
7170 gen_logic_CC(tmp);
7171 }
21aeb343 7172 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7173 break;
7174 case 0x02:
7175 if (set_cc && rd == 15) {
7176 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7177 if (IS_USER(s)) {
9ee6e8bb 7178 goto illegal_op;
e9bb4aa9
JR
7179 }
7180 gen_helper_sub_cc(tmp, tmp, tmp2);
7181 gen_exception_return(s, tmp);
9ee6e8bb 7182 } else {
e9bb4aa9
JR
7183 if (set_cc) {
7184 gen_helper_sub_cc(tmp, tmp, tmp2);
7185 } else {
7186 tcg_gen_sub_i32(tmp, tmp, tmp2);
7187 }
21aeb343 7188 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7189 }
7190 break;
7191 case 0x03:
e9bb4aa9
JR
7192 if (set_cc) {
7193 gen_helper_sub_cc(tmp, tmp2, tmp);
7194 } else {
7195 tcg_gen_sub_i32(tmp, tmp2, tmp);
7196 }
21aeb343 7197 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7198 break;
7199 case 0x04:
e9bb4aa9
JR
7200 if (set_cc) {
7201 gen_helper_add_cc(tmp, tmp, tmp2);
7202 } else {
7203 tcg_gen_add_i32(tmp, tmp, tmp2);
7204 }
21aeb343 7205 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7206 break;
7207 case 0x05:
e9bb4aa9
JR
7208 if (set_cc) {
7209 gen_helper_adc_cc(tmp, tmp, tmp2);
7210 } else {
7211 gen_add_carry(tmp, tmp, tmp2);
7212 }
21aeb343 7213 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7214 break;
7215 case 0x06:
e9bb4aa9
JR
7216 if (set_cc) {
7217 gen_helper_sbc_cc(tmp, tmp, tmp2);
7218 } else {
7219 gen_sub_carry(tmp, tmp, tmp2);
7220 }
21aeb343 7221 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7222 break;
7223 case 0x07:
e9bb4aa9
JR
7224 if (set_cc) {
7225 gen_helper_sbc_cc(tmp, tmp2, tmp);
7226 } else {
7227 gen_sub_carry(tmp, tmp2, tmp);
7228 }
21aeb343 7229 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7230 break;
7231 case 0x08:
7232 if (set_cc) {
e9bb4aa9
JR
7233 tcg_gen_and_i32(tmp, tmp, tmp2);
7234 gen_logic_CC(tmp);
9ee6e8bb 7235 }
7d1b0095 7236 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7237 break;
7238 case 0x09:
7239 if (set_cc) {
e9bb4aa9
JR
7240 tcg_gen_xor_i32(tmp, tmp, tmp2);
7241 gen_logic_CC(tmp);
9ee6e8bb 7242 }
7d1b0095 7243 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7244 break;
7245 case 0x0a:
7246 if (set_cc) {
e9bb4aa9 7247 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7248 }
7d1b0095 7249 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7250 break;
7251 case 0x0b:
7252 if (set_cc) {
e9bb4aa9 7253 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7254 }
7d1b0095 7255 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7256 break;
7257 case 0x0c:
e9bb4aa9
JR
7258 tcg_gen_or_i32(tmp, tmp, tmp2);
7259 if (logic_cc) {
7260 gen_logic_CC(tmp);
7261 }
21aeb343 7262 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7263 break;
7264 case 0x0d:
7265 if (logic_cc && rd == 15) {
7266 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7267 if (IS_USER(s)) {
9ee6e8bb 7268 goto illegal_op;
e9bb4aa9
JR
7269 }
7270 gen_exception_return(s, tmp2);
9ee6e8bb 7271 } else {
e9bb4aa9
JR
7272 if (logic_cc) {
7273 gen_logic_CC(tmp2);
7274 }
21aeb343 7275 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7276 }
7277 break;
7278 case 0x0e:
f669df27 7279 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7280 if (logic_cc) {
7281 gen_logic_CC(tmp);
7282 }
21aeb343 7283 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7284 break;
7285 default:
7286 case 0x0f:
e9bb4aa9
JR
7287 tcg_gen_not_i32(tmp2, tmp2);
7288 if (logic_cc) {
7289 gen_logic_CC(tmp2);
7290 }
21aeb343 7291 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7292 break;
7293 }
e9bb4aa9 7294 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7295 tcg_temp_free_i32(tmp2);
e9bb4aa9 7296 }
9ee6e8bb
PB
7297 } else {
7298 /* other instructions */
7299 op1 = (insn >> 24) & 0xf;
7300 switch(op1) {
7301 case 0x0:
7302 case 0x1:
7303 /* multiplies, extra load/stores */
7304 sh = (insn >> 5) & 3;
7305 if (sh == 0) {
7306 if (op1 == 0x0) {
7307 rd = (insn >> 16) & 0xf;
7308 rn = (insn >> 12) & 0xf;
7309 rs = (insn >> 8) & 0xf;
7310 rm = (insn) & 0xf;
7311 op1 = (insn >> 20) & 0xf;
7312 switch (op1) {
7313 case 0: case 1: case 2: case 3: case 6:
7314 /* 32 bit mul */
5e3f878a
PB
7315 tmp = load_reg(s, rs);
7316 tmp2 = load_reg(s, rm);
7317 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7318 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7319 if (insn & (1 << 22)) {
7320 /* Subtract (mls) */
7321 ARCH(6T2);
5e3f878a
PB
7322 tmp2 = load_reg(s, rn);
7323 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7324 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7325 } else if (insn & (1 << 21)) {
7326 /* Add */
5e3f878a
PB
7327 tmp2 = load_reg(s, rn);
7328 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7329 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7330 }
7331 if (insn & (1 << 20))
5e3f878a
PB
7332 gen_logic_CC(tmp);
7333 store_reg(s, rd, tmp);
9ee6e8bb 7334 break;
8aac08b1
AJ
7335 case 4:
7336 /* 64 bit mul double accumulate (UMAAL) */
7337 ARCH(6);
7338 tmp = load_reg(s, rs);
7339 tmp2 = load_reg(s, rm);
7340 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7341 gen_addq_lo(s, tmp64, rn);
7342 gen_addq_lo(s, tmp64, rd);
7343 gen_storeq_reg(s, rn, rd, tmp64);
7344 tcg_temp_free_i64(tmp64);
7345 break;
7346 case 8: case 9: case 10: case 11:
7347 case 12: case 13: case 14: case 15:
7348 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7349 tmp = load_reg(s, rs);
7350 tmp2 = load_reg(s, rm);
8aac08b1 7351 if (insn & (1 << 22)) {
a7812ae4 7352 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7353 } else {
a7812ae4 7354 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7355 }
7356 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7357 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7358 }
8aac08b1 7359 if (insn & (1 << 20)) {
a7812ae4 7360 gen_logicq_cc(tmp64);
8aac08b1 7361 }
a7812ae4 7362 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7363 tcg_temp_free_i64(tmp64);
9ee6e8bb 7364 break;
8aac08b1
AJ
7365 default:
7366 goto illegal_op;
9ee6e8bb
PB
7367 }
7368 } else {
7369 rn = (insn >> 16) & 0xf;
7370 rd = (insn >> 12) & 0xf;
7371 if (insn & (1 << 23)) {
7372 /* load/store exclusive */
86753403
PB
7373 op1 = (insn >> 21) & 0x3;
7374 if (op1)
a47f43d2 7375 ARCH(6K);
86753403
PB
7376 else
7377 ARCH(6);
3174f8e9 7378 addr = tcg_temp_local_new_i32();
98a46317 7379 load_reg_var(s, addr, rn);
9ee6e8bb 7380 if (insn & (1 << 20)) {
86753403
PB
7381 switch (op1) {
7382 case 0: /* ldrex */
426f5abc 7383 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7384 break;
7385 case 1: /* ldrexd */
426f5abc 7386 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7387 break;
7388 case 2: /* ldrexb */
426f5abc 7389 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7390 break;
7391 case 3: /* ldrexh */
426f5abc 7392 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7393 break;
7394 default:
7395 abort();
7396 }
9ee6e8bb
PB
7397 } else {
7398 rm = insn & 0xf;
86753403
PB
7399 switch (op1) {
7400 case 0: /* strex */
426f5abc 7401 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7402 break;
7403 case 1: /* strexd */
502e64fe 7404 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7405 break;
7406 case 2: /* strexb */
426f5abc 7407 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7408 break;
7409 case 3: /* strexh */
426f5abc 7410 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7411 break;
7412 default:
7413 abort();
7414 }
9ee6e8bb 7415 }
3174f8e9 7416 tcg_temp_free(addr);
9ee6e8bb
PB
7417 } else {
7418 /* SWP instruction */
7419 rm = (insn) & 0xf;
7420
8984bd2e
PB
7421 /* ??? This is not really atomic. However, we know
7422 we never have multiple CPUs running in parallel,
7423 so it is good enough. */
7424 addr = load_reg(s, rn);
7425 tmp = load_reg(s, rm);
9ee6e8bb 7426 if (insn & (1 << 22)) {
8984bd2e
PB
7427 tmp2 = gen_ld8u(addr, IS_USER(s));
7428 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7429 } else {
8984bd2e
PB
7430 tmp2 = gen_ld32(addr, IS_USER(s));
7431 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7432 }
7d1b0095 7433 tcg_temp_free_i32(addr);
8984bd2e 7434 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7435 }
7436 }
7437 } else {
7438 int address_offset;
7439 int load;
7440 /* Misc load/store */
7441 rn = (insn >> 16) & 0xf;
7442 rd = (insn >> 12) & 0xf;
b0109805 7443 addr = load_reg(s, rn);
9ee6e8bb 7444 if (insn & (1 << 24))
b0109805 7445 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7446 address_offset = 0;
7447 if (insn & (1 << 20)) {
7448 /* load */
7449 switch(sh) {
7450 case 1:
b0109805 7451 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7452 break;
7453 case 2:
b0109805 7454 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7455 break;
7456 default:
7457 case 3:
b0109805 7458 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7459 break;
7460 }
7461 load = 1;
7462 } else if (sh & 2) {
be5e7a76 7463 ARCH(5TE);
9ee6e8bb
PB
7464 /* doubleword */
7465 if (sh & 1) {
7466 /* store */
b0109805
PB
7467 tmp = load_reg(s, rd);
7468 gen_st32(tmp, addr, IS_USER(s));
7469 tcg_gen_addi_i32(addr, addr, 4);
7470 tmp = load_reg(s, rd + 1);
7471 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7472 load = 0;
7473 } else {
7474 /* load */
b0109805
PB
7475 tmp = gen_ld32(addr, IS_USER(s));
7476 store_reg(s, rd, tmp);
7477 tcg_gen_addi_i32(addr, addr, 4);
7478 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7479 rd++;
7480 load = 1;
7481 }
7482 address_offset = -4;
7483 } else {
7484 /* store */
b0109805
PB
7485 tmp = load_reg(s, rd);
7486 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7487 load = 0;
7488 }
7489 /* Perform base writeback before the loaded value to
7490 ensure correct behavior with overlapping index registers.
7491 ldrd with base writeback is undefined if the
7492 destination and index registers overlap. */
7493 if (!(insn & (1 << 24))) {
b0109805
PB
7494 gen_add_datah_offset(s, insn, address_offset, addr);
7495 store_reg(s, rn, addr);
9ee6e8bb
PB
7496 } else if (insn & (1 << 21)) {
7497 if (address_offset)
b0109805
PB
7498 tcg_gen_addi_i32(addr, addr, address_offset);
7499 store_reg(s, rn, addr);
7500 } else {
7d1b0095 7501 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7502 }
7503 if (load) {
7504 /* Complete the load. */
b0109805 7505 store_reg(s, rd, tmp);
9ee6e8bb
PB
7506 }
7507 }
7508 break;
7509 case 0x4:
7510 case 0x5:
7511 goto do_ldst;
7512 case 0x6:
7513 case 0x7:
7514 if (insn & (1 << 4)) {
7515 ARCH(6);
7516 /* Armv6 Media instructions. */
7517 rm = insn & 0xf;
7518 rn = (insn >> 16) & 0xf;
2c0262af 7519 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7520 rs = (insn >> 8) & 0xf;
7521 switch ((insn >> 23) & 3) {
7522 case 0: /* Parallel add/subtract. */
7523 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7524 tmp = load_reg(s, rn);
7525 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7526 sh = (insn >> 5) & 7;
7527 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7528 goto illegal_op;
6ddbc6e4 7529 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7530 tcg_temp_free_i32(tmp2);
6ddbc6e4 7531 store_reg(s, rd, tmp);
9ee6e8bb
PB
7532 break;
7533 case 1:
7534 if ((insn & 0x00700020) == 0) {
6c95676b 7535 /* Halfword pack. */
3670669c
PB
7536 tmp = load_reg(s, rn);
7537 tmp2 = load_reg(s, rm);
9ee6e8bb 7538 shift = (insn >> 7) & 0x1f;
3670669c
PB
7539 if (insn & (1 << 6)) {
7540 /* pkhtb */
22478e79
AZ
7541 if (shift == 0)
7542 shift = 31;
7543 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7544 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7545 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7546 } else {
7547 /* pkhbt */
22478e79
AZ
7548 if (shift)
7549 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7550 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7551 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7552 }
7553 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7554 tcg_temp_free_i32(tmp2);
3670669c 7555 store_reg(s, rd, tmp);
9ee6e8bb
PB
7556 } else if ((insn & 0x00200020) == 0x00200000) {
7557 /* [us]sat */
6ddbc6e4 7558 tmp = load_reg(s, rm);
9ee6e8bb
PB
7559 shift = (insn >> 7) & 0x1f;
7560 if (insn & (1 << 6)) {
7561 if (shift == 0)
7562 shift = 31;
6ddbc6e4 7563 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7564 } else {
6ddbc6e4 7565 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7566 }
7567 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7568 tmp2 = tcg_const_i32(sh);
7569 if (insn & (1 << 22))
7570 gen_helper_usat(tmp, tmp, tmp2);
7571 else
7572 gen_helper_ssat(tmp, tmp, tmp2);
7573 tcg_temp_free_i32(tmp2);
6ddbc6e4 7574 store_reg(s, rd, tmp);
9ee6e8bb
PB
7575 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7576 /* [us]sat16 */
6ddbc6e4 7577 tmp = load_reg(s, rm);
9ee6e8bb 7578 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7579 tmp2 = tcg_const_i32(sh);
7580 if (insn & (1 << 22))
7581 gen_helper_usat16(tmp, tmp, tmp2);
7582 else
7583 gen_helper_ssat16(tmp, tmp, tmp2);
7584 tcg_temp_free_i32(tmp2);
6ddbc6e4 7585 store_reg(s, rd, tmp);
9ee6e8bb
PB
7586 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7587 /* Select bytes. */
6ddbc6e4
PB
7588 tmp = load_reg(s, rn);
7589 tmp2 = load_reg(s, rm);
7d1b0095 7590 tmp3 = tcg_temp_new_i32();
0ecb72a5 7591 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7592 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7593 tcg_temp_free_i32(tmp3);
7594 tcg_temp_free_i32(tmp2);
6ddbc6e4 7595 store_reg(s, rd, tmp);
9ee6e8bb 7596 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7597 tmp = load_reg(s, rm);
9ee6e8bb 7598 shift = (insn >> 10) & 3;
1301f322 7599 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7600 rotate; a shift is sufficient. */
7601 if (shift != 0)
f669df27 7602 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7603 op1 = (insn >> 20) & 7;
7604 switch (op1) {
5e3f878a
PB
7605 case 0: gen_sxtb16(tmp); break;
7606 case 2: gen_sxtb(tmp); break;
7607 case 3: gen_sxth(tmp); break;
7608 case 4: gen_uxtb16(tmp); break;
7609 case 6: gen_uxtb(tmp); break;
7610 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7611 default: goto illegal_op;
7612 }
7613 if (rn != 15) {
5e3f878a 7614 tmp2 = load_reg(s, rn);
9ee6e8bb 7615 if ((op1 & 3) == 0) {
5e3f878a 7616 gen_add16(tmp, tmp2);
9ee6e8bb 7617 } else {
5e3f878a 7618 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7619 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7620 }
7621 }
6c95676b 7622 store_reg(s, rd, tmp);
9ee6e8bb
PB
7623 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7624 /* rev */
b0109805 7625 tmp = load_reg(s, rm);
9ee6e8bb
PB
7626 if (insn & (1 << 22)) {
7627 if (insn & (1 << 7)) {
b0109805 7628 gen_revsh(tmp);
9ee6e8bb
PB
7629 } else {
7630 ARCH(6T2);
b0109805 7631 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7632 }
7633 } else {
7634 if (insn & (1 << 7))
b0109805 7635 gen_rev16(tmp);
9ee6e8bb 7636 else
66896cb8 7637 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7638 }
b0109805 7639 store_reg(s, rd, tmp);
9ee6e8bb
PB
7640 } else {
7641 goto illegal_op;
7642 }
7643 break;
7644 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7645 switch ((insn >> 20) & 0x7) {
7646 case 5:
7647 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7648 /* op2 not 00x or 11x : UNDEF */
7649 goto illegal_op;
7650 }
838fa72d
AJ
7651 /* Signed multiply most significant [accumulate].
7652 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7653 tmp = load_reg(s, rm);
7654 tmp2 = load_reg(s, rs);
a7812ae4 7655 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7656
955a7dd5 7657 if (rd != 15) {
838fa72d 7658 tmp = load_reg(s, rd);
9ee6e8bb 7659 if (insn & (1 << 6)) {
838fa72d 7660 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7661 } else {
838fa72d 7662 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7663 }
7664 }
838fa72d
AJ
7665 if (insn & (1 << 5)) {
7666 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7667 }
7668 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7669 tmp = tcg_temp_new_i32();
838fa72d
AJ
7670 tcg_gen_trunc_i64_i32(tmp, tmp64);
7671 tcg_temp_free_i64(tmp64);
955a7dd5 7672 store_reg(s, rn, tmp);
41e9564d
PM
7673 break;
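/* A minimal plain-C sketch of the arithmetic the TCG ops above generate for
 * SMMUL/SMMLA/SMMLS and their rounding (R) variants.  Illustrative only:
 * this helper and its signature do not exist in translate.c. */
static int32_t smmul_sketch(int32_t rn, int32_t rm, int32_t ra,
                            int accumulate, int subtract, int round)
{
    int64_t res = (int64_t)rn * (int64_t)rm;    /* 32x32 -> 64-bit signed product */
    if (accumulate) {
        if (subtract) {
            res = ((int64_t)ra << 32) - res;    /* SMMLS: (Ra:0) - Rn*Rm */
        } else {
            res += (int64_t)ra << 32;           /* SMMLA: (Ra:0) + Rn*Rm */
        }
    }
    if (round) {
        res += 0x80000000LL;                    /* round instead of truncate */
    }
    return (int32_t)((uint64_t)res >> 32);      /* keep the most significant word */
}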
7674 case 0:
7675 case 4:
7676 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7677 if (insn & (1 << 7)) {
7678 goto illegal_op;
7679 }
7680 tmp = load_reg(s, rm);
7681 tmp2 = load_reg(s, rs);
9ee6e8bb 7682 if (insn & (1 << 5))
5e3f878a
PB
7683 gen_swap_half(tmp2);
7684 gen_smul_dual(tmp, tmp2);
5e3f878a 7685 if (insn & (1 << 6)) {
e1d177b9 7686 /* This subtraction cannot overflow. */
5e3f878a
PB
7687 tcg_gen_sub_i32(tmp, tmp, tmp2);
7688 } else {
e1d177b9
PM
7689 /* This addition cannot overflow 32 bits;
7690 * however it may overflow considered as a signed
7691 * operation, in which case we must set the Q flag.
7692 */
7693 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7694 }
7d1b0095 7695 tcg_temp_free_i32(tmp2);
9ee6e8bb 7696 if (insn & (1 << 22)) {
5e3f878a 7697 /* smlald, smlsld */
a7812ae4
PB
7698 tmp64 = tcg_temp_new_i64();
7699 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7700 tcg_temp_free_i32(tmp);
a7812ae4
PB
7701 gen_addq(s, tmp64, rd, rn);
7702 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7703 tcg_temp_free_i64(tmp64);
9ee6e8bb 7704 } else {
5e3f878a 7705 /* smuad, smusd, smlad, smlsd */
22478e79 7706 if (rd != 15)
9ee6e8bb 7707 {
22478e79 7708 tmp2 = load_reg(s, rd);
5e3f878a 7709 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7710 tcg_temp_free_i32(tmp2);
9ee6e8bb 7711 }
22478e79 7712 store_reg(s, rn, tmp);
9ee6e8bb 7713 }
41e9564d 7714 break;
b8b8ea05
PM
7715 case 1:
7716 case 3:
7717 /* SDIV, UDIV */
7718 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7719 goto illegal_op;
7720 }
7721 if (((insn >> 5) & 7) || (rd != 15)) {
7722 goto illegal_op;
7723 }
7724 tmp = load_reg(s, rm);
7725 tmp2 = load_reg(s, rs);
7726 if (insn & (1 << 21)) {
7727 gen_helper_udiv(tmp, tmp, tmp2);
7728 } else {
7729 gen_helper_sdiv(tmp, tmp, tmp2);
7730 }
7731 tcg_temp_free_i32(tmp2);
7732 store_reg(s, rn, tmp);
7733 break;
41e9564d
PM
7734 default:
7735 goto illegal_op;
9ee6e8bb
PB
7736 }
7737 break;
7738 case 3:
7739 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7740 switch (op1) {
7741 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7742 ARCH(6);
7743 tmp = load_reg(s, rm);
7744 tmp2 = load_reg(s, rs);
7745 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7746 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7747 if (rd != 15) {
7748 tmp2 = load_reg(s, rd);
6ddbc6e4 7749 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7750 tcg_temp_free_i32(tmp2);
9ee6e8bb 7751 }
ded9d295 7752 store_reg(s, rn, tmp);
9ee6e8bb
PB
7753 break;
7754 case 0x20: case 0x24: case 0x28: case 0x2c:
7755 /* Bitfield insert/clear. */
7756 ARCH(6T2);
7757 shift = (insn >> 7) & 0x1f;
7758 i = (insn >> 16) & 0x1f;
7759 i = i + 1 - shift;
7760 if (rm == 15) {
7d1b0095 7761 tmp = tcg_temp_new_i32();
5e3f878a 7762 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7763 } else {
5e3f878a 7764 tmp = load_reg(s, rm);
9ee6e8bb
PB
7765 }
7766 if (i != 32) {
5e3f878a 7767 tmp2 = load_reg(s, rd);
8f8e3aa4 7768 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7769 tcg_temp_free_i32(tmp2);
9ee6e8bb 7770 }
5e3f878a 7771 store_reg(s, rd, tmp);
9ee6e8bb
PB
7772 break;
7773 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7774 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7775 ARCH(6T2);
5e3f878a 7776 tmp = load_reg(s, rm);
9ee6e8bb
PB
7777 shift = (insn >> 7) & 0x1f;
7778 i = ((insn >> 16) & 0x1f) + 1;
7779 if (shift + i > 32)
7780 goto illegal_op;
7781 if (i < 32) {
7782 if (op1 & 0x20) {
5e3f878a 7783 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7784 } else {
5e3f878a 7785 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7786 }
7787 }
5e3f878a 7788 store_reg(s, rd, tmp);
9ee6e8bb
PB
7789 break;
7790 default:
7791 goto illegal_op;
7792 }
7793 break;
7794 }
7795 break;
7796 }
7797 do_ldst:
7798 /* Check for undefined extension instructions
7799 * per the ARM Bible, i.e.:
7800 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7801 */
7802 sh = (0xf << 20) | (0xf << 4);
7803 if (op1 == 0x7 && ((insn & sh) == sh))
7804 {
7805 goto illegal_op;
7806 }
7807 /* load/store byte/word */
7808 rn = (insn >> 16) & 0xf;
7809 rd = (insn >> 12) & 0xf;
b0109805 7810 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7811 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7812 if (insn & (1 << 24))
b0109805 7813 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7814 if (insn & (1 << 20)) {
7815 /* load */
9ee6e8bb 7816 if (insn & (1 << 22)) {
b0109805 7817 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7818 } else {
b0109805 7819 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7820 }
9ee6e8bb
PB
7821 } else {
7822 /* store */
b0109805 7823 tmp = load_reg(s, rd);
9ee6e8bb 7824 if (insn & (1 << 22))
b0109805 7825 gen_st8(tmp, tmp2, i);
9ee6e8bb 7826 else
b0109805 7827 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7828 }
7829 if (!(insn & (1 << 24))) {
b0109805
PB
7830 gen_add_data_offset(s, insn, tmp2);
7831 store_reg(s, rn, tmp2);
7832 } else if (insn & (1 << 21)) {
7833 store_reg(s, rn, tmp2);
7834 } else {
7d1b0095 7835 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7836 }
7837 if (insn & (1 << 20)) {
7838 /* Complete the load. */
be5e7a76 7839 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7840 }
7841 break;
7842 case 0x08:
7843 case 0x09:
7844 {
7845 int j, n, user, loaded_base;
b0109805 7846 TCGv loaded_var;
9ee6e8bb
PB
7847 /* load/store multiple words */
7848 /* XXX: store correct base if write back */
7849 user = 0;
7850 if (insn & (1 << 22)) {
7851 if (IS_USER(s))
7852 goto illegal_op; /* only usable in supervisor mode */
7853
7854 if ((insn & (1 << 15)) == 0)
7855 user = 1;
7856 }
7857 rn = (insn >> 16) & 0xf;
b0109805 7858 addr = load_reg(s, rn);
9ee6e8bb
PB
7859
7860 /* compute total size */
7861 loaded_base = 0;
a50f5b91 7862 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7863 n = 0;
7864 for(i=0;i<16;i++) {
7865 if (insn & (1 << i))
7866 n++;
7867 }
7868 /* XXX: test invalid n == 0 case ? */
7869 if (insn & (1 << 23)) {
7870 if (insn & (1 << 24)) {
7871 /* pre increment */
b0109805 7872 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7873 } else {
7874 /* post increment */
7875 }
7876 } else {
7877 if (insn & (1 << 24)) {
7878 /* pre decrement */
b0109805 7879 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7880 } else {
7881 /* post decrement */
7882 if (n != 1)
b0109805 7883 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7884 }
7885 }
7886 j = 0;
7887 for(i=0;i<16;i++) {
7888 if (insn & (1 << i)) {
7889 if (insn & (1 << 20)) {
7890 /* load */
b0109805 7891 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7892 if (user) {
b75263d6
JR
7893 tmp2 = tcg_const_i32(i);
7894 gen_helper_set_user_reg(tmp2, tmp);
7895 tcg_temp_free_i32(tmp2);
7d1b0095 7896 tcg_temp_free_i32(tmp);
9ee6e8bb 7897 } else if (i == rn) {
b0109805 7898 loaded_var = tmp;
9ee6e8bb
PB
7899 loaded_base = 1;
7900 } else {
be5e7a76 7901 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7902 }
7903 } else {
7904 /* store */
7905 if (i == 15) {
7906 /* special case: r15 = PC + 8 */
7907 val = (long)s->pc + 4;
7d1b0095 7908 tmp = tcg_temp_new_i32();
b0109805 7909 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7910 } else if (user) {
7d1b0095 7911 tmp = tcg_temp_new_i32();
b75263d6
JR
7912 tmp2 = tcg_const_i32(i);
7913 gen_helper_get_user_reg(tmp, tmp2);
7914 tcg_temp_free_i32(tmp2);
9ee6e8bb 7915 } else {
b0109805 7916 tmp = load_reg(s, i);
9ee6e8bb 7917 }
b0109805 7918 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7919 }
7920 j++;
7921 /* no need to add after the last transfer */
7922 if (j != n)
b0109805 7923 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7924 }
7925 }
7926 if (insn & (1 << 21)) {
7927 /* write back */
7928 if (insn & (1 << 23)) {
7929 if (insn & (1 << 24)) {
7930 /* pre increment */
7931 } else {
7932 /* post increment */
b0109805 7933 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7934 }
7935 } else {
7936 if (insn & (1 << 24)) {
7937 /* pre decrement */
7938 if (n != 1)
b0109805 7939 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7940 } else {
7941 /* post decrement */
b0109805 7942 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7943 }
7944 }
b0109805
PB
7945 store_reg(s, rn, addr);
7946 } else {
7d1b0095 7947 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7948 }
7949 if (loaded_base) {
b0109805 7950 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7951 }
7952 if ((insn & (1 << 22)) && !user) {
7953 /* Restore CPSR from SPSR. */
d9ba4830
PB
7954 tmp = load_cpu_field(spsr);
7955 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7956 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7957 s->is_jmp = DISAS_UPDATE;
7958 }
7959 }
7960 break;
7961 case 0xa:
7962 case 0xb:
7963 {
7964 int32_t offset;
7965
7966 /* branch (and link) */
7967 val = (int32_t)s->pc;
7968 if (insn & (1 << 24)) {
7d1b0095 7969 tmp = tcg_temp_new_i32();
5e3f878a
PB
7970 tcg_gen_movi_i32(tmp, val);
7971 store_reg(s, 14, tmp);
9ee6e8bb
PB
7972 }
7973 offset = (((int32_t)insn << 8) >> 8);
7974 val += (offset << 2) + 4;
7975 gen_jmp(s, val);
7976 }
7977 break;
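/* The branch target computed above, written out directly as a plain-C sketch
 * (this helper does not exist in translate.c).  val starts as s->pc, which at
 * this point is 4 bytes past the branch, so "+ (offset << 2) + 4" yields the
 * architectural target of PC-plus-8 plus the scaled immediate. */
static uint32_t arm_branch_target_sketch(uint32_t branch_addr, uint32_t insn)
{
    int32_t imm24 = ((int32_t)insn << 8) >> 8;       /* sign-extend bits [23:0] */
    return branch_addr + 8 + ((uint32_t)imm24 << 2); /* ARM-state PC reads as addr + 8 */
}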
7978 case 0xc:
7979 case 0xd:
7980 case 0xe:
7981 /* Coprocessor. */
7982 if (disas_coproc_insn(env, s, insn))
7983 goto illegal_op;
7984 break;
7985 case 0xf:
7986 /* swi */
5e3f878a 7987 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7988 s->is_jmp = DISAS_SWI;
7989 break;
7990 default:
7991 illegal_op:
bc4a0de0 7992 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7993 break;
7994 }
7995 }
7996}
7997
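/* The four ARM block-transfer addressing modes handled in the LDM/STM case
 * above, collected into one pure function for reference.  Illustrative only:
 * this helper is not part of translate.c.  'pre' and 'up' stand for the P
 * (bit 24) and U (bit 23) fields, n is the number of registers listed. */
static void block_transfer_addrs_sketch(uint32_t base, int pre, int up, int n,
                                        uint32_t *first_addr, uint32_t *writeback)
{
    if (up) {
        *first_addr = pre ? base + 4 : base;                   /* IB : IA */
        *writeback  = base + 4 * n;
    } else {
        *first_addr = pre ? base - 4 * n : base - 4 * (n - 1); /* DB : DA */
        *writeback  = base - 4 * n;
    }
}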
7998/* Return true if this is a Thumb-2 logical op. */
7999static int
8000thumb2_logic_op(int op)
8001{
8002 return (op < 8);
8003}
8004
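/* For reference, the op values (insn bits [24:21]) consumed by
 * gen_thumb2_data_op() below, which makes the "op < 8" test above easier to
 * read: 0-4 are the logical group, 8 and up are arithmetic.  The enumerator
 * names are illustrative and not used anywhere in translate.c; values 5, 7,
 * 9, 12 and 15 are rejected, and op 6 (halfword pack) is decoded separately. */
enum {
    T2_DP_AND = 0, T2_DP_BIC = 1, T2_DP_ORR = 2, T2_DP_ORN = 3, T2_DP_EOR = 4,
    T2_DP_ADD = 8, T2_DP_ADC = 10, T2_DP_SBC = 11, T2_DP_SUB = 13, T2_DP_RSB = 14
};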
8005/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8006 then set condition code flags based on the result of the operation.
8007 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8008 to the high bit of T1.
8009 Returns zero if the opcode is valid. */
8010
8011static int
396e467c 8012gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
8013{
8014 int logic_cc;
8015
8016 logic_cc = 0;
8017 switch (op) {
8018 case 0: /* and */
396e467c 8019 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8020 logic_cc = conds;
8021 break;
8022 case 1: /* bic */
f669df27 8023 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8024 logic_cc = conds;
8025 break;
8026 case 2: /* orr */
396e467c 8027 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8028 logic_cc = conds;
8029 break;
8030 case 3: /* orn */
29501f1b 8031 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8032 logic_cc = conds;
8033 break;
8034 case 4: /* eor */
396e467c 8035 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8036 logic_cc = conds;
8037 break;
8038 case 8: /* add */
8039 if (conds)
396e467c 8040 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 8041 else
396e467c 8042 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8043 break;
8044 case 10: /* adc */
8045 if (conds)
396e467c 8046 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 8047 else
396e467c 8048 gen_adc(t0, t1);
9ee6e8bb
PB
8049 break;
8050 case 11: /* sbc */
8051 if (conds)
396e467c 8052 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 8053 else
396e467c 8054 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
8055 break;
8056 case 13: /* sub */
8057 if (conds)
396e467c 8058 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 8059 else
396e467c 8060 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8061 break;
8062 case 14: /* rsb */
8063 if (conds)
396e467c 8064 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 8065 else
396e467c 8066 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8067 break;
8068 default: /* 5, 6, 7, 9, 12, 15. */
8069 return 1;
8070 }
8071 if (logic_cc) {
396e467c 8072 gen_logic_CC(t0);
9ee6e8bb 8073 if (shifter_out)
396e467c 8074 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8075 }
8076 return 0;
8077}
8078
8079/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8080 is not legal. */
0ecb72a5 8081static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8082{
b0109805 8083 uint32_t insn, imm, shift, offset;
9ee6e8bb 8084 uint32_t rd, rn, rm, rs;
b26eefb6 8085 TCGv tmp;
6ddbc6e4
PB
8086 TCGv tmp2;
8087 TCGv tmp3;
b0109805 8088 TCGv addr;
a7812ae4 8089 TCGv_i64 tmp64;
9ee6e8bb
PB
8090 int op;
8091 int shiftop;
8092 int conds;
8093 int logic_cc;
8094
8095 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8096 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8097 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8098 16-bit instructions to get correct prefetch abort behavior. */
8099 insn = insn_hw1;
8100 if ((insn & (1 << 12)) == 0) {
be5e7a76 8101 ARCH(5);
9ee6e8bb
PB
8102 /* Second half of blx. */
8103 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8104 tmp = load_reg(s, 14);
8105 tcg_gen_addi_i32(tmp, tmp, offset);
8106 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8107
7d1b0095 8108 tmp2 = tcg_temp_new_i32();
b0109805 8109 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8110 store_reg(s, 14, tmp2);
8111 gen_bx(s, tmp);
9ee6e8bb
PB
8112 return 0;
8113 }
8114 if (insn & (1 << 11)) {
8115 /* Second half of bl. */
8116 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8117 tmp = load_reg(s, 14);
6a0d8a1d 8118 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8119
7d1b0095 8120 tmp2 = tcg_temp_new_i32();
b0109805 8121 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8122 store_reg(s, 14, tmp2);
8123 gen_bx(s, tmp);
9ee6e8bb
PB
8124 return 0;
8125 }
8126 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8127 /* Instruction spans a page boundary. Implement it as two
8128 16-bit instructions in case the second half causes a
8129 prefetch abort. */
8130 offset = ((int32_t)insn << 21) >> 9;
396e467c 8131 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8132 return 0;
8133 }
8134 /* Fall through to 32-bit decode. */
8135 }
8136
d8fd2954 8137 insn = arm_lduw_code(s->pc, s->bswap_code);
9ee6e8bb
PB
8138 s->pc += 2;
8139 insn |= (uint32_t)insn_hw1 << 16;
8140
8141 if ((insn & 0xf800e800) != 0xf000e800) {
8142 ARCH(6T2);
8143 }
8144
8145 rn = (insn >> 16) & 0xf;
8146 rs = (insn >> 12) & 0xf;
8147 rd = (insn >> 8) & 0xf;
8148 rm = insn & 0xf;
8149 switch ((insn >> 25) & 0xf) {
8150 case 0: case 1: case 2: case 3:
8151 /* 16-bit instructions. Should never happen. */
8152 abort();
8153 case 4:
8154 if (insn & (1 << 22)) {
8155 /* Other load/store, table branch. */
8156 if (insn & 0x01200000) {
8157 /* Load/store doubleword. */
8158 if (rn == 15) {
7d1b0095 8159 addr = tcg_temp_new_i32();
b0109805 8160 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8161 } else {
b0109805 8162 addr = load_reg(s, rn);
9ee6e8bb
PB
8163 }
8164 offset = (insn & 0xff) * 4;
8165 if ((insn & (1 << 23)) == 0)
8166 offset = -offset;
8167 if (insn & (1 << 24)) {
b0109805 8168 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8169 offset = 0;
8170 }
8171 if (insn & (1 << 20)) {
8172 /* ldrd */
b0109805
PB
8173 tmp = gen_ld32(addr, IS_USER(s));
8174 store_reg(s, rs, tmp);
8175 tcg_gen_addi_i32(addr, addr, 4);
8176 tmp = gen_ld32(addr, IS_USER(s));
8177 store_reg(s, rd, tmp);
9ee6e8bb
PB
8178 } else {
8179 /* strd */
b0109805
PB
8180 tmp = load_reg(s, rs);
8181 gen_st32(tmp, addr, IS_USER(s));
8182 tcg_gen_addi_i32(addr, addr, 4);
8183 tmp = load_reg(s, rd);
8184 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8185 }
8186 if (insn & (1 << 21)) {
8187 /* Base writeback. */
8188 if (rn == 15)
8189 goto illegal_op;
b0109805
PB
8190 tcg_gen_addi_i32(addr, addr, offset - 4);
8191 store_reg(s, rn, addr);
8192 } else {
7d1b0095 8193 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8194 }
8195 } else if ((insn & (1 << 23)) == 0) {
8196 /* Load/store exclusive word. */
3174f8e9 8197 addr = tcg_temp_local_new();
98a46317 8198 load_reg_var(s, addr, rn);
426f5abc 8199 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8200 if (insn & (1 << 20)) {
426f5abc 8201 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8202 } else {
426f5abc 8203 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8204 }
3174f8e9 8205 tcg_temp_free(addr);
9ee6e8bb
PB
8206 } else if ((insn & (1 << 6)) == 0) {
8207 /* Table Branch. */
8208 if (rn == 15) {
7d1b0095 8209 addr = tcg_temp_new_i32();
b0109805 8210 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8211 } else {
b0109805 8212 addr = load_reg(s, rn);
9ee6e8bb 8213 }
b26eefb6 8214 tmp = load_reg(s, rm);
b0109805 8215 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8216 if (insn & (1 << 4)) {
8217 /* tbh */
b0109805 8218 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8219 tcg_temp_free_i32(tmp);
b0109805 8220 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8221 } else { /* tbb */
7d1b0095 8222 tcg_temp_free_i32(tmp);
b0109805 8223 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8224 }
7d1b0095 8225 tcg_temp_free_i32(addr);
b0109805
PB
8226 tcg_gen_shli_i32(tmp, tmp, 1);
8227 tcg_gen_addi_i32(tmp, tmp, s->pc);
8228 store_reg(s, 15, tmp);
9ee6e8bb
PB
8229 } else {
8230 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8231 ARCH(7);
9ee6e8bb 8232 op = (insn >> 4) & 0x3;
426f5abc
PB
8233 if (op == 2) {
8234 goto illegal_op;
8235 }
3174f8e9 8236 addr = tcg_temp_local_new();
98a46317 8237 load_reg_var(s, addr, rn);
9ee6e8bb 8238 if (insn & (1 << 20)) {
426f5abc 8239 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8240 } else {
426f5abc 8241 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8242 }
3174f8e9 8243 tcg_temp_free(addr);
9ee6e8bb
PB
8244 }
8245 } else {
8246 /* Load/store multiple, RFE, SRS. */
8247 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8248 /* Not available in user mode. */
b0109805 8249 if (IS_USER(s))
9ee6e8bb
PB
8250 goto illegal_op;
8251 if (insn & (1 << 20)) {
8252 /* rfe */
b0109805
PB
8253 addr = load_reg(s, rn);
8254 if ((insn & (1 << 24)) == 0)
8255 tcg_gen_addi_i32(addr, addr, -8);
8256 /* Load PC into tmp and CPSR into tmp2. */
8257 tmp = gen_ld32(addr, 0);
8258 tcg_gen_addi_i32(addr, addr, 4);
8259 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8260 if (insn & (1 << 21)) {
8261 /* Base writeback. */
b0109805
PB
8262 if (insn & (1 << 24)) {
8263 tcg_gen_addi_i32(addr, addr, 4);
8264 } else {
8265 tcg_gen_addi_i32(addr, addr, -4);
8266 }
8267 store_reg(s, rn, addr);
8268 } else {
7d1b0095 8269 tcg_temp_free_i32(addr);
9ee6e8bb 8270 }
b0109805 8271 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8272 } else {
8273 /* srs */
8274 op = (insn & 0x1f);
7d1b0095 8275 addr = tcg_temp_new_i32();
39ea3d4e
PM
8276 tmp = tcg_const_i32(op);
8277 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8278 tcg_temp_free_i32(tmp);
9ee6e8bb 8279 if ((insn & (1 << 24)) == 0) {
b0109805 8280 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8281 }
b0109805
PB
8282 tmp = load_reg(s, 14);
8283 gen_st32(tmp, addr, 0);
8284 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8285 tmp = tcg_temp_new_i32();
b0109805
PB
8286 gen_helper_cpsr_read(tmp);
8287 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8288 if (insn & (1 << 21)) {
8289 if ((insn & (1 << 24)) == 0) {
b0109805 8290 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8291 } else {
b0109805 8292 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8293 }
39ea3d4e
PM
8294 tmp = tcg_const_i32(op);
8295 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8296 tcg_temp_free_i32(tmp);
b0109805 8297 } else {
7d1b0095 8298 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8299 }
8300 }
8301 } else {
5856d44e
YO
8302 int i, loaded_base = 0;
8303 TCGv loaded_var;
9ee6e8bb 8304 /* Load/store multiple. */
b0109805 8305 addr = load_reg(s, rn);
9ee6e8bb
PB
8306 offset = 0;
8307 for (i = 0; i < 16; i++) {
8308 if (insn & (1 << i))
8309 offset += 4;
8310 }
8311 if (insn & (1 << 24)) {
b0109805 8312 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8313 }
8314
5856d44e 8315 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8316 for (i = 0; i < 16; i++) {
8317 if ((insn & (1 << i)) == 0)
8318 continue;
8319 if (insn & (1 << 20)) {
8320 /* Load. */
b0109805 8321 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8322 if (i == 15) {
b0109805 8323 gen_bx(s, tmp);
5856d44e
YO
8324 } else if (i == rn) {
8325 loaded_var = tmp;
8326 loaded_base = 1;
9ee6e8bb 8327 } else {
b0109805 8328 store_reg(s, i, tmp);
9ee6e8bb
PB
8329 }
8330 } else {
8331 /* Store. */
b0109805
PB
8332 tmp = load_reg(s, i);
8333 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8334 }
b0109805 8335 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8336 }
5856d44e
YO
8337 if (loaded_base) {
8338 store_reg(s, rn, loaded_var);
8339 }
9ee6e8bb
PB
8340 if (insn & (1 << 21)) {
8341 /* Base register writeback. */
8342 if (insn & (1 << 24)) {
b0109805 8343 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8344 }
8345 /* Fault if writeback register is in register list. */
8346 if (insn & (1 << rn))
8347 goto illegal_op;
b0109805
PB
8348 store_reg(s, rn, addr);
8349 } else {
7d1b0095 8350 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8351 }
8352 }
8353 }
8354 break;
2af9ab77
JB
8355 case 5:
8356
9ee6e8bb 8357 op = (insn >> 21) & 0xf;
2af9ab77
JB
8358 if (op == 6) {
8359 /* Halfword pack. */
8360 tmp = load_reg(s, rn);
8361 tmp2 = load_reg(s, rm);
8362 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8363 if (insn & (1 << 5)) {
8364 /* pkhtb */
8365 if (shift == 0)
8366 shift = 31;
8367 tcg_gen_sari_i32(tmp2, tmp2, shift);
8368 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8369 tcg_gen_ext16u_i32(tmp2, tmp2);
8370 } else {
8371 /* pkhbt */
8372 if (shift)
8373 tcg_gen_shli_i32(tmp2, tmp2, shift);
8374 tcg_gen_ext16u_i32(tmp, tmp);
8375 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8376 }
8377 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8378 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8379 store_reg(s, rd, tmp);
8380 } else {
2af9ab77
JB
8381 /* Data processing register constant shift. */
8382 if (rn == 15) {
7d1b0095 8383 tmp = tcg_temp_new_i32();
2af9ab77
JB
8384 tcg_gen_movi_i32(tmp, 0);
8385 } else {
8386 tmp = load_reg(s, rn);
8387 }
8388 tmp2 = load_reg(s, rm);
8389
8390 shiftop = (insn >> 4) & 3;
8391 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8392 conds = (insn & (1 << 20)) != 0;
8393 logic_cc = (conds && thumb2_logic_op(op));
8394 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8395 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8396 goto illegal_op;
7d1b0095 8397 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8398 if (rd != 15) {
8399 store_reg(s, rd, tmp);
8400 } else {
7d1b0095 8401 tcg_temp_free_i32(tmp);
2af9ab77 8402 }
3174f8e9 8403 }
9ee6e8bb
PB
8404 break;
8405 case 13: /* Misc data processing. */
8406 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8407 if (op < 4 && (insn & 0xf000) != 0xf000)
8408 goto illegal_op;
8409 switch (op) {
8410 case 0: /* Register controlled shift. */
8984bd2e
PB
8411 tmp = load_reg(s, rn);
8412 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8413 if ((insn & 0x70) != 0)
8414 goto illegal_op;
8415 op = (insn >> 21) & 3;
8984bd2e
PB
8416 logic_cc = (insn & (1 << 20)) != 0;
8417 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8418 if (logic_cc)
8419 gen_logic_CC(tmp);
21aeb343 8420 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8421 break;
8422 case 1: /* Sign/zero extend. */
5e3f878a 8423 tmp = load_reg(s, rm);
9ee6e8bb 8424 shift = (insn >> 4) & 3;
1301f322 8425 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8426 rotate; a shift is sufficient. */
8427 if (shift != 0)
f669df27 8428 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8429 op = (insn >> 20) & 7;
8430 switch (op) {
5e3f878a
PB
8431 case 0: gen_sxth(tmp); break;
8432 case 1: gen_uxth(tmp); break;
8433 case 2: gen_sxtb16(tmp); break;
8434 case 3: gen_uxtb16(tmp); break;
8435 case 4: gen_sxtb(tmp); break;
8436 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8437 default: goto illegal_op;
8438 }
8439 if (rn != 15) {
5e3f878a 8440 tmp2 = load_reg(s, rn);
9ee6e8bb 8441 if ((op >> 1) == 1) {
5e3f878a 8442 gen_add16(tmp, tmp2);
9ee6e8bb 8443 } else {
5e3f878a 8444 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8445 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8446 }
8447 }
5e3f878a 8448 store_reg(s, rd, tmp);
9ee6e8bb
PB
8449 break;
8450 case 2: /* SIMD add/subtract. */
8451 op = (insn >> 20) & 7;
8452 shift = (insn >> 4) & 7;
8453 if ((op & 3) == 3 || (shift & 3) == 3)
8454 goto illegal_op;
6ddbc6e4
PB
8455 tmp = load_reg(s, rn);
8456 tmp2 = load_reg(s, rm);
8457 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8458 tcg_temp_free_i32(tmp2);
6ddbc6e4 8459 store_reg(s, rd, tmp);
9ee6e8bb
PB
8460 break;
8461 case 3: /* Other data processing. */
8462 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8463 if (op < 4) {
8464 /* Saturating add/subtract. */
d9ba4830
PB
8465 tmp = load_reg(s, rn);
8466 tmp2 = load_reg(s, rm);
9ee6e8bb 8467 if (op & 1)
4809c612
JB
8468 gen_helper_double_saturate(tmp, tmp);
8469 if (op & 2)
d9ba4830 8470 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8471 else
d9ba4830 8472 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8473 tcg_temp_free_i32(tmp2);
9ee6e8bb 8474 } else {
d9ba4830 8475 tmp = load_reg(s, rn);
9ee6e8bb
PB
8476 switch (op) {
8477 case 0x0a: /* rbit */
d9ba4830 8478 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8479 break;
8480 case 0x08: /* rev */
66896cb8 8481 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8482 break;
8483 case 0x09: /* rev16 */
d9ba4830 8484 gen_rev16(tmp);
9ee6e8bb
PB
8485 break;
8486 case 0x0b: /* revsh */
d9ba4830 8487 gen_revsh(tmp);
9ee6e8bb
PB
8488 break;
8489 case 0x10: /* sel */
d9ba4830 8490 tmp2 = load_reg(s, rm);
7d1b0095 8491 tmp3 = tcg_temp_new_i32();
0ecb72a5 8492 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8493 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8494 tcg_temp_free_i32(tmp3);
8495 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8496 break;
8497 case 0x18: /* clz */
d9ba4830 8498 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8499 break;
8500 default:
8501 goto illegal_op;
8502 }
8503 }
d9ba4830 8504 store_reg(s, rd, tmp);
9ee6e8bb
PB
8505 break;
8506 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8507 op = (insn >> 4) & 0xf;
d9ba4830
PB
8508 tmp = load_reg(s, rn);
8509 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8510 switch ((insn >> 20) & 7) {
8511 case 0: /* 32 x 32 -> 32 */
d9ba4830 8512 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8513 tcg_temp_free_i32(tmp2);
9ee6e8bb 8514 if (rs != 15) {
d9ba4830 8515 tmp2 = load_reg(s, rs);
9ee6e8bb 8516 if (op)
d9ba4830 8517 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8518 else
d9ba4830 8519 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8520 tcg_temp_free_i32(tmp2);
9ee6e8bb 8521 }
9ee6e8bb
PB
8522 break;
8523 case 1: /* 16 x 16 -> 32 */
d9ba4830 8524 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8525 tcg_temp_free_i32(tmp2);
9ee6e8bb 8526 if (rs != 15) {
d9ba4830
PB
8527 tmp2 = load_reg(s, rs);
8528 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8529 tcg_temp_free_i32(tmp2);
9ee6e8bb 8530 }
9ee6e8bb
PB
8531 break;
8532 case 2: /* Dual multiply add. */
8533 case 4: /* Dual multiply subtract. */
8534 if (op)
d9ba4830
PB
8535 gen_swap_half(tmp2);
8536 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8537 if (insn & (1 << 22)) {
e1d177b9 8538 /* This subtraction cannot overflow. */
d9ba4830 8539 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8540 } else {
e1d177b9
PM
8541 /* This addition cannot overflow 32 bits;
8542 * however it may overflow considered as a signed
8543 * operation, in which case we must set the Q flag.
8544 */
8545 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8546 }
7d1b0095 8547 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8548 if (rs != 15)
8549 {
d9ba4830
PB
8550 tmp2 = load_reg(s, rs);
8551 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8552 tcg_temp_free_i32(tmp2);
9ee6e8bb 8553 }
9ee6e8bb
PB
8554 break;
8555 case 3: /* 32 * 16 -> 32msb */
8556 if (op)
d9ba4830 8557 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8558 else
d9ba4830 8559 gen_sxth(tmp2);
a7812ae4
PB
8560 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8561 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8562 tmp = tcg_temp_new_i32();
a7812ae4 8563 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8564 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8565 if (rs != 15)
8566 {
d9ba4830
PB
8567 tmp2 = load_reg(s, rs);
8568 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8569 tcg_temp_free_i32(tmp2);
9ee6e8bb 8570 }
9ee6e8bb 8571 break;
838fa72d
AJ
8572 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8573 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8574 if (rs != 15) {
838fa72d
AJ
8575 tmp = load_reg(s, rs);
8576 if (insn & (1 << 20)) {
8577 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8578 } else {
838fa72d 8579 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8580 }
2c0262af 8581 }
838fa72d
AJ
8582 if (insn & (1 << 4)) {
8583 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8584 }
8585 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8586 tmp = tcg_temp_new_i32();
838fa72d
AJ
8587 tcg_gen_trunc_i64_i32(tmp, tmp64);
8588 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8589 break;
8590 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8591 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8592 tcg_temp_free_i32(tmp2);
9ee6e8bb 8593 if (rs != 15) {
d9ba4830
PB
8594 tmp2 = load_reg(s, rs);
8595 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8596 tcg_temp_free_i32(tmp2);
5fd46862 8597 }
9ee6e8bb 8598 break;
2c0262af 8599 }
d9ba4830 8600 store_reg(s, rd, tmp);
2c0262af 8601 break;
9ee6e8bb
PB
8602 case 6: case 7: /* 64-bit multiply, Divide. */
8603 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8604 tmp = load_reg(s, rn);
8605 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8606 if ((op & 0x50) == 0x10) {
8607 /* sdiv, udiv */
47789990 8608 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8609 goto illegal_op;
47789990 8610 }
9ee6e8bb 8611 if (op & 0x20)
5e3f878a 8612 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8613 else
5e3f878a 8614 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8615 tcg_temp_free_i32(tmp2);
5e3f878a 8616 store_reg(s, rd, tmp);
9ee6e8bb
PB
8617 } else if ((op & 0xe) == 0xc) {
8618 /* Dual multiply accumulate long. */
8619 if (op & 1)
5e3f878a
PB
8620 gen_swap_half(tmp2);
8621 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8622 if (op & 0x10) {
5e3f878a 8623 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8624 } else {
5e3f878a 8625 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8626 }
7d1b0095 8627 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8628 /* BUGFIX */
8629 tmp64 = tcg_temp_new_i64();
8630 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8631 tcg_temp_free_i32(tmp);
a7812ae4
PB
8632 gen_addq(s, tmp64, rs, rd);
8633 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8634 tcg_temp_free_i64(tmp64);
2c0262af 8635 } else {
9ee6e8bb
PB
8636 if (op & 0x20) {
8637 /* Unsigned 64-bit multiply */
a7812ae4 8638 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8639 } else {
9ee6e8bb
PB
8640 if (op & 8) {
8641 /* smlalxy */
5e3f878a 8642 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8643 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8644 tmp64 = tcg_temp_new_i64();
8645 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8646 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8647 } else {
8648 /* Signed 64-bit multiply */
a7812ae4 8649 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8650 }
b5ff1b31 8651 }
9ee6e8bb
PB
8652 if (op & 4) {
8653 /* umaal */
a7812ae4
PB
8654 gen_addq_lo(s, tmp64, rs);
8655 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8656 } else if (op & 0x40) {
8657 /* 64-bit accumulate. */
a7812ae4 8658 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8659 }
a7812ae4 8660 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8661 tcg_temp_free_i64(tmp64);
5fd46862 8662 }
2c0262af 8663 break;
9ee6e8bb
PB
8664 }
8665 break;
8666 case 6: case 7: case 14: case 15:
8667 /* Coprocessor. */
8668 if (((insn >> 24) & 3) == 3) {
8669 /* Translate into the equivalent ARM encoding. */
f06053e3 8670 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8671 if (disas_neon_data_insn(env, s, insn))
8672 goto illegal_op;
8673 } else {
8674 if (insn & (1 << 28))
8675 goto illegal_op;
8676 if (disas_coproc_insn (env, s, insn))
8677 goto illegal_op;
8678 }
8679 break;
8680 case 8: case 9: case 10: case 11:
8681 if (insn & (1 << 15)) {
8682 /* Branches, misc control. */
8683 if (insn & 0x5000) {
8684 /* Unconditional branch. */
8685 /* signextend(hw1[10:0]) -> offset[:12]. */
8686 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8687 /* hw2[10:0] -> offset[11:1]. */
8688 offset |= (insn & 0x7ff) << 1;
8689 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8690 offset[24:22] already have the same value because of the
8691 sign extension above. */
8692 offset ^= ((~insn) & (1 << 13)) << 10;
8693 offset ^= ((~insn) & (1 << 11)) << 11;
8694
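/* The same offset, computed field by field from the two halfwords, for
 * comparison with the XOR trick above.  Per the architecture, I1 is
 * NOT(J1 EOR S) and I2 is NOT(J2 EOR S).  Illustrative only: this helper
 * does not exist in translate.c. */
static int32_t t2_uncond_branch_offset_sketch(uint16_t hw1, uint16_t hw2)
{
    uint32_t s     = (hw1 >> 10) & 1;
    uint32_t i1    = !(((hw2 >> 13) & 1) ^ s);       /* from J1 */
    uint32_t i2    = !(((hw2 >> 11) & 1) ^ s);       /* from J2 */
    uint32_t imm10 = hw1 & 0x3ff;
    uint32_t imm11 = hw2 & 0x7ff;
    uint32_t off   = (s << 24) | (i1 << 23) | (i2 << 22)
                   | (imm10 << 12) | (imm11 << 1);
    return ((int32_t)(off << 7)) >> 7;               /* sign-extend from bit 24 */
}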
9ee6e8bb
PB
8695 if (insn & (1 << 14)) {
8696 /* Branch and link. */
3174f8e9 8697 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8698 }
3b46e624 8699
b0109805 8700 offset += s->pc;
9ee6e8bb
PB
8701 if (insn & (1 << 12)) {
8702 /* b/bl */
b0109805 8703 gen_jmp(s, offset);
9ee6e8bb
PB
8704 } else {
8705 /* blx */
b0109805 8706 offset &= ~(uint32_t)2;
be5e7a76 8707 /* thumb2 bx, no need to check */
b0109805 8708 gen_bx_im(s, offset);
2c0262af 8709 }
9ee6e8bb
PB
8710 } else if (((insn >> 23) & 7) == 7) {
8711 /* Misc control */
8712 if (insn & (1 << 13))
8713 goto illegal_op;
8714
8715 if (insn & (1 << 26)) {
8716 /* Secure monitor call (v6Z) */
8717 goto illegal_op; /* not implemented. */
2c0262af 8718 } else {
9ee6e8bb
PB
8719 op = (insn >> 20) & 7;
8720 switch (op) {
8721 case 0: /* msr cpsr. */
8722 if (IS_M(env)) {
8984bd2e
PB
8723 tmp = load_reg(s, rn);
8724 addr = tcg_const_i32(insn & 0xff);
8725 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8726 tcg_temp_free_i32(addr);
7d1b0095 8727 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8728 gen_lookup_tb(s);
8729 break;
8730 }
8731 /* fall through */
8732 case 1: /* msr spsr. */
8733 if (IS_M(env))
8734 goto illegal_op;
2fbac54b
FN
8735 tmp = load_reg(s, rn);
8736 if (gen_set_psr(s,
9ee6e8bb 8737 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8738 op == 1, tmp))
9ee6e8bb
PB
8739 goto illegal_op;
8740 break;
8741 case 2: /* cps, nop-hint. */
8742 if (((insn >> 8) & 7) == 0) {
8743 gen_nop_hint(s, insn & 0xff);
8744 }
8745 /* Implemented as NOP in user mode. */
8746 if (IS_USER(s))
8747 break;
8748 offset = 0;
8749 imm = 0;
8750 if (insn & (1 << 10)) {
8751 if (insn & (1 << 7))
8752 offset |= CPSR_A;
8753 if (insn & (1 << 6))
8754 offset |= CPSR_I;
8755 if (insn & (1 << 5))
8756 offset |= CPSR_F;
8757 if (insn & (1 << 9))
8758 imm = CPSR_A | CPSR_I | CPSR_F;
8759 }
8760 if (insn & (1 << 8)) {
8761 offset |= 0x1f;
8762 imm |= (insn & 0x1f);
8763 }
8764 if (offset) {
2fbac54b 8765 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8766 }
8767 break;
8768 case 3: /* Special control operations. */
426f5abc 8769 ARCH(7);
9ee6e8bb
PB
8770 op = (insn >> 4) & 0xf;
8771 switch (op) {
8772 case 2: /* clrex */
426f5abc 8773 gen_clrex(s);
9ee6e8bb
PB
8774 break;
8775 case 4: /* dsb */
8776 case 5: /* dmb */
8777 case 6: /* isb */
8778 /* These execute as NOPs. */
9ee6e8bb
PB
8779 break;
8780 default:
8781 goto illegal_op;
8782 }
8783 break;
8784 case 4: /* bxj */
8785 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8786 tmp = load_reg(s, rn);
8787 gen_bx(s, tmp);
9ee6e8bb
PB
8788 break;
8789 case 5: /* Exception return. */
b8b45b68
RV
8790 if (IS_USER(s)) {
8791 goto illegal_op;
8792 }
8793 if (rn != 14 || rd != 15) {
8794 goto illegal_op;
8795 }
8796 tmp = load_reg(s, rn);
8797 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8798 gen_exception_return(s, tmp);
8799 break;
9ee6e8bb 8800 case 6: /* mrs cpsr. */
7d1b0095 8801 tmp = tcg_temp_new_i32();
9ee6e8bb 8802 if (IS_M(env)) {
8984bd2e
PB
8803 addr = tcg_const_i32(insn & 0xff);
8804 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8805 tcg_temp_free_i32(addr);
9ee6e8bb 8806 } else {
8984bd2e 8807 gen_helper_cpsr_read(tmp);
9ee6e8bb 8808 }
8984bd2e 8809 store_reg(s, rd, tmp);
9ee6e8bb
PB
8810 break;
8811 case 7: /* mrs spsr. */
8812 /* Not accessible in user mode. */
8813 if (IS_USER(s) || IS_M(env))
8814 goto illegal_op;
d9ba4830
PB
8815 tmp = load_cpu_field(spsr);
8816 store_reg(s, rd, tmp);
9ee6e8bb 8817 break;
2c0262af
FB
8818 }
8819 }
9ee6e8bb
PB
8820 } else {
8821 /* Conditional branch. */
8822 op = (insn >> 22) & 0xf;
8823 /* Generate a conditional jump to next instruction. */
8824 s->condlabel = gen_new_label();
d9ba4830 8825 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8826 s->condjmp = 1;
8827
8828 /* offset[11:1] = insn[10:0] */
8829 offset = (insn & 0x7ff) << 1;
8830 /* offset[17:12] = insn[21:16]. */
8831 offset |= (insn & 0x003f0000) >> 4;
8832 /* offset[31:20] = insn[26]. */
8833 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8834 /* offset[18] = insn[13]. */
8835 offset |= (insn & (1 << 13)) << 5;
8836 /* offset[19] = insn[11]. */
8837 offset |= (insn & (1 << 11)) << 8;
8838
8839 /* jump to the offset */
b0109805 8840 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8841 }
8842 } else {
8843 /* Data processing immediate. */
8844 if (insn & (1 << 25)) {
8845 if (insn & (1 << 24)) {
8846 if (insn & (1 << 20))
8847 goto illegal_op;
8848 /* Bitfield/Saturate. */
8849 op = (insn >> 21) & 7;
8850 imm = insn & 0x1f;
8851 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8852 if (rn == 15) {
7d1b0095 8853 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8854 tcg_gen_movi_i32(tmp, 0);
8855 } else {
8856 tmp = load_reg(s, rn);
8857 }
9ee6e8bb
PB
8858 switch (op) {
8859 case 2: /* Signed bitfield extract. */
8860 imm++;
8861 if (shift + imm > 32)
8862 goto illegal_op;
8863 if (imm < 32)
6ddbc6e4 8864 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8865 break;
8866 case 6: /* Unsigned bitfield extract. */
8867 imm++;
8868 if (shift + imm > 32)
8869 goto illegal_op;
8870 if (imm < 32)
6ddbc6e4 8871 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8872 break;
8873 case 3: /* Bitfield insert/clear. */
8874 if (imm < shift)
8875 goto illegal_op;
8876 imm = imm + 1 - shift;
8877 if (imm != 32) {
6ddbc6e4 8878 tmp2 = load_reg(s, rd);
8f8e3aa4 8879 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8880 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8881 }
8882 break;
8883 case 7:
8884 goto illegal_op;
8885 default: /* Saturate. */
9ee6e8bb
PB
8886 if (shift) {
8887 if (op & 1)
6ddbc6e4 8888 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8889 else
6ddbc6e4 8890 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8891 }
6ddbc6e4 8892 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8893 if (op & 4) {
8894 /* Unsigned. */
9ee6e8bb 8895 if ((op & 1) && shift == 0)
6ddbc6e4 8896 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8897 else
6ddbc6e4 8898 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8899 } else {
9ee6e8bb 8900 /* Signed. */
9ee6e8bb 8901 if ((op & 1) && shift == 0)
6ddbc6e4 8902 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8903 else
6ddbc6e4 8904 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8905 }
b75263d6 8906 tcg_temp_free_i32(tmp2);
9ee6e8bb 8907 break;
2c0262af 8908 }
6ddbc6e4 8909 store_reg(s, rd, tmp);
9ee6e8bb
PB
8910 } else {
8911 imm = ((insn & 0x04000000) >> 15)
8912 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8913 if (insn & (1 << 22)) {
8914 /* 16-bit immediate. */
8915 imm |= (insn >> 4) & 0xf000;
8916 if (insn & (1 << 23)) {
8917 /* movt */
5e3f878a 8918 tmp = load_reg(s, rd);
86831435 8919 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8920 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8921 } else {
9ee6e8bb 8922 /* movw */
7d1b0095 8923 tmp = tcg_temp_new_i32();
5e3f878a 8924 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8925 }
8926 } else {
9ee6e8bb
PB
8927 /* Add/sub 12-bit immediate. */
8928 if (rn == 15) {
b0109805 8929 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8930 if (insn & (1 << 23))
b0109805 8931 offset -= imm;
9ee6e8bb 8932 else
b0109805 8933 offset += imm;
7d1b0095 8934 tmp = tcg_temp_new_i32();
5e3f878a 8935 tcg_gen_movi_i32(tmp, offset);
2c0262af 8936 } else {
5e3f878a 8937 tmp = load_reg(s, rn);
9ee6e8bb 8938 if (insn & (1 << 23))
5e3f878a 8939 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8940 else
5e3f878a 8941 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8942 }
9ee6e8bb 8943 }
5e3f878a 8944 store_reg(s, rd, tmp);
191abaa2 8945 }
9ee6e8bb
PB
8946 } else {
8947 int shifter_out = 0;
8948 /* modified 12-bit immediate. */
8949 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8950 imm = (insn & 0xff);
8951 switch (shift) {
8952 case 0: /* XY */
8953 /* Nothing to do. */
8954 break;
8955 case 1: /* 00XY00XY */
8956 imm |= imm << 16;
8957 break;
8958 case 2: /* XY00XY00 */
8959 imm |= imm << 16;
8960 imm <<= 8;
8961 break;
8962 case 3: /* XYXYXYXY */
8963 imm |= imm << 16;
8964 imm |= imm << 8;
8965 break;
8966 default: /* Rotated constant. */
8967 shift = (shift << 1) | (imm >> 7);
8968 imm |= 0x80;
8969 imm = imm << (32 - shift);
8970 shifter_out = 1;
8971 break;
b5ff1b31 8972 }
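/* The Thumb-2 "modified immediate" expansion performed by the switch above,
 * written as a pure function.  Illustrative only: this helper does not exist
 * in translate.c.  Here imm12 packs i:imm3:abcdefgh, so imm12[11:8] is the
 * 'shift' selector computed above and imm12[7:0] is the byte; '*rotated'
 * corresponds to shifter_out (the caller takes C from bit 31 of the result). */
static uint32_t t2_expand_imm_sketch(uint32_t imm12, int *rotated)
{
    uint32_t imm = imm12 & 0xff;

    *rotated = 0;
    switch (imm12 >> 8) {
    case 0:                       /* 00000000 00000000 00000000 abcdefgh */
        return imm;
    case 1:                       /* 00000000 abcdefgh 00000000 abcdefgh */
        return imm | (imm << 16);
    case 2:                       /* abcdefgh 00000000 abcdefgh 00000000 */
        return (imm << 8) | (imm << 24);
    case 3:                       /* abcdefgh abcdefgh abcdefgh abcdefgh */
        return imm | (imm << 8) | (imm << 16) | (imm << 24);
    default:                      /* 1bcdefgh rotated right by imm12[11:7] */
        *rotated = 1;
        return (imm | 0x80) << (32 - (imm12 >> 7));
    }
}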
7d1b0095 8973 tmp2 = tcg_temp_new_i32();
3174f8e9 8974 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8975 rn = (insn >> 16) & 0xf;
3174f8e9 8976 if (rn == 15) {
7d1b0095 8977 tmp = tcg_temp_new_i32();
3174f8e9
FN
8978 tcg_gen_movi_i32(tmp, 0);
8979 } else {
8980 tmp = load_reg(s, rn);
8981 }
9ee6e8bb
PB
8982 op = (insn >> 21) & 0xf;
8983 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8984 shifter_out, tmp, tmp2))
9ee6e8bb 8985 goto illegal_op;
7d1b0095 8986 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8987 rd = (insn >> 8) & 0xf;
8988 if (rd != 15) {
3174f8e9
FN
8989 store_reg(s, rd, tmp);
8990 } else {
7d1b0095 8991 tcg_temp_free_i32(tmp);
2c0262af 8992 }
2c0262af 8993 }
9ee6e8bb
PB
8994 }
8995 break;
8996 case 12: /* Load/store single data item. */
8997 {
8998 int postinc = 0;
8999 int writeback = 0;
b0109805 9000 int user;
9ee6e8bb
PB
9001 if ((insn & 0x01100000) == 0x01000000) {
9002 if (disas_neon_ls_insn(env, s, insn))
c1713132 9003 goto illegal_op;
9ee6e8bb
PB
9004 break;
9005 }
a2fdc890
PM
9006 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9007 if (rs == 15) {
9008 if (!(insn & (1 << 20))) {
9009 goto illegal_op;
9010 }
9011 if (op != 2) {
9012 /* Byte or halfword load space with dest == r15 : memory hints.
9013 * Catch them early so we don't emit pointless addressing code.
9014 * This space is a mix of:
9015 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9016 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9017 * cores)
9018 * unallocated hints, which must be treated as NOPs
9019 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9020 * which is easiest for the decoding logic
9021 * Some space which must UNDEF
9022 */
9023 int op1 = (insn >> 23) & 3;
9024 int op2 = (insn >> 6) & 0x3f;
9025 if (op & 2) {
9026 goto illegal_op;
9027 }
9028 if (rn == 15) {
02afbf64
PM
9029 /* UNPREDICTABLE, unallocated hint or
9030 * PLD/PLDW/PLI (literal)
9031 */
a2fdc890
PM
9032 return 0;
9033 }
9034 if (op1 & 1) {
02afbf64 9035 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9036 }
9037 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9038 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9039 }
9040 /* UNDEF space, or an UNPREDICTABLE */
9041 return 1;
9042 }
9043 }
b0109805 9044 user = IS_USER(s);
9ee6e8bb 9045 if (rn == 15) {
7d1b0095 9046 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9047 /* PC relative. */
9048 /* s->pc has already been incremented by 4. */
9049 imm = s->pc & 0xfffffffc;
9050 if (insn & (1 << 23))
9051 imm += insn & 0xfff;
9052 else
9053 imm -= insn & 0xfff;
b0109805 9054 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9055 } else {
b0109805 9056 addr = load_reg(s, rn);
9ee6e8bb
PB
9057 if (insn & (1 << 23)) {
9058 /* Positive offset. */
9059 imm = insn & 0xfff;
b0109805 9060 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9061 } else {
9ee6e8bb 9062 imm = insn & 0xff;
2a0308c5
PM
9063 switch ((insn >> 8) & 0xf) {
9064 case 0x0: /* Shifted Register. */
9ee6e8bb 9065 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9066 if (shift > 3) {
9067 tcg_temp_free_i32(addr);
18c9b560 9068 goto illegal_op;
2a0308c5 9069 }
b26eefb6 9070 tmp = load_reg(s, rm);
9ee6e8bb 9071 if (shift)
b26eefb6 9072 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9073 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9074 tcg_temp_free_i32(tmp);
9ee6e8bb 9075 break;
2a0308c5 9076 case 0xc: /* Negative offset. */
b0109805 9077 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9078 break;
2a0308c5 9079 case 0xe: /* User privilege. */
b0109805
PB
9080 tcg_gen_addi_i32(addr, addr, imm);
9081 user = 1;
9ee6e8bb 9082 break;
2a0308c5 9083 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9084 imm = -imm;
9085 /* Fall through. */
2a0308c5 9086 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9087 postinc = 1;
9088 writeback = 1;
9089 break;
2a0308c5 9090 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9091 imm = -imm;
9092 /* Fall through. */
2a0308c5 9093 case 0xf: /* Pre-increment. */
b0109805 9094 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9095 writeback = 1;
9096 break;
9097 default:
2a0308c5 9098 tcg_temp_free_i32(addr);
b7bcbe95 9099 goto illegal_op;
9ee6e8bb
PB
9100 }
9101 }
9102 }
9ee6e8bb
PB
9103 if (insn & (1 << 20)) {
9104 /* Load. */
a2fdc890
PM
9105 switch (op) {
9106 case 0: tmp = gen_ld8u(addr, user); break;
9107 case 4: tmp = gen_ld8s(addr, user); break;
9108 case 1: tmp = gen_ld16u(addr, user); break;
9109 case 5: tmp = gen_ld16s(addr, user); break;
9110 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
9111 default:
9112 tcg_temp_free_i32(addr);
9113 goto illegal_op;
a2fdc890
PM
9114 }
9115 if (rs == 15) {
9116 gen_bx(s, tmp);
9ee6e8bb 9117 } else {
a2fdc890 9118 store_reg(s, rs, tmp);
9ee6e8bb
PB
9119 }
9120 } else {
9121 /* Store. */
b0109805 9122 tmp = load_reg(s, rs);
9ee6e8bb 9123 switch (op) {
b0109805
PB
9124 case 0: gen_st8(tmp, addr, user); break;
9125 case 1: gen_st16(tmp, addr, user); break;
9126 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
9127 default:
9128 tcg_temp_free_i32(addr);
9129 goto illegal_op;
b7bcbe95 9130 }
2c0262af 9131 }
9ee6e8bb 9132 if (postinc)
b0109805
PB
9133 tcg_gen_addi_i32(addr, addr, imm);
9134 if (writeback) {
9135 store_reg(s, rn, addr);
9136 } else {
7d1b0095 9137 tcg_temp_free_i32(addr);
b0109805 9138 }
9ee6e8bb
PB
9139 }
9140 break;
9141 default:
9142 goto illegal_op;
2c0262af 9143 }
9ee6e8bb
PB
9144 return 0;
9145illegal_op:
9146 return 1;
2c0262af
FB
9147}
9148
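/* The architectural rule relied on when the Thumb decoder below hands a
 * 32-bit encoding to disas_thumb2_insn(): a first halfword whose top five
 * bits are 0b11101, 0b11110 or 0b11111 starts a 32-bit Thumb-2 instruction;
 * anything else is a complete 16-bit instruction.  Illustrative only: this
 * predicate is not a function in translate.c. */
static int thumb_first_halfword_is_32bit_sketch(uint16_t hw1)
{
    return (hw1 & 0xe000) == 0xe000 && (hw1 & 0x1800) != 0;
}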
0ecb72a5 9149static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9150{
9151 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9152 int32_t offset;
9153 int i;
b26eefb6 9154 TCGv tmp;
d9ba4830 9155 TCGv tmp2;
b0109805 9156 TCGv addr;
99c475ab 9157
9ee6e8bb
PB
9158 if (s->condexec_mask) {
9159 cond = s->condexec_cond;
bedd2912
JB
9160 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9161 s->condlabel = gen_new_label();
9162 gen_test_cc(cond ^ 1, s->condlabel);
9163 s->condjmp = 1;
9164 }
9ee6e8bb
PB
9165 }
9166
d8fd2954 9167 insn = arm_lduw_code(s->pc, s->bswap_code);
99c475ab 9168 s->pc += 2;
b5ff1b31 9169
99c475ab
FB
9170 switch (insn >> 12) {
9171 case 0: case 1:
396e467c 9172
99c475ab
FB
9173 rd = insn & 7;
9174 op = (insn >> 11) & 3;
9175 if (op == 3) {
9176 /* add/subtract */
9177 rn = (insn >> 3) & 7;
396e467c 9178 tmp = load_reg(s, rn);
99c475ab
FB
9179 if (insn & (1 << 10)) {
9180 /* immediate */
7d1b0095 9181 tmp2 = tcg_temp_new_i32();
396e467c 9182 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9183 } else {
9184 /* reg */
9185 rm = (insn >> 6) & 7;
396e467c 9186 tmp2 = load_reg(s, rm);
99c475ab 9187 }
9ee6e8bb
PB
9188 if (insn & (1 << 9)) {
9189 if (s->condexec_mask)
396e467c 9190 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9191 else
396e467c 9192 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
9193 } else {
9194 if (s->condexec_mask)
396e467c 9195 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9196 else
396e467c 9197 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 9198 }
7d1b0095 9199 tcg_temp_free_i32(tmp2);
396e467c 9200 store_reg(s, rd, tmp);
99c475ab
FB
9201 } else {
9202 /* shift immediate */
9203 rm = (insn >> 3) & 7;
9204 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9205 tmp = load_reg(s, rm);
9206 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9207 if (!s->condexec_mask)
9208 gen_logic_CC(tmp);
9209 store_reg(s, rd, tmp);
99c475ab
FB
9210 }
9211 break;
9212 case 2: case 3:
9213 /* arithmetic large immediate */
9214 op = (insn >> 11) & 3;
9215 rd = (insn >> 8) & 0x7;
396e467c 9216 if (op == 0) { /* mov */
7d1b0095 9217 tmp = tcg_temp_new_i32();
396e467c 9218 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9219 if (!s->condexec_mask)
396e467c
FN
9220 gen_logic_CC(tmp);
9221 store_reg(s, rd, tmp);
9222 } else {
9223 tmp = load_reg(s, rd);
7d1b0095 9224 tmp2 = tcg_temp_new_i32();
396e467c
FN
9225 tcg_gen_movi_i32(tmp2, insn & 0xff);
9226 switch (op) {
9227 case 1: /* cmp */
9228 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9229 tcg_temp_free_i32(tmp);
9230 tcg_temp_free_i32(tmp2);
396e467c
FN
9231 break;
9232 case 2: /* add */
9233 if (s->condexec_mask)
9234 tcg_gen_add_i32(tmp, tmp, tmp2);
9235 else
9236 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 9237 tcg_temp_free_i32(tmp2);
396e467c
FN
9238 store_reg(s, rd, tmp);
9239 break;
9240 case 3: /* sub */
9241 if (s->condexec_mask)
9242 tcg_gen_sub_i32(tmp, tmp, tmp2);
9243 else
9244 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 9245 tcg_temp_free_i32(tmp2);
396e467c
FN
9246 store_reg(s, rd, tmp);
9247 break;
9248 }
99c475ab 9249 }
99c475ab
FB
9250 break;
9251 case 4:
9252 if (insn & (1 << 11)) {
9253 rd = (insn >> 8) & 7;
5899f386
FB
9254 /* load pc-relative. Bit 1 of PC is ignored. */
9255 val = s->pc + 2 + ((insn & 0xff) * 4);
9256 val &= ~(uint32_t)2;
7d1b0095 9257 addr = tcg_temp_new_i32();
b0109805
PB
9258 tcg_gen_movi_i32(addr, val);
9259 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9260 tcg_temp_free_i32(addr);
b0109805 9261 store_reg(s, rd, tmp);
99c475ab
FB
9262 break;
9263 }
9264 if (insn & (1 << 10)) {
9265 /* data processing extended or blx */
9266 rd = (insn & 7) | ((insn >> 4) & 8);
9267 rm = (insn >> 3) & 0xf;
9268 op = (insn >> 8) & 3;
9269 switch (op) {
9270 case 0: /* add */
396e467c
FN
9271 tmp = load_reg(s, rd);
9272 tmp2 = load_reg(s, rm);
9273 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9274 tcg_temp_free_i32(tmp2);
396e467c 9275 store_reg(s, rd, tmp);
99c475ab
FB
9276 break;
9277 case 1: /* cmp */
396e467c
FN
9278 tmp = load_reg(s, rd);
9279 tmp2 = load_reg(s, rm);
9280 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9281 tcg_temp_free_i32(tmp2);
9282 tcg_temp_free_i32(tmp);
99c475ab
FB
9283 break;
9284 case 2: /* mov/cpy */
396e467c
FN
9285 tmp = load_reg(s, rm);
9286 store_reg(s, rd, tmp);
99c475ab
FB
9287 break;
9288 case 3:/* branch [and link] exchange thumb register */
b0109805 9289 tmp = load_reg(s, rm);
99c475ab 9290 if (insn & (1 << 7)) {
be5e7a76 9291 ARCH(5);
99c475ab 9292 val = (uint32_t)s->pc | 1;
7d1b0095 9293 tmp2 = tcg_temp_new_i32();
b0109805
PB
9294 tcg_gen_movi_i32(tmp2, val);
9295 store_reg(s, 14, tmp2);
99c475ab 9296 }
be5e7a76 9297 /* already thumb, no need to check */
d9ba4830 9298 gen_bx(s, tmp);
99c475ab
FB
9299 break;
9300 }
9301 break;
9302 }
9303
9304 /* data processing register */
9305 rd = insn & 7;
9306 rm = (insn >> 3) & 7;
9307 op = (insn >> 6) & 0xf;
9308 if (op == 2 || op == 3 || op == 4 || op == 7) {
9309 /* the shift/rotate ops want the operands backwards */
9310 val = rm;
9311 rm = rd;
9312 rd = val;
9313 val = 1;
9314 } else {
9315 val = 0;
9316 }
9317
396e467c 9318 if (op == 9) { /* neg */
7d1b0095 9319 tmp = tcg_temp_new_i32();
396e467c
FN
9320 tcg_gen_movi_i32(tmp, 0);
9321 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9322 tmp = load_reg(s, rd);
9323 } else {
9324 TCGV_UNUSED(tmp);
9325 }
99c475ab 9326
396e467c 9327 tmp2 = load_reg(s, rm);
5899f386 9328 switch (op) {
99c475ab 9329 case 0x0: /* and */
396e467c 9330 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9331 if (!s->condexec_mask)
396e467c 9332 gen_logic_CC(tmp);
99c475ab
FB
9333 break;
9334 case 0x1: /* eor */
396e467c 9335 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9336 if (!s->condexec_mask)
396e467c 9337 gen_logic_CC(tmp);
99c475ab
FB
9338 break;
9339 case 0x2: /* lsl */
9ee6e8bb 9340 if (s->condexec_mask) {
396e467c 9341 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9342 } else {
396e467c
FN
9343 gen_helper_shl_cc(tmp2, tmp2, tmp);
9344 gen_logic_CC(tmp2);
9ee6e8bb 9345 }
99c475ab
FB
9346 break;
9347 case 0x3: /* lsr */
9ee6e8bb 9348 if (s->condexec_mask) {
396e467c 9349 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9350 } else {
396e467c
FN
9351 gen_helper_shr_cc(tmp2, tmp2, tmp);
9352 gen_logic_CC(tmp2);
9ee6e8bb 9353 }
99c475ab
FB
9354 break;
9355 case 0x4: /* asr */
9ee6e8bb 9356 if (s->condexec_mask) {
396e467c 9357 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9358 } else {
396e467c
FN
9359 gen_helper_sar_cc(tmp2, tmp2, tmp);
9360 gen_logic_CC(tmp2);
9ee6e8bb 9361 }
99c475ab
FB
9362 break;
9363 case 0x5: /* adc */
9ee6e8bb 9364 if (s->condexec_mask)
396e467c 9365 gen_adc(tmp, tmp2);
9ee6e8bb 9366 else
396e467c 9367 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9368 break;
9369 case 0x6: /* sbc */
9ee6e8bb 9370 if (s->condexec_mask)
396e467c 9371 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9372 else
396e467c 9373 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9374 break;
9375 case 0x7: /* ror */
9ee6e8bb 9376 if (s->condexec_mask) {
f669df27
AJ
9377 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9378 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9379 } else {
396e467c
FN
9380 gen_helper_ror_cc(tmp2, tmp2, tmp);
9381 gen_logic_CC(tmp2);
9ee6e8bb 9382 }
99c475ab
FB
9383 break;
9384 case 0x8: /* tst */
396e467c
FN
9385 tcg_gen_and_i32(tmp, tmp, tmp2);
9386 gen_logic_CC(tmp);
99c475ab 9387 rd = 16;
5899f386 9388 break;
99c475ab 9389 case 0x9: /* neg */
9ee6e8bb 9390 if (s->condexec_mask)
396e467c 9391 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9392 else
396e467c 9393 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9394 break;
9395 case 0xa: /* cmp */
396e467c 9396 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9397 rd = 16;
9398 break;
9399 case 0xb: /* cmn */
396e467c 9400 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9401 rd = 16;
9402 break;
9403 case 0xc: /* orr */
396e467c 9404 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9405 if (!s->condexec_mask)
396e467c 9406 gen_logic_CC(tmp);
99c475ab
FB
9407 break;
9408 case 0xd: /* mul */
7b2919a0 9409 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9410 if (!s->condexec_mask)
396e467c 9411 gen_logic_CC(tmp);
99c475ab
FB
9412 break;
9413 case 0xe: /* bic */
f669df27 9414 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9415 if (!s->condexec_mask)
396e467c 9416 gen_logic_CC(tmp);
99c475ab
FB
9417 break;
9418 case 0xf: /* mvn */
396e467c 9419 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9420 if (!s->condexec_mask)
396e467c 9421 gen_logic_CC(tmp2);
99c475ab 9422 val = 1;
5899f386 9423 rm = rd;
99c475ab
FB
9424 break;
9425 }
9426 if (rd != 16) {
396e467c
FN
9427 if (val) {
9428 store_reg(s, rm, tmp2);
9429 if (op != 0xf)
7d1b0095 9430 tcg_temp_free_i32(tmp);
396e467c
FN
9431 } else {
9432 store_reg(s, rd, tmp);
7d1b0095 9433 tcg_temp_free_i32(tmp2);
396e467c
FN
9434 }
9435 } else {
7d1b0095
PM
9436 tcg_temp_free_i32(tmp);
9437 tcg_temp_free_i32(tmp2);
99c475ab
FB
9438 }
9439 break;
9440
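        /* Note on the bookkeeping in case 4 above: the shift/rotate ops
         * (2, 3, 4 and 7) take their operands the other way round, so rd
         * and rm are swapped and val=1 records that the result lives in
         * tmp2 (e.g. LSL computes tmp2 = tmp2 << tmp, i.e. Rd = Rd << Rs,
         * and the result is stored through rm, which after the swap is
         * the architectural Rd).  rd == 16 marks tst/cmp/cmn, whose
         * result only updates the flags and is freed rather than stored. */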
9441 case 5:
9442 /* load/store register offset. */
9443 rd = insn & 7;
9444 rn = (insn >> 3) & 7;
9445 rm = (insn >> 6) & 7;
9446 op = (insn >> 9) & 7;
b0109805 9447 addr = load_reg(s, rn);
b26eefb6 9448 tmp = load_reg(s, rm);
b0109805 9449 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9450 tcg_temp_free_i32(tmp);
99c475ab
FB
9451
9452 if (op < 3) /* store */
b0109805 9453 tmp = load_reg(s, rd);
99c475ab
FB
9454
9455 switch (op) {
9456 case 0: /* str */
b0109805 9457 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9458 break;
9459 case 1: /* strh */
b0109805 9460 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9461 break;
9462 case 2: /* strb */
b0109805 9463 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9464 break;
9465 case 3: /* ldrsb */
b0109805 9466 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9467 break;
9468 case 4: /* ldr */
b0109805 9469 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9470 break;
9471 case 5: /* ldrh */
b0109805 9472 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9473 break;
9474 case 6: /* ldrb */
b0109805 9475 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9476 break;
9477 case 7: /* ldrsh */
b0109805 9478 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9479 break;
9480 }
9481 if (op >= 3) /* load */
b0109805 9482 store_reg(s, rd, tmp);
7d1b0095 9483 tcg_temp_free_i32(addr);
99c475ab
FB
9484 break;
9485
9486 case 6:
9487 /* load/store word immediate offset */
9488 rd = insn & 7;
9489 rn = (insn >> 3) & 7;
b0109805 9490 addr = load_reg(s, rn);
99c475ab 9491 val = (insn >> 4) & 0x7c;
b0109805 9492 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9493
9494 if (insn & (1 << 11)) {
9495 /* load */
b0109805
PB
9496 tmp = gen_ld32(addr, IS_USER(s));
9497 store_reg(s, rd, tmp);
99c475ab
FB
9498 } else {
9499 /* store */
b0109805
PB
9500 tmp = load_reg(s, rd);
9501 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9502 }
7d1b0095 9503 tcg_temp_free_i32(addr);
99c475ab
FB
9504 break;
9505
9506 case 7:
9507 /* load/store byte immediate offset */
9508 rd = insn & 7;
9509 rn = (insn >> 3) & 7;
b0109805 9510 addr = load_reg(s, rn);
99c475ab 9511 val = (insn >> 6) & 0x1f;
b0109805 9512 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9513
9514 if (insn & (1 << 11)) {
9515 /* load */
b0109805
PB
9516 tmp = gen_ld8u(addr, IS_USER(s));
9517 store_reg(s, rd, tmp);
99c475ab
FB
9518 } else {
9519 /* store */
b0109805
PB
9520 tmp = load_reg(s, rd);
9521 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9522 }
7d1b0095 9523 tcg_temp_free_i32(addr);
99c475ab
FB
9524 break;
9525
9526 case 8:
9527 /* load/store halfword immediate offset */
9528 rd = insn & 7;
9529 rn = (insn >> 3) & 7;
b0109805 9530 addr = load_reg(s, rn);
99c475ab 9531 val = (insn >> 5) & 0x3e;
b0109805 9532 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9533
9534 if (insn & (1 << 11)) {
9535 /* load */
b0109805
PB
9536 tmp = gen_ld16u(addr, IS_USER(s));
9537 store_reg(s, rd, tmp);
99c475ab
FB
9538 } else {
9539 /* store */
b0109805
PB
9540 tmp = load_reg(s, rd);
9541 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9542 }
7d1b0095 9543 tcg_temp_free_i32(addr);
99c475ab
FB
9544 break;
9545
9546 case 9:
9547 /* load/store from stack */
9548 rd = (insn >> 8) & 7;
b0109805 9549 addr = load_reg(s, 13);
99c475ab 9550 val = (insn & 0xff) * 4;
b0109805 9551 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9552
9553 if (insn & (1 << 11)) {
9554 /* load */
b0109805
PB
9555 tmp = gen_ld32(addr, IS_USER(s));
9556 store_reg(s, rd, tmp);
99c475ab
FB
9557 } else {
9558 /* store */
b0109805
PB
9559 tmp = load_reg(s, rd);
9560 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9561 }
7d1b0095 9562 tcg_temp_free_i32(addr);
99c475ab
FB
9563 break;
9564
9565 case 10:
9566 /* add to high reg */
9567 rd = (insn >> 8) & 7;
5899f386
FB
9568 if (insn & (1 << 11)) {
9569 /* SP */
5e3f878a 9570 tmp = load_reg(s, 13);
5899f386
FB
9571 } else {
9572 /* PC. bit 1 is ignored. */
7d1b0095 9573 tmp = tcg_temp_new_i32();
5e3f878a 9574 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9575 }
99c475ab 9576 val = (insn & 0xff) * 4;
5e3f878a
PB
9577 tcg_gen_addi_i32(tmp, tmp, val);
9578 store_reg(s, rd, tmp);
99c475ab
FB
9579 break;
9580
9581 case 11:
9582 /* misc */
9583 op = (insn >> 8) & 0xf;
9584 switch (op) {
9585 case 0:
9586 /* adjust stack pointer */
b26eefb6 9587 tmp = load_reg(s, 13);
99c475ab
FB
9588 val = (insn & 0x7f) * 4;
9589 if (insn & (1 << 7))
6a0d8a1d 9590 val = -(int32_t)val;
b26eefb6
PB
9591 tcg_gen_addi_i32(tmp, tmp, val);
9592 store_reg(s, 13, tmp);
99c475ab
FB
9593 break;
9594
9ee6e8bb
PB
9595 case 2: /* sign/zero extend. */
9596 ARCH(6);
9597 rd = insn & 7;
9598 rm = (insn >> 3) & 7;
b0109805 9599 tmp = load_reg(s, rm);
9ee6e8bb 9600 switch ((insn >> 6) & 3) {
b0109805
PB
9601 case 0: gen_sxth(tmp); break;
9602 case 1: gen_sxtb(tmp); break;
9603 case 2: gen_uxth(tmp); break;
9604 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9605 }
b0109805 9606 store_reg(s, rd, tmp);
9ee6e8bb 9607 break;
99c475ab
FB
9608 case 4: case 5: case 0xc: case 0xd:
9609 /* push/pop */
b0109805 9610 addr = load_reg(s, 13);
5899f386
FB
9611 if (insn & (1 << 8))
9612 offset = 4;
99c475ab 9613 else
5899f386
FB
9614 offset = 0;
9615 for (i = 0; i < 8; i++) {
9616 if (insn & (1 << i))
9617 offset += 4;
9618 }
9619 if ((insn & (1 << 11)) == 0) {
b0109805 9620 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9621 }
99c475ab
FB
9622 for (i = 0; i < 8; i++) {
9623 if (insn & (1 << i)) {
9624 if (insn & (1 << 11)) {
9625 /* pop */
b0109805
PB
9626 tmp = gen_ld32(addr, IS_USER(s));
9627 store_reg(s, i, tmp);
99c475ab
FB
9628 } else {
9629 /* push */
b0109805
PB
9630 tmp = load_reg(s, i);
9631 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9632 }
5899f386 9633 /* advance to the next address. */
b0109805 9634 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9635 }
9636 }
a50f5b91 9637 TCGV_UNUSED(tmp);
99c475ab
FB
9638 if (insn & (1 << 8)) {
9639 if (insn & (1 << 11)) {
9640 /* pop pc */
b0109805 9641 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9642 /* don't set the pc until the rest of the instruction
9643 has completed */
9644 } else {
9645 /* push lr */
b0109805
PB
9646 tmp = load_reg(s, 14);
9647 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9648 }
b0109805 9649 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9650 }
5899f386 9651 if ((insn & (1 << 11)) == 0) {
b0109805 9652 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9653 }
99c475ab 9654 /* write back the new stack pointer */
b0109805 9655 store_reg(s, 13, addr);
99c475ab 9656 /* set the new PC value */
be5e7a76
DES
9657 if ((insn & 0x0900) == 0x0900) {
9658 store_reg_from_load(env, s, 15, tmp);
9659 }
99c475ab
FB
9660 break;
9661
9ee6e8bb
PB
 9662             case 1: case 3: case 9: case 11: /* cbz/cbnz */
9663 rm = insn & 7;
d9ba4830 9664 tmp = load_reg(s, rm);
9ee6e8bb
PB
9665 s->condlabel = gen_new_label();
9666 s->condjmp = 1;
9667 if (insn & (1 << 11))
cb63669a 9668 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9669 else
cb63669a 9670 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9671 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9672 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9673 val = (uint32_t)s->pc + 2;
9674 val += offset;
9675 gen_jmp(s, val);
9676 break;
9677
9678 case 15: /* IT, nop-hint. */
9679 if ((insn & 0xf) == 0) {
9680 gen_nop_hint(s, (insn >> 4) & 0xf);
9681 break;
9682 }
9683 /* If Then. */
9684 s->condexec_cond = (insn >> 4) & 0xe;
9685 s->condexec_mask = insn & 0x1f;
 9686             /* No actual code generated for this insn, just set up state. */
9687 break;
9688
06c949e6 9689 case 0xe: /* bkpt */
be5e7a76 9690 ARCH(5);
bc4a0de0 9691 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9692 break;
9693
9ee6e8bb
PB
9694 case 0xa: /* rev */
9695 ARCH(6);
9696 rn = (insn >> 3) & 0x7;
9697 rd = insn & 0x7;
b0109805 9698 tmp = load_reg(s, rn);
9ee6e8bb 9699 switch ((insn >> 6) & 3) {
66896cb8 9700 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9701 case 1: gen_rev16(tmp); break;
9702 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9703 default: goto illegal_op;
9704 }
b0109805 9705 store_reg(s, rd, tmp);
9ee6e8bb
PB
9706 break;
9707
d9e028c1
PM
9708 case 6:
9709 switch ((insn >> 5) & 7) {
9710 case 2:
9711 /* setend */
9712 ARCH(6);
10962fd5
PM
9713 if (((insn >> 3) & 1) != s->bswap_code) {
9714 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9715 goto illegal_op;
9716 }
9ee6e8bb 9717 break;
d9e028c1
PM
9718 case 3:
9719 /* cps */
9720 ARCH(6);
9721 if (IS_USER(s)) {
9722 break;
8984bd2e 9723 }
d9e028c1
PM
9724 if (IS_M(env)) {
9725 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9726 /* FAULTMASK */
9727 if (insn & 1) {
9728 addr = tcg_const_i32(19);
9729 gen_helper_v7m_msr(cpu_env, addr, tmp);
9730 tcg_temp_free_i32(addr);
9731 }
9732 /* PRIMASK */
9733 if (insn & 2) {
9734 addr = tcg_const_i32(16);
9735 gen_helper_v7m_msr(cpu_env, addr, tmp);
9736 tcg_temp_free_i32(addr);
9737 }
9738 tcg_temp_free_i32(tmp);
9739 gen_lookup_tb(s);
9740 } else {
9741 if (insn & (1 << 4)) {
9742 shift = CPSR_A | CPSR_I | CPSR_F;
9743 } else {
9744 shift = 0;
9745 }
9746 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9747 }
d9e028c1
PM
9748 break;
9749 default:
9750 goto undef;
9ee6e8bb
PB
9751 }
9752 break;
9753
99c475ab
FB
9754 default:
9755 goto undef;
9756 }
9757 break;
9758
9759 case 12:
a7d3970d 9760 {
99c475ab 9761 /* load/store multiple */
a7d3970d
PM
9762 TCGv loaded_var;
9763 TCGV_UNUSED(loaded_var);
99c475ab 9764 rn = (insn >> 8) & 0x7;
b0109805 9765 addr = load_reg(s, rn);
99c475ab
FB
9766 for (i = 0; i < 8; i++) {
9767 if (insn & (1 << i)) {
99c475ab
FB
9768 if (insn & (1 << 11)) {
9769 /* load */
b0109805 9770 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9771 if (i == rn) {
9772 loaded_var = tmp;
9773 } else {
9774 store_reg(s, i, tmp);
9775 }
99c475ab
FB
9776 } else {
9777 /* store */
b0109805
PB
9778 tmp = load_reg(s, i);
9779 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9780 }
5899f386 9781 /* advance to the next address */
b0109805 9782 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9783 }
9784 }
b0109805 9785 if ((insn & (1 << rn)) == 0) {
a7d3970d 9786 /* base reg not in list: base register writeback */
b0109805
PB
9787 store_reg(s, rn, addr);
9788 } else {
a7d3970d
PM
9789 /* base reg in list: if load, complete it now */
9790 if (insn & (1 << 11)) {
9791 store_reg(s, rn, loaded_var);
9792 }
7d1b0095 9793 tcg_temp_free_i32(addr);
b0109805 9794 }
99c475ab 9795 break;
a7d3970d 9796 }
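        /* Worked example of the loaded_var handling above: for
         * "ldmia r0!, {r0, r2}" the word loaded for r0 is parked in
         * loaded_var and only committed once the loop has finished, and
         * since r0 is in the register list no base writeback happens, so
         * r0 ends up holding the loaded value rather than the incremented
         * address. */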
99c475ab
FB
9797 case 13:
9798 /* conditional branch or swi */
9799 cond = (insn >> 8) & 0xf;
9800 if (cond == 0xe)
9801 goto undef;
9802
9803 if (cond == 0xf) {
9804 /* swi */
422ebf69 9805 gen_set_pc_im(s->pc);
9ee6e8bb 9806 s->is_jmp = DISAS_SWI;
99c475ab
FB
9807 break;
9808 }
9809 /* generate a conditional jump to next instruction */
e50e6a20 9810 s->condlabel = gen_new_label();
d9ba4830 9811 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9812 s->condjmp = 1;
99c475ab
FB
9813
9814 /* jump to the offset */
5899f386 9815 val = (uint32_t)s->pc + 2;
99c475ab 9816 offset = ((int32_t)insn << 24) >> 24;
5899f386 9817 val += offset << 1;
8aaca4c0 9818 gen_jmp(s, val);
99c475ab
FB
9819 break;
9820
9821 case 14:
358bf29e 9822 if (insn & (1 << 11)) {
9ee6e8bb
PB
9823 if (disas_thumb2_insn(env, s, insn))
9824 goto undef32;
358bf29e
PB
9825 break;
9826 }
9ee6e8bb 9827 /* unconditional branch */
99c475ab
FB
9828 val = (uint32_t)s->pc;
9829 offset = ((int32_t)insn << 21) >> 21;
9830 val += (offset << 1) + 2;
8aaca4c0 9831 gen_jmp(s, val);
99c475ab
FB
9832 break;
9833
9834 case 15:
9ee6e8bb 9835 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9836 goto undef32;
9ee6e8bb 9837 break;
99c475ab
FB
9838 }
9839 return;
9ee6e8bb 9840undef32:
bc4a0de0 9841 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9842 return;
9843illegal_op:
99c475ab 9844undef:
bc4a0de0 9845 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9846}
9847
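/* Illustrative summary of how disas_thumb_insn() above dispatches 16-bit
 * encodings on their top four bits; this table is not part of translate.c. */
static const char *const thumb16_groups[16] = {
    [0x0] = "shift by immediate, or 3-bit add/subtract",
    [0x1] = "shift by immediate, or 3-bit add/subtract",
    [0x2] = "mov/cmp/add/sub with 8-bit immediate",
    [0x3] = "mov/cmp/add/sub with 8-bit immediate",
    [0x4] = "ALU register ops, hi-register ops/BX/BLX, PC-relative load",
    [0x5] = "load/store with register offset",
    [0x6] = "load/store word, immediate offset",
    [0x7] = "load/store byte, immediate offset",
    [0x8] = "load/store halfword, immediate offset",
    [0x9] = "load/store relative to SP",
    [0xa] = "add to SP or PC",
    [0xb] = "misc: stack adjust, push/pop, extend, CBZ/CBNZ, IT, bkpt, rev, setend/cps",
    [0xc] = "load/store multiple",
    [0xd] = "conditional branch, or SWI",
    [0xe] = "unconditional branch, or first half of a 32-bit Thumb-2 insn",
    [0xf] = "32-bit Thumb-2 insn (disas_thumb2_insn)",
};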
2c0262af
FB
9848/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9849 basic block 'tb'. If search_pc is TRUE, also generate PC
9850 information for each intermediate instruction. */
0ecb72a5 9851static inline void gen_intermediate_code_internal(CPUARMState *env,
2cfc5f17
TS
9852 TranslationBlock *tb,
9853 int search_pc)
2c0262af
FB
9854{
9855 DisasContext dc1, *dc = &dc1;
a1d1bb31 9856 CPUBreakpoint *bp;
2c0262af
FB
9857 uint16_t *gen_opc_end;
9858 int j, lj;
0fa85d43 9859 target_ulong pc_start;
b5ff1b31 9860 uint32_t next_page_start;
2e70f6ef
PB
9861 int num_insns;
9862 int max_insns;
3b46e624 9863
2c0262af 9864 /* generate intermediate code */
0fa85d43 9865 pc_start = tb->pc;
3b46e624 9866
2c0262af
FB
9867 dc->tb = tb;
9868
2c0262af 9869 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9870
9871 dc->is_jmp = DISAS_NEXT;
9872 dc->pc = pc_start;
8aaca4c0 9873 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9874 dc->condjmp = 0;
7204ab88 9875 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 9876 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
9877 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9878 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9879#if !defined(CONFIG_USER_ONLY)
61f74d6a 9880 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9881#endif
5df8bac1 9882 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9883 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9884 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9885 cpu_F0s = tcg_temp_new_i32();
9886 cpu_F1s = tcg_temp_new_i32();
9887 cpu_F0d = tcg_temp_new_i64();
9888 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9889 cpu_V0 = cpu_F0d;
9890 cpu_V1 = cpu_F1d;
e677137d 9891 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9892 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9893 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9894 lj = -1;
2e70f6ef
PB
9895 num_insns = 0;
9896 max_insns = tb->cflags & CF_COUNT_MASK;
9897 if (max_insns == 0)
9898 max_insns = CF_COUNT_MASK;
9899
9900 gen_icount_start();
e12ce78d 9901
3849902c
PM
9902 tcg_clear_temp_count();
9903
e12ce78d
PM
9904 /* A note on handling of the condexec (IT) bits:
9905 *
9906 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 9907 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 9908 * (1) if the condexec bits are not already zero then we write
0ecb72a5 9909 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
9910 * to do it at the end of the block. (For example if we don't do this
9911 * it's hard to identify whether we can safely skip writing condexec
9912 * at the end of the TB, which we definitely want to do for the case
9913 * where a TB doesn't do anything with the IT state at all.)
9914 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 9915 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
9916 * This is done both for leaving the TB at the end, and for leaving
9917 * it because of an exception we know will happen, which is done in
9918 * gen_exception_insn(). The latter is necessary because we need to
9919 * leave the TB with the PC/IT state just prior to execution of the
9920 * instruction which caused the exception.
9921 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 9922 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
9923 * This is handled in the same way as restoration of the
9924 * PC in these situations: we will be called again with search_pc=1
9925 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9926 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9927 * this to restore the condexec bits.
e12ce78d
PM
9928 *
9929 * Note that there are no instructions which can read the condexec
9930 * bits, and none which can write non-static values to them, so
0ecb72a5 9931 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
9932 * middle of a TB.
9933 */
9934
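    /* Concretely, the per-PC mapping generated below in search_pc mode packs
     * the state as
     *     gen_opc_condexec_bits[lj] = (condexec_cond << 4) | (condexec_mask >> 1);
     * which is the same layout as env->condexec_bits, so
     * restore_state_to_opc() can copy it back with a plain assignment. */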
9ee6e8bb
PB
9935 /* Reset the conditional execution bits immediately. This avoids
9936 complications trying to do it at the end of the block. */
98eac7ca 9937 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9938 {
7d1b0095 9939 TCGv tmp = tcg_temp_new_i32();
8f01245e 9940 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9941 store_cpu_field(tmp, condexec_bits);
8f01245e 9942 }
2c0262af 9943 do {
fbb4a2e3
PB
9944#ifdef CONFIG_USER_ONLY
9945 /* Intercept jump to the magic kernel page. */
9946 if (dc->pc >= 0xffff0000) {
 9947         /* We always get here via a jump, so we know we are not in a
9948 conditional execution block. */
9949 gen_exception(EXCP_KERNEL_TRAP);
9950 dc->is_jmp = DISAS_UPDATE;
9951 break;
9952 }
9953#else
9ee6e8bb
PB
9954 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
 9955         /* We always get here via a jump, so we know we are not in a
9956 conditional execution block. */
d9ba4830 9957 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9958 dc->is_jmp = DISAS_UPDATE;
9959 break;
9ee6e8bb
PB
9960 }
9961#endif
9962
72cf2d4f
BS
9963 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9964 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9965 if (bp->pc == dc->pc) {
bc4a0de0 9966 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9967 /* Advance PC so that clearing the breakpoint will
9968 invalidate this TB. */
9969 dc->pc += 2;
9970 goto done_generating;
1fddef4b
FB
9971 break;
9972 }
9973 }
9974 }
2c0262af
FB
9975 if (search_pc) {
9976 j = gen_opc_ptr - gen_opc_buf;
9977 if (lj < j) {
9978 lj++;
9979 while (lj < j)
9980 gen_opc_instr_start[lj++] = 0;
9981 }
0fa85d43 9982 gen_opc_pc[lj] = dc->pc;
e12ce78d 9983 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9984 gen_opc_instr_start[lj] = 1;
2e70f6ef 9985 gen_opc_icount[lj] = num_insns;
2c0262af 9986 }
e50e6a20 9987
2e70f6ef
PB
9988 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9989 gen_io_start();
9990
5642463a
PM
9991 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9992 tcg_gen_debug_insn_start(dc->pc);
9993 }
9994
7204ab88 9995 if (dc->thumb) {
9ee6e8bb
PB
9996 disas_thumb_insn(env, dc);
9997 if (dc->condexec_mask) {
9998 dc->condexec_cond = (dc->condexec_cond & 0xe)
9999 | ((dc->condexec_mask >> 4) & 1);
10000 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10001 if (dc->condexec_mask == 0) {
10002 dc->condexec_cond = 0;
10003 }
10004 }
10005 } else {
10006 disas_arm_insn(env, dc);
10007 }
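        /* The IT-state advance above reads as: pull the next then/else bit
         * off the top of the 5-bit condexec_mask into bit 0 of
         * condexec_cond (so the next instruction tests either cond or its
         * inverse), then shift the mask up by one; when the mask runs out
         * the IT block is finished and the condition is cleared. */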
e50e6a20
FB
10008
10009 if (dc->condjmp && !dc->is_jmp) {
10010 gen_set_label(dc->condlabel);
10011 dc->condjmp = 0;
10012 }
3849902c
PM
10013
10014 if (tcg_check_temp_count()) {
10015 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
10016 }
10017
aaf2d97d 10018 /* Translation stops when a conditional branch is encountered.
e50e6a20 10019 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10020 * Also stop translation when a page boundary is reached. This
bf20dc07 10021 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10022 num_insns ++;
1fddef4b
FB
10023 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
10024 !env->singlestep_enabled &&
1b530a6d 10025 !singlestep &&
2e70f6ef
PB
10026 dc->pc < next_page_start &&
10027 num_insns < max_insns);
10028
10029 if (tb->cflags & CF_LAST_IO) {
10030 if (dc->condjmp) {
10031 /* FIXME: This can theoretically happen with self-modifying
10032 code. */
10033 cpu_abort(env, "IO on conditional branch instruction");
10034 }
10035 gen_io_end();
10036 }
9ee6e8bb 10037
b5ff1b31 10038 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10039 instruction was a conditional branch or trap, and the PC has
10040 already been written. */
551bd27f 10041 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 10042 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10043 if (dc->condjmp) {
9ee6e8bb
PB
10044 gen_set_condexec(dc);
10045 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10046 gen_exception(EXCP_SWI);
9ee6e8bb 10047 } else {
d9ba4830 10048 gen_exception(EXCP_DEBUG);
9ee6e8bb 10049 }
e50e6a20
FB
10050 gen_set_label(dc->condlabel);
10051 }
10052 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 10053 gen_set_pc_im(dc->pc);
e50e6a20 10054 dc->condjmp = 0;
8aaca4c0 10055 }
9ee6e8bb
PB
10056 gen_set_condexec(dc);
10057 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10058 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10059 } else {
10060 /* FIXME: Single stepping a WFI insn will not halt
10061 the CPU. */
d9ba4830 10062 gen_exception(EXCP_DEBUG);
9ee6e8bb 10063 }
8aaca4c0 10064 } else {
9ee6e8bb
PB
10065 /* While branches must always occur at the end of an IT block,
10066 there are a few other things that can cause us to terminate
10067            the TB in the middle of an IT block:
10068 - Exception generating instructions (bkpt, swi, undefined).
10069 - Page boundaries.
10070 - Hardware watchpoints.
10071 Hardware breakpoints have already been handled and skip this code.
10072 */
10073 gen_set_condexec(dc);
8aaca4c0 10074 switch(dc->is_jmp) {
8aaca4c0 10075 case DISAS_NEXT:
6e256c93 10076 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10077 break;
10078 default:
10079 case DISAS_JUMP:
10080 case DISAS_UPDATE:
10081 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10082 tcg_gen_exit_tb(0);
8aaca4c0
FB
10083 break;
10084 case DISAS_TB_JUMP:
10085 /* nothing more to generate */
10086 break;
9ee6e8bb 10087 case DISAS_WFI:
d9ba4830 10088 gen_helper_wfi();
9ee6e8bb
PB
10089 break;
10090 case DISAS_SWI:
d9ba4830 10091 gen_exception(EXCP_SWI);
9ee6e8bb 10092 break;
8aaca4c0 10093 }
e50e6a20
FB
10094 if (dc->condjmp) {
10095 gen_set_label(dc->condlabel);
9ee6e8bb 10096 gen_set_condexec(dc);
6e256c93 10097 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10098 dc->condjmp = 0;
10099 }
2c0262af 10100 }
2e70f6ef 10101
9ee6e8bb 10102done_generating:
2e70f6ef 10103 gen_icount_end(tb, num_insns);
2c0262af
FB
10104 *gen_opc_ptr = INDEX_op_end;
10105
10106#ifdef DEBUG_DISAS
8fec2b8c 10107 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10108 qemu_log("----------------\n");
10109 qemu_log("IN: %s\n", lookup_symbol(pc_start));
d8fd2954
PB
10110 log_target_disas(pc_start, dc->pc - pc_start,
10111 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10112 qemu_log("\n");
2c0262af
FB
10113 }
10114#endif
b5ff1b31
FB
10115 if (search_pc) {
10116 j = gen_opc_ptr - gen_opc_buf;
10117 lj++;
10118 while (lj <= j)
10119 gen_opc_instr_start[lj++] = 0;
b5ff1b31 10120 } else {
2c0262af 10121 tb->size = dc->pc - pc_start;
2e70f6ef 10122 tb->icount = num_insns;
b5ff1b31 10123 }
2c0262af
FB
10124}
10125
0ecb72a5 10126void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10127{
2cfc5f17 10128 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
10129}
10130
0ecb72a5 10131void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10132{
2cfc5f17 10133 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
10134}
10135
b5ff1b31
FB
10136static const char *cpu_mode_names[16] = {
10137 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10138 "???", "???", "???", "und", "???", "???", "???", "sys"
10139};
9ee6e8bb 10140
0ecb72a5 10141void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10142 int flags)
2c0262af
FB
10143{
10144 int i;
06e80fc9 10145#if 0
bc380d17 10146 union {
b7bcbe95
FB
10147 uint32_t i;
10148 float s;
10149 } s0, s1;
10150 CPU_DoubleU d;
a94a6abf
PB
10151 /* ??? This assumes float64 and double have the same layout.
10152 Oh well, it's only debug dumps. */
10153 union {
10154 float64 f64;
10155 double d;
10156 } d0;
06e80fc9 10157#endif
b5ff1b31 10158 uint32_t psr;
2c0262af
FB
10159
10160 for(i=0;i<16;i++) {
7fe48483 10161 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10162 if ((i % 4) == 3)
7fe48483 10163 cpu_fprintf(f, "\n");
2c0262af 10164 else
7fe48483 10165 cpu_fprintf(f, " ");
2c0262af 10166 }
b5ff1b31 10167 psr = cpsr_read(env);
687fa640
TS
10168 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10169 psr,
b5ff1b31
FB
10170 psr & (1 << 31) ? 'N' : '-',
10171 psr & (1 << 30) ? 'Z' : '-',
10172 psr & (1 << 29) ? 'C' : '-',
10173 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10174 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10175 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10176
5e3f878a 10177#if 0
b7bcbe95 10178 for (i = 0; i < 16; i++) {
8e96005d
FB
10179 d.d = env->vfp.regs[i];
10180 s0.i = d.l.lower;
10181 s1.i = d.l.upper;
a94a6abf
PB
10182 d0.f64 = d.d;
10183 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 10184 i * 2, (int)s0.i, s0.s,
a94a6abf 10185 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 10186 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 10187 d0.d);
b7bcbe95 10188 }
40f137e1 10189 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 10190#endif
2c0262af 10191}
a6b025d3 10192
0ecb72a5 10193void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
10194{
10195 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 10196 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10197}
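
/* Illustrative only: the rough shape of how the search_pc arrays produced by
 * gen_intermediate_code_pc() are consumed after a fault inside a TB.  The
 * real caller is the generic cpu_restore_state() machinery, not translate.c. */
static void example_restore_after_fault(CPUARMState *env, TranslationBlock *tb,
                                        int pc_pos)
{
    /* Re-translate the TB with search_pc enabled so that gen_opc_pc[] and
     * gen_opc_condexec_bits[] describe each guest instruction in it... */
    gen_intermediate_code_pc(env, tb);
    /* ...then copy the recorded PC and IT bits for the faulting instruction
     * back into the CPU state. */
    restore_state_to_opc(env, tb, pc_pos);
}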