]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
target-arm: v6 media multiply space: UNDEF on unassigned encodings
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
2c0262af 28#include "disas.h"
57fec1fe 29#include "tcg-op.h"
79383c9c 30#include "qemu-log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
9ee6e8bb
PB
56 /* Thumb-2 condtional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
b5ff1b31
FB
62#if !defined(CONFIG_USER_ONLY)
63 int user;
64#endif
5df8bac1 65 int vfp_enabled;
69d1fc22
PM
66 int vec_len;
67 int vec_stride;
2c0262af
FB
68} DisasContext;
69
e12ce78d
PM
70static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
71
b5ff1b31
FB
72#if defined(CONFIG_USER_ONLY)
73#define IS_USER(s) 1
74#else
75#define IS_USER(s) (s->user)
76#endif
77
9ee6e8bb
PB
78/* These instructions trap after executing, so defer them until after the
79 conditional executions state has been updated. */
80#define DISAS_WFI 4
81#define DISAS_SWI 5
2c0262af 82
a7812ae4 83static TCGv_ptr cpu_env;
ad69471c 84/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 85static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 86static TCGv_i32 cpu_R[16];
426f5abc
PB
87static TCGv_i32 cpu_exclusive_addr;
88static TCGv_i32 cpu_exclusive_val;
89static TCGv_i32 cpu_exclusive_high;
90#ifdef CONFIG_USER_ONLY
91static TCGv_i32 cpu_exclusive_test;
92static TCGv_i32 cpu_exclusive_info;
93#endif
ad69471c 94
b26eefb6 95/* FIXME: These should be removed. */
a7812ae4
PB
96static TCGv cpu_F0s, cpu_F1s;
97static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 98
2e70f6ef
PB
99#include "gen-icount.h"
100
155c3eac
FN
101static const char *regnames[] =
102 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
103 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
104
b26eefb6
PB
105/* initialize TCG globals. */
106void arm_translate_init(void)
107{
155c3eac
FN
108 int i;
109
a7812ae4
PB
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111
155c3eac
FN
112 for (i = 0; i < 16; i++) {
113 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, regs[i]),
115 regnames[i]);
116 }
426f5abc
PB
117 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, exclusive_addr), "exclusive_addr");
119 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, exclusive_val), "exclusive_val");
121 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_high), "exclusive_high");
123#ifdef CONFIG_USER_ONLY
124 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, exclusive_test), "exclusive_test");
126 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, exclusive_info), "exclusive_info");
128#endif
155c3eac 129
a7812ae4 130#define GEN_HELPER 2
7b59220e 131#include "helper.h"
b26eefb6
PB
132}
133
d9ba4830
PB
134static inline TCGv load_cpu_offset(int offset)
135{
7d1b0095 136 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
142
143static inline void store_cpu_offset(TCGv var, int offset)
144{
145 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 146 tcg_temp_free_i32(var);
d9ba4830
PB
147}
148
149#define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
151
b26eefb6
PB
152/* Set a variable to the value of a CPU register. */
153static void load_reg_var(DisasContext *s, TCGv var, int reg)
154{
155 if (reg == 15) {
156 uint32_t addr;
157 /* normaly, since we updated PC, we need only to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
155c3eac 164 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
169static inline TCGv load_reg(DisasContext *s, int reg)
170{
7d1b0095 171 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
176/* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178static void store_reg(DisasContext *s, int reg, TCGv var)
179{
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
155c3eac 184 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 185 tcg_temp_free_i32(var);
b26eefb6
PB
186}
187
b26eefb6 188/* Value extensions. */
86831435
PB
189#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
191#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
1497c961
PB
194#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 196
b26eefb6 197
b75263d6
JR
198static inline void gen_set_cpsr(TCGv var, uint32_t mask)
199{
200 TCGv tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
203}
d9ba4830
PB
204/* Set NZCV flags from the high 4 bits of var. */
205#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
207static void gen_exception(int excp)
208{
7d1b0095 209 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
210 tcg_gen_movi_i32(tmp, excp);
211 gen_helper_exception(tmp);
7d1b0095 212 tcg_temp_free_i32(tmp);
d9ba4830
PB
213}
214
3670669c
PB
215static void gen_smul_dual(TCGv a, TCGv b)
216{
7d1b0095
PM
217 TCGv tmp1 = tcg_temp_new_i32();
218 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
219 tcg_gen_ext16s_i32(tmp1, a);
220 tcg_gen_ext16s_i32(tmp2, b);
3670669c 221 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 222 tcg_temp_free_i32(tmp2);
3670669c
PB
223 tcg_gen_sari_i32(a, a, 16);
224 tcg_gen_sari_i32(b, b, 16);
225 tcg_gen_mul_i32(b, b, a);
226 tcg_gen_mov_i32(a, tmp1);
7d1b0095 227 tcg_temp_free_i32(tmp1);
3670669c
PB
228}
229
230/* Byteswap each halfword. */
231static void gen_rev16(TCGv var)
232{
7d1b0095 233 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
234 tcg_gen_shri_i32(tmp, var, 8);
235 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
236 tcg_gen_shli_i32(var, var, 8);
237 tcg_gen_andi_i32(var, var, 0xff00ff00);
238 tcg_gen_or_i32(var, var, tmp);
7d1b0095 239 tcg_temp_free_i32(tmp);
3670669c
PB
240}
241
242/* Byteswap low halfword and sign extend. */
243static void gen_revsh(TCGv var)
244{
1a855029
AJ
245 tcg_gen_ext16u_i32(var, var);
246 tcg_gen_bswap16_i32(var, var);
247 tcg_gen_ext16s_i32(var, var);
3670669c
PB
248}
249
250/* Unsigned bitfield extract. */
251static void gen_ubfx(TCGv var, int shift, uint32_t mask)
252{
253 if (shift)
254 tcg_gen_shri_i32(var, var, shift);
255 tcg_gen_andi_i32(var, var, mask);
256}
257
258/* Signed bitfield extract. */
259static void gen_sbfx(TCGv var, int shift, int width)
260{
261 uint32_t signbit;
262
263 if (shift)
264 tcg_gen_sari_i32(var, var, shift);
265 if (shift + width < 32) {
266 signbit = 1u << (width - 1);
267 tcg_gen_andi_i32(var, var, (1u << width) - 1);
268 tcg_gen_xori_i32(var, var, signbit);
269 tcg_gen_subi_i32(var, var, signbit);
270 }
271}
272
273/* Bitfield insertion. Insert val into base. Clobbers base and val. */
274static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
275{
3670669c 276 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
277 tcg_gen_shli_i32(val, val, shift);
278 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
279 tcg_gen_or_i32(dest, base, val);
280}
281
838fa72d
AJ
282/* Return (b << 32) + a. Mark inputs as dead */
283static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 284{
838fa72d
AJ
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
286
287 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 288 tcg_temp_free_i32(b);
838fa72d
AJ
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
291
292 tcg_temp_free_i64(tmp64);
293 return a;
294}
295
296/* Return (b << 32) - a. Mark inputs as dead. */
297static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
298{
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
300
301 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 302 tcg_temp_free_i32(b);
838fa72d
AJ
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
305
306 tcg_temp_free_i64(tmp64);
307 return a;
3670669c
PB
308}
309
8f01245e
PB
310/* FIXME: Most targets have native widening multiplication.
311 It would be good to use that instead of a full wide multiply. */
5e3f878a 312/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 313static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 314{
a7812ae4
PB
315 TCGv_i64 tmp1 = tcg_temp_new_i64();
316 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
317
318 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 319 tcg_temp_free_i32(a);
5e3f878a 320 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 321 tcg_temp_free_i32(b);
5e3f878a 322 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 323 tcg_temp_free_i64(tmp2);
5e3f878a
PB
324 return tmp1;
325}
326
a7812ae4 327static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
a7812ae4
PB
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
331
332 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 333 tcg_temp_free_i32(a);
5e3f878a 334 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 335 tcg_temp_free_i32(b);
5e3f878a 336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 337 tcg_temp_free_i64(tmp2);
5e3f878a
PB
338 return tmp1;
339}
340
8f01245e
PB
341/* Swap low and high halfwords. */
342static void gen_swap_half(TCGv var)
343{
7d1b0095 344 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
345 tcg_gen_shri_i32(tmp, var, 16);
346 tcg_gen_shli_i32(var, var, 16);
347 tcg_gen_or_i32(var, var, tmp);
7d1b0095 348 tcg_temp_free_i32(tmp);
8f01245e
PB
349}
350
b26eefb6
PB
351/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
352 tmp = (t0 ^ t1) & 0x8000;
353 t0 &= ~0x8000;
354 t1 &= ~0x8000;
355 t0 = (t0 + t1) ^ tmp;
356 */
357
358static void gen_add16(TCGv t0, TCGv t1)
359{
7d1b0095 360 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
361 tcg_gen_xor_i32(tmp, t0, t1);
362 tcg_gen_andi_i32(tmp, tmp, 0x8000);
363 tcg_gen_andi_i32(t0, t0, ~0x8000);
364 tcg_gen_andi_i32(t1, t1, ~0x8000);
365 tcg_gen_add_i32(t0, t0, t1);
366 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
367 tcg_temp_free_i32(tmp);
368 tcg_temp_free_i32(t1);
b26eefb6
PB
369}
370
9a119ff6
PB
371#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
372
b26eefb6
PB
373/* Set CF to the top bit of var. */
374static void gen_set_CF_bit31(TCGv var)
375{
7d1b0095 376 TCGv tmp = tcg_temp_new_i32();
b26eefb6 377 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 378 gen_set_CF(tmp);
7d1b0095 379 tcg_temp_free_i32(tmp);
b26eefb6
PB
380}
381
382/* Set N and Z flags from var. */
383static inline void gen_logic_CC(TCGv var)
384{
6fbe23d5
PB
385 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
387}
388
389/* T0 += T1 + CF. */
396e467c 390static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 391{
d9ba4830 392 TCGv tmp;
396e467c 393 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 394 tmp = load_cpu_field(CF);
396e467c 395 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 396 tcg_temp_free_i32(tmp);
b26eefb6
PB
397}
398
e9bb4aa9
JR
399/* dest = T0 + T1 + CF. */
400static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
401{
402 TCGv tmp;
403 tcg_gen_add_i32(dest, t0, t1);
404 tmp = load_cpu_field(CF);
405 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 406 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
407}
408
3670669c
PB
409/* dest = T0 - T1 + CF - 1. */
410static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
411{
d9ba4830 412 TCGv tmp;
3670669c 413 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 414 tmp = load_cpu_field(CF);
3670669c
PB
415 tcg_gen_add_i32(dest, dest, tmp);
416 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 417 tcg_temp_free_i32(tmp);
3670669c
PB
418}
419
ad69471c
PB
420/* FIXME: Implement this natively. */
421#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
422
9a119ff6 423static void shifter_out_im(TCGv var, int shift)
b26eefb6 424{
7d1b0095 425 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
426 if (shift == 0) {
427 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 428 } else {
9a119ff6 429 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 430 if (shift != 31)
9a119ff6
PB
431 tcg_gen_andi_i32(tmp, tmp, 1);
432 }
433 gen_set_CF(tmp);
7d1b0095 434 tcg_temp_free_i32(tmp);
9a119ff6 435}
b26eefb6 436
9a119ff6
PB
437/* Shift by immediate. Includes special handling for shift == 0. */
438static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
439{
440 switch (shiftop) {
441 case 0: /* LSL */
442 if (shift != 0) {
443 if (flags)
444 shifter_out_im(var, 32 - shift);
445 tcg_gen_shli_i32(var, var, shift);
446 }
447 break;
448 case 1: /* LSR */
449 if (shift == 0) {
450 if (flags) {
451 tcg_gen_shri_i32(var, var, 31);
452 gen_set_CF(var);
453 }
454 tcg_gen_movi_i32(var, 0);
455 } else {
456 if (flags)
457 shifter_out_im(var, shift - 1);
458 tcg_gen_shri_i32(var, var, shift);
459 }
460 break;
461 case 2: /* ASR */
462 if (shift == 0)
463 shift = 32;
464 if (flags)
465 shifter_out_im(var, shift - 1);
466 if (shift == 32)
467 shift = 31;
468 tcg_gen_sari_i32(var, var, shift);
469 break;
470 case 3: /* ROR/RRX */
471 if (shift != 0) {
472 if (flags)
473 shifter_out_im(var, shift - 1);
f669df27 474 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 475 } else {
d9ba4830 476 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
477 if (flags)
478 shifter_out_im(var, 0);
479 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
480 tcg_gen_shli_i32(tmp, tmp, 31);
481 tcg_gen_or_i32(var, var, tmp);
7d1b0095 482 tcg_temp_free_i32(tmp);
b26eefb6
PB
483 }
484 }
485};
486
8984bd2e
PB
487static inline void gen_arm_shift_reg(TCGv var, int shiftop,
488 TCGv shift, int flags)
489{
490 if (flags) {
491 switch (shiftop) {
492 case 0: gen_helper_shl_cc(var, var, shift); break;
493 case 1: gen_helper_shr_cc(var, var, shift); break;
494 case 2: gen_helper_sar_cc(var, var, shift); break;
495 case 3: gen_helper_ror_cc(var, var, shift); break;
496 }
497 } else {
498 switch (shiftop) {
499 case 0: gen_helper_shl(var, var, shift); break;
500 case 1: gen_helper_shr(var, var, shift); break;
501 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
502 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
503 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
504 }
505 }
7d1b0095 506 tcg_temp_free_i32(shift);
8984bd2e
PB
507}
508
6ddbc6e4
PB
509#define PAS_OP(pfx) \
510 switch (op2) { \
511 case 0: gen_pas_helper(glue(pfx,add16)); break; \
512 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
513 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
514 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
515 case 4: gen_pas_helper(glue(pfx,add8)); break; \
516 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
517 }
d9ba4830 518static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 519{
a7812ae4 520 TCGv_ptr tmp;
6ddbc6e4
PB
521
522 switch (op1) {
523#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
524 case 1:
a7812ae4 525 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
526 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
527 PAS_OP(s)
b75263d6 528 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
529 break;
530 case 5:
a7812ae4 531 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
532 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
533 PAS_OP(u)
b75263d6 534 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
535 break;
536#undef gen_pas_helper
537#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
538 case 2:
539 PAS_OP(q);
540 break;
541 case 3:
542 PAS_OP(sh);
543 break;
544 case 6:
545 PAS_OP(uq);
546 break;
547 case 7:
548 PAS_OP(uh);
549 break;
550#undef gen_pas_helper
551 }
552}
9ee6e8bb
PB
553#undef PAS_OP
554
6ddbc6e4
PB
555/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
556#define PAS_OP(pfx) \
ed89a2f1 557 switch (op1) { \
6ddbc6e4
PB
558 case 0: gen_pas_helper(glue(pfx,add8)); break; \
559 case 1: gen_pas_helper(glue(pfx,add16)); break; \
560 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
562 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
564 }
d9ba4830 565static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 566{
a7812ae4 567 TCGv_ptr tmp;
6ddbc6e4 568
ed89a2f1 569 switch (op2) {
6ddbc6e4
PB
570#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
571 case 0:
a7812ae4 572 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
573 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
574 PAS_OP(s)
b75263d6 575 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
576 break;
577 case 4:
a7812ae4 578 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
b75263d6 581 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
582 break;
583#undef gen_pas_helper
584#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
585 case 1:
586 PAS_OP(q);
587 break;
588 case 2:
589 PAS_OP(sh);
590 break;
591 case 5:
592 PAS_OP(uq);
593 break;
594 case 6:
595 PAS_OP(uh);
596 break;
597#undef gen_pas_helper
598 }
599}
9ee6e8bb
PB
600#undef PAS_OP
601
d9ba4830
PB
602static void gen_test_cc(int cc, int label)
603{
604 TCGv tmp;
605 TCGv tmp2;
d9ba4830
PB
606 int inv;
607
d9ba4830
PB
608 switch (cc) {
609 case 0: /* eq: Z */
6fbe23d5 610 tmp = load_cpu_field(ZF);
cb63669a 611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
612 break;
613 case 1: /* ne: !Z */
6fbe23d5 614 tmp = load_cpu_field(ZF);
cb63669a 615 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
616 break;
617 case 2: /* cs: C */
618 tmp = load_cpu_field(CF);
cb63669a 619 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
620 break;
621 case 3: /* cc: !C */
622 tmp = load_cpu_field(CF);
cb63669a 623 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
624 break;
625 case 4: /* mi: N */
6fbe23d5 626 tmp = load_cpu_field(NF);
cb63669a 627 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
628 break;
629 case 5: /* pl: !N */
6fbe23d5 630 tmp = load_cpu_field(NF);
cb63669a 631 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
632 break;
633 case 6: /* vs: V */
634 tmp = load_cpu_field(VF);
cb63669a 635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
636 break;
637 case 7: /* vc: !V */
638 tmp = load_cpu_field(VF);
cb63669a 639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
640 break;
641 case 8: /* hi: C && !Z */
642 inv = gen_new_label();
643 tmp = load_cpu_field(CF);
cb63669a 644 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 645 tcg_temp_free_i32(tmp);
6fbe23d5 646 tmp = load_cpu_field(ZF);
cb63669a 647 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
648 gen_set_label(inv);
649 break;
650 case 9: /* ls: !C || Z */
651 tmp = load_cpu_field(CF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 653 tcg_temp_free_i32(tmp);
6fbe23d5 654 tmp = load_cpu_field(ZF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
656 break;
657 case 10: /* ge: N == V -> N ^ V == 0 */
658 tmp = load_cpu_field(VF);
6fbe23d5 659 tmp2 = load_cpu_field(NF);
d9ba4830 660 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 661 tcg_temp_free_i32(tmp2);
cb63669a 662 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
663 break;
664 case 11: /* lt: N != V -> N ^ V != 0 */
665 tmp = load_cpu_field(VF);
6fbe23d5 666 tmp2 = load_cpu_field(NF);
d9ba4830 667 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 668 tcg_temp_free_i32(tmp2);
cb63669a 669 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
670 break;
671 case 12: /* gt: !Z && N == V */
672 inv = gen_new_label();
6fbe23d5 673 tmp = load_cpu_field(ZF);
cb63669a 674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 675 tcg_temp_free_i32(tmp);
d9ba4830 676 tmp = load_cpu_field(VF);
6fbe23d5 677 tmp2 = load_cpu_field(NF);
d9ba4830 678 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 679 tcg_temp_free_i32(tmp2);
cb63669a 680 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
681 gen_set_label(inv);
682 break;
683 case 13: /* le: Z || N != V */
6fbe23d5 684 tmp = load_cpu_field(ZF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 686 tcg_temp_free_i32(tmp);
d9ba4830 687 tmp = load_cpu_field(VF);
6fbe23d5 688 tmp2 = load_cpu_field(NF);
d9ba4830 689 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 690 tcg_temp_free_i32(tmp2);
cb63669a 691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
692 break;
693 default:
694 fprintf(stderr, "Bad condition code 0x%x\n", cc);
695 abort();
696 }
7d1b0095 697 tcg_temp_free_i32(tmp);
d9ba4830 698}
2c0262af 699
b1d8e52e 700static const uint8_t table_logic_cc[16] = {
2c0262af
FB
701 1, /* and */
702 1, /* xor */
703 0, /* sub */
704 0, /* rsb */
705 0, /* add */
706 0, /* adc */
707 0, /* sbc */
708 0, /* rsc */
709 1, /* andl */
710 1, /* xorl */
711 0, /* cmp */
712 0, /* cmn */
713 1, /* orr */
714 1, /* mov */
715 1, /* bic */
716 1, /* mvn */
717};
3b46e624 718
d9ba4830
PB
719/* Set PC and Thumb state from an immediate address. */
720static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 721{
b26eefb6 722 TCGv tmp;
99c475ab 723
b26eefb6 724 s->is_jmp = DISAS_UPDATE;
d9ba4830 725 if (s->thumb != (addr & 1)) {
7d1b0095 726 tmp = tcg_temp_new_i32();
d9ba4830
PB
727 tcg_gen_movi_i32(tmp, addr & 1);
728 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 729 tcg_temp_free_i32(tmp);
d9ba4830 730 }
155c3eac 731 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
732}
733
734/* Set PC and Thumb state from var. var is marked as dead. */
735static inline void gen_bx(DisasContext *s, TCGv var)
736{
d9ba4830 737 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
738 tcg_gen_andi_i32(cpu_R[15], var, ~1);
739 tcg_gen_andi_i32(var, var, 1);
740 store_cpu_field(var, thumb);
d9ba4830
PB
741}
742
21aeb343
JR
743/* Variant of store_reg which uses branch&exchange logic when storing
744 to r15 in ARM architecture v7 and above. The source must be a temporary
745 and will be marked as dead. */
746static inline void store_reg_bx(CPUState *env, DisasContext *s,
747 int reg, TCGv var)
748{
749 if (reg == 15 && ENABLE_ARCH_7) {
750 gen_bx(s, var);
751 } else {
752 store_reg(s, reg, var);
753 }
754}
755
be5e7a76
DES
756/* Variant of store_reg which uses branch&exchange logic when storing
757 * to r15 in ARM architecture v5T and above. This is used for storing
758 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
759 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
760static inline void store_reg_from_load(CPUState *env, DisasContext *s,
761 int reg, TCGv var)
762{
763 if (reg == 15 && ENABLE_ARCH_5) {
764 gen_bx(s, var);
765 } else {
766 store_reg(s, reg, var);
767 }
768}
769
b0109805
PB
770static inline TCGv gen_ld8s(TCGv addr, int index)
771{
7d1b0095 772 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
773 tcg_gen_qemu_ld8s(tmp, addr, index);
774 return tmp;
775}
776static inline TCGv gen_ld8u(TCGv addr, int index)
777{
7d1b0095 778 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
779 tcg_gen_qemu_ld8u(tmp, addr, index);
780 return tmp;
781}
782static inline TCGv gen_ld16s(TCGv addr, int index)
783{
7d1b0095 784 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
785 tcg_gen_qemu_ld16s(tmp, addr, index);
786 return tmp;
787}
788static inline TCGv gen_ld16u(TCGv addr, int index)
789{
7d1b0095 790 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
791 tcg_gen_qemu_ld16u(tmp, addr, index);
792 return tmp;
793}
794static inline TCGv gen_ld32(TCGv addr, int index)
795{
7d1b0095 796 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
797 tcg_gen_qemu_ld32u(tmp, addr, index);
798 return tmp;
799}
84496233
JR
800static inline TCGv_i64 gen_ld64(TCGv addr, int index)
801{
802 TCGv_i64 tmp = tcg_temp_new_i64();
803 tcg_gen_qemu_ld64(tmp, addr, index);
804 return tmp;
805}
b0109805
PB
806static inline void gen_st8(TCGv val, TCGv addr, int index)
807{
808 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 809 tcg_temp_free_i32(val);
b0109805
PB
810}
811static inline void gen_st16(TCGv val, TCGv addr, int index)
812{
813 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 814 tcg_temp_free_i32(val);
b0109805
PB
815}
816static inline void gen_st32(TCGv val, TCGv addr, int index)
817{
818 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 819 tcg_temp_free_i32(val);
b0109805 820}
84496233
JR
821static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
822{
823 tcg_gen_qemu_st64(val, addr, index);
824 tcg_temp_free_i64(val);
825}
b5ff1b31 826
5e3f878a
PB
827static inline void gen_set_pc_im(uint32_t val)
828{
155c3eac 829 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
830}
831
b5ff1b31
FB
832/* Force a TB lookup after an instruction that changes the CPU state. */
833static inline void gen_lookup_tb(DisasContext *s)
834{
a6445c52 835 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
836 s->is_jmp = DISAS_UPDATE;
837}
838
b0109805
PB
839static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
840 TCGv var)
2c0262af 841{
1e8d4eec 842 int val, rm, shift, shiftop;
b26eefb6 843 TCGv offset;
2c0262af
FB
844
845 if (!(insn & (1 << 25))) {
846 /* immediate */
847 val = insn & 0xfff;
848 if (!(insn & (1 << 23)))
849 val = -val;
537730b9 850 if (val != 0)
b0109805 851 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
852 } else {
853 /* shift/register */
854 rm = (insn) & 0xf;
855 shift = (insn >> 7) & 0x1f;
1e8d4eec 856 shiftop = (insn >> 5) & 3;
b26eefb6 857 offset = load_reg(s, rm);
9a119ff6 858 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 859 if (!(insn & (1 << 23)))
b0109805 860 tcg_gen_sub_i32(var, var, offset);
2c0262af 861 else
b0109805 862 tcg_gen_add_i32(var, var, offset);
7d1b0095 863 tcg_temp_free_i32(offset);
2c0262af
FB
864 }
865}
866
191f9a93 867static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 868 int extra, TCGv var)
2c0262af
FB
869{
870 int val, rm;
b26eefb6 871 TCGv offset;
3b46e624 872
2c0262af
FB
873 if (insn & (1 << 22)) {
874 /* immediate */
875 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
876 if (!(insn & (1 << 23)))
877 val = -val;
18acad92 878 val += extra;
537730b9 879 if (val != 0)
b0109805 880 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
881 } else {
882 /* register */
191f9a93 883 if (extra)
b0109805 884 tcg_gen_addi_i32(var, var, extra);
2c0262af 885 rm = (insn) & 0xf;
b26eefb6 886 offset = load_reg(s, rm);
2c0262af 887 if (!(insn & (1 << 23)))
b0109805 888 tcg_gen_sub_i32(var, var, offset);
2c0262af 889 else
b0109805 890 tcg_gen_add_i32(var, var, offset);
7d1b0095 891 tcg_temp_free_i32(offset);
2c0262af
FB
892 }
893}
894
5aaebd13
PM
895static TCGv_ptr get_fpstatus_ptr(int neon)
896{
897 TCGv_ptr statusptr = tcg_temp_new_ptr();
898 int offset;
899 if (neon) {
900 offset = offsetof(CPUState, vfp.standard_fp_status);
901 } else {
902 offset = offsetof(CPUState, vfp.fp_status);
903 }
904 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
905 return statusptr;
906}
907
4373f3ce
PB
908#define VFP_OP2(name) \
909static inline void gen_vfp_##name(int dp) \
910{ \
ae1857ec
PM
911 TCGv_ptr fpst = get_fpstatus_ptr(0); \
912 if (dp) { \
913 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
914 } else { \
915 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
916 } \
917 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
918}
919
4373f3ce
PB
920VFP_OP2(add)
921VFP_OP2(sub)
922VFP_OP2(mul)
923VFP_OP2(div)
924
925#undef VFP_OP2
926
605a6aed
PM
927static inline void gen_vfp_F1_mul(int dp)
928{
929 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 930 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 931 if (dp) {
ae1857ec 932 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 933 } else {
ae1857ec 934 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 935 }
ae1857ec 936 tcg_temp_free_ptr(fpst);
605a6aed
PM
937}
938
939static inline void gen_vfp_F1_neg(int dp)
940{
941 /* Like gen_vfp_neg() but put result in F1 */
942 if (dp) {
943 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
944 } else {
945 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
946 }
947}
948
4373f3ce
PB
949static inline void gen_vfp_abs(int dp)
950{
951 if (dp)
952 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
953 else
954 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
955}
956
957static inline void gen_vfp_neg(int dp)
958{
959 if (dp)
960 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
961 else
962 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
963}
964
965static inline void gen_vfp_sqrt(int dp)
966{
967 if (dp)
968 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
969 else
970 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
971}
972
973static inline void gen_vfp_cmp(int dp)
974{
975 if (dp)
976 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
977 else
978 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
979}
980
981static inline void gen_vfp_cmpe(int dp)
982{
983 if (dp)
984 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
985 else
986 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
987}
988
989static inline void gen_vfp_F1_ld0(int dp)
990{
991 if (dp)
5b340b51 992 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 993 else
5b340b51 994 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
995}
996
5500b06c
PM
997#define VFP_GEN_ITOF(name) \
998static inline void gen_vfp_##name(int dp, int neon) \
999{ \
5aaebd13 1000 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1001 if (dp) { \
1002 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1003 } else { \
1004 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1005 } \
b7fa9214 1006 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1007}
1008
5500b06c
PM
1009VFP_GEN_ITOF(uito)
1010VFP_GEN_ITOF(sito)
1011#undef VFP_GEN_ITOF
4373f3ce 1012
5500b06c
PM
/* Float-to-integer conversions on F0.  The 32-bit integer result
 * always goes to F0s, even when converting from a double in F0d.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)   /* to unsigned int, current rounding mode */
VFP_GEN_FTOI(touiz)  /* to unsigned int, round towards zero */
VFP_GEN_FTOI(tosi)   /* to signed int, current rounding mode */
VFP_GEN_FTOI(tosiz)  /* to signed int, round towards zero */
#undef VFP_GEN_FTOI
4373f3ce
PB
1030
/* Fixed-point <-> float conversions on F0 with a fractional 'shift'
 * (passed to the helper as an immediate temporary).
 */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)  /* float to signed halfword fixed-point */
VFP_GEN_FIX(tosl)  /* float to signed word fixed-point */
VFP_GEN_FIX(touh)  /* float to unsigned halfword fixed-point */
VFP_GEN_FIX(toul)  /* float to unsigned word fixed-point */
VFP_GEN_FIX(shto)  /* signed halfword fixed-point to float */
VFP_GEN_FIX(slto)  /* signed word fixed-point to float */
VFP_GEN_FIX(uhto)  /* unsigned halfword fixed-point to float */
VFP_GEN_FIX(ulto)  /* unsigned word fixed-point to float */
#undef VFP_GEN_FIX
9ee6e8bb 1053
312eea9f 1054static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1055{
1056 if (dp)
312eea9f 1057 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1058 else
312eea9f 1059 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1060}
1061
312eea9f 1062static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1063{
1064 if (dp)
312eea9f 1065 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1066 else
312eea9f 1067 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1068}
1069
8e96005d
FB
1070static inline long
1071vfp_reg_offset (int dp, int reg)
1072{
1073 if (dp)
1074 return offsetof(CPUARMState, vfp.regs[reg]);
1075 else if (reg & 1) {
1076 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1077 + offsetof(CPU_DoubleU, l.upper);
1078 } else {
1079 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1080 + offsetof(CPU_DoubleU, l.lower);
1081 }
1082}
9ee6e8bb
PB
1083
/* Offset of the n'th 32-bit piece of Neon register 'reg';
   n == 0 is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1093
8f8e3aa4
PB
1094static TCGv neon_load_reg(int reg, int pass)
1095{
7d1b0095 1096 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1097 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1098 return tmp;
1099}
1100
/* Write 'var' to one 32-bit pass of a Neon register.  Consumes the
 * temporary: 'var' is freed and must not be used afterwards. */
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1106
/* Load 64-bit (double) register 'reg' into the temporary 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1111
/* Store the temporary 'var' into 64-bit (double) register 'reg';
 * 'var' is NOT freed here. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1116
4373f3ce
PB
/* FP register loads/stores are just integer TCG ops of the same width. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1121
b7bcbe95
FB
1122static inline void gen_mov_F0_vreg(int dp, int reg)
1123{
1124 if (dp)
4373f3ce 1125 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1126 else
4373f3ce 1127 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1128}
1129
1130static inline void gen_mov_F1_vreg(int dp, int reg)
1131{
1132 if (dp)
4373f3ce 1133 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1134 else
4373f3ce 1135 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1136}
1137
1138static inline void gen_mov_vreg_F0(int dp, int reg)
1139{
1140 if (dp)
4373f3ce 1141 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1142 else
4373f3ce 1143 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1144}
1145
18c9b560
AZ
1146#define ARM_CP_RW_BIT (1 << 20)
1147
/* Load iwMMXt data register wRn into the 64-bit temporary 'var'. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1152
/* Store the 64-bit temporary 'var' into iwMMXt data register wRn;
 * 'var' is NOT freed here. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1157
da6b5335 1158static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1159{
7d1b0095 1160 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1161 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1162 return var;
e677137d
PB
1163}
1164
/* Write 'var' to an iwMMXt control register.  Consumes the
 * temporary: 'var' is freed and must not be used afterwards. */
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1170
/* wRn = M0 (copy the working register back to an iwMMXt register). */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1175
/* M0 = wRn (load an iwMMXt register into the working register). */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1180
/* M0 |= wRn (64-bit bitwise OR into the working register). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1186
/* M0 &= wRn (64-bit bitwise AND into the working register). */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1192
/* M0 ^= wRn (64-bit bitwise XOR into the working register). */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1198
/* Emit 'M0 = helper(M0, wRn)' for helpers that need no cpu_env. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}
1205
477955bd
PM
/* As IWMMXT_OP, but the helper also takes cpu_env (presumably because
 * it reads/updates CPU state such as flags -- see the helper
 * definitions). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)
e677137d 1217
/* Unary env-taking op: M0 = helper(env, M0); no wRn operand. */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1223
/* Two-operand ops whose helpers take no cpu_env. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Env-taking ops in byte/word/long variants. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Unary unpack ops (unsigned/signed, low/high halves). */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1280
e677137d
PB
1281static void gen_op_iwmmxt_set_mup(void)
1282{
1283 TCGv tmp;
1284 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1285 tcg_gen_ori_i32(tmp, tmp, 2);
1286 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1287}
1288
1289static void gen_op_iwmmxt_set_cup(void)
1290{
1291 TCGv tmp;
1292 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1293 tcg_gen_ori_i32(tmp, tmp, 1);
1294 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1295}
1296
/* Compute the N/Z status of M0 via the helper and store it into the
 * wCASF control register. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1303
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1310
/* Decode the addressing mode of an iwMMXt load/store instruction and
 * emit code leaving the effective address in 'dest'.  Handles pre- and
 * post-indexed forms with base-register writeback (bit 21).  Returns
 * nonzero for an invalid encoding (caller treats as UNDEF).
 *
 * NOTE(review): in the P=0, W=0, U=1 path we fall through to
 * 'return 0' without writing 'dest' and without freeing 'tmp' --
 * verify whether that encoding can actually reach here.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when bit 8 of the insn is set.  */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);      /* writeback consumes tmp */
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;                       /* P=0, W=0, U=0: invalid */
    return 0;
}
1344
/* Fetch the shift amount for an iwMMXt shift instruction into 'dest',
 * masked with 'mask'.  Bit 8 selects a wCGR control register source
 * (only wCGR0..wCGR3 are valid); otherwise the low 32 bits of data
 * register wRd are used.  Returns nonzero for an invalid encoding.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1366
a1c7273b 1367/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560
AZ
1368 (ie. an undefined instruction). */
1369static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1370{
1371 int rd, wrd;
1372 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1373 TCGv addr;
1374 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1375
1376 if ((insn & 0x0e000e00) == 0x0c000000) {
1377 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1378 wrd = insn & 0xf;
1379 rdlo = (insn >> 12) & 0xf;
1380 rdhi = (insn >> 16) & 0xf;
1381 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1382 iwmmxt_load_reg(cpu_V0, wrd);
1383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1386 } else { /* TMCRR */
da6b5335
FN
1387 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1388 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1389 gen_op_iwmmxt_set_mup();
1390 }
1391 return 0;
1392 }
1393
1394 wrd = (insn >> 12) & 0xf;
7d1b0095 1395 addr = tcg_temp_new_i32();
da6b5335 1396 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1397 tcg_temp_free_i32(addr);
18c9b560 1398 return 1;
da6b5335 1399 }
18c9b560
AZ
1400 if (insn & ARM_CP_RW_BIT) {
1401 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1402 tmp = tcg_temp_new_i32();
da6b5335
FN
1403 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1404 iwmmxt_store_creg(wrd, tmp);
18c9b560 1405 } else {
e677137d
PB
1406 i = 1;
1407 if (insn & (1 << 8)) {
1408 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1409 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1410 i = 0;
1411 } else { /* WLDRW wRd */
da6b5335 1412 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1416 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1417 } else { /* WLDRB */
da6b5335 1418 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1419 }
1420 }
1421 if (i) {
1422 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1423 tcg_temp_free_i32(tmp);
e677137d 1424 }
18c9b560
AZ
1425 gen_op_iwmmxt_movq_wRn_M0(wrd);
1426 }
1427 } else {
1428 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1429 tmp = iwmmxt_load_creg(wrd);
1430 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1431 } else {
1432 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1433 tmp = tcg_temp_new_i32();
e677137d
PB
1434 if (insn & (1 << 8)) {
1435 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1436 tcg_temp_free_i32(tmp);
da6b5335 1437 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1438 } else { /* WSTRW wRd */
1439 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1440 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1441 }
1442 } else {
1443 if (insn & (1 << 22)) { /* WSTRH */
1444 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1445 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1446 } else { /* WSTRB */
1447 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1448 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1449 }
1450 }
18c9b560
AZ
1451 }
1452 }
7d1b0095 1453 tcg_temp_free_i32(addr);
18c9b560
AZ
1454 return 0;
1455 }
1456
1457 if ((insn & 0x0f000000) != 0x0e000000)
1458 return 1;
1459
1460 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1461 case 0x000: /* WOR */
1462 wrd = (insn >> 12) & 0xf;
1463 rd0 = (insn >> 0) & 0xf;
1464 rd1 = (insn >> 16) & 0xf;
1465 gen_op_iwmmxt_movq_M0_wRn(rd0);
1466 gen_op_iwmmxt_orq_M0_wRn(rd1);
1467 gen_op_iwmmxt_setpsr_nz();
1468 gen_op_iwmmxt_movq_wRn_M0(wrd);
1469 gen_op_iwmmxt_set_mup();
1470 gen_op_iwmmxt_set_cup();
1471 break;
1472 case 0x011: /* TMCR */
1473 if (insn & 0xf)
1474 return 1;
1475 rd = (insn >> 12) & 0xf;
1476 wrd = (insn >> 16) & 0xf;
1477 switch (wrd) {
1478 case ARM_IWMMXT_wCID:
1479 case ARM_IWMMXT_wCASF:
1480 break;
1481 case ARM_IWMMXT_wCon:
1482 gen_op_iwmmxt_set_cup();
1483 /* Fall through. */
1484 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1485 tmp = iwmmxt_load_creg(wrd);
1486 tmp2 = load_reg(s, rd);
f669df27 1487 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1488 tcg_temp_free_i32(tmp2);
da6b5335 1489 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1490 break;
1491 case ARM_IWMMXT_wCGR0:
1492 case ARM_IWMMXT_wCGR1:
1493 case ARM_IWMMXT_wCGR2:
1494 case ARM_IWMMXT_wCGR3:
1495 gen_op_iwmmxt_set_cup();
da6b5335
FN
1496 tmp = load_reg(s, rd);
1497 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1498 break;
1499 default:
1500 return 1;
1501 }
1502 break;
1503 case 0x100: /* WXOR */
1504 wrd = (insn >> 12) & 0xf;
1505 rd0 = (insn >> 0) & 0xf;
1506 rd1 = (insn >> 16) & 0xf;
1507 gen_op_iwmmxt_movq_M0_wRn(rd0);
1508 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1509 gen_op_iwmmxt_setpsr_nz();
1510 gen_op_iwmmxt_movq_wRn_M0(wrd);
1511 gen_op_iwmmxt_set_mup();
1512 gen_op_iwmmxt_set_cup();
1513 break;
1514 case 0x111: /* TMRC */
1515 if (insn & 0xf)
1516 return 1;
1517 rd = (insn >> 12) & 0xf;
1518 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1519 tmp = iwmmxt_load_creg(wrd);
1520 store_reg(s, rd, tmp);
18c9b560
AZ
1521 break;
1522 case 0x300: /* WANDN */
1523 wrd = (insn >> 12) & 0xf;
1524 rd0 = (insn >> 0) & 0xf;
1525 rd1 = (insn >> 16) & 0xf;
1526 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1527 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1528 gen_op_iwmmxt_andq_M0_wRn(rd1);
1529 gen_op_iwmmxt_setpsr_nz();
1530 gen_op_iwmmxt_movq_wRn_M0(wrd);
1531 gen_op_iwmmxt_set_mup();
1532 gen_op_iwmmxt_set_cup();
1533 break;
1534 case 0x200: /* WAND */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_andq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x810: case 0xa10: /* WMADD */
1546 wrd = (insn >> 12) & 0xf;
1547 rd0 = (insn >> 0) & 0xf;
1548 rd1 = (insn >> 16) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0);
1550 if (insn & (1 << 21))
1551 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1552 else
1553 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 break;
1557 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1558 wrd = (insn >> 12) & 0xf;
1559 rd0 = (insn >> 16) & 0xf;
1560 rd1 = (insn >> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0);
1562 switch ((insn >> 22) & 3) {
1563 case 0:
1564 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1565 break;
1566 case 1:
1567 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1568 break;
1569 case 2:
1570 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1571 break;
1572 case 3:
1573 return 1;
1574 }
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 gen_op_iwmmxt_set_cup();
1578 break;
1579 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1580 wrd = (insn >> 12) & 0xf;
1581 rd0 = (insn >> 16) & 0xf;
1582 rd1 = (insn >> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0);
1584 switch ((insn >> 22) & 3) {
1585 case 0:
1586 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1587 break;
1588 case 1:
1589 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1590 break;
1591 case 2:
1592 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1593 break;
1594 case 3:
1595 return 1;
1596 }
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1598 gen_op_iwmmxt_set_mup();
1599 gen_op_iwmmxt_set_cup();
1600 break;
1601 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 22))
1607 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1610 if (!(insn & (1 << 20)))
1611 gen_op_iwmmxt_addl_M0_wRn(wrd);
1612 gen_op_iwmmxt_movq_wRn_M0(wrd);
1613 gen_op_iwmmxt_set_mup();
1614 break;
1615 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1616 wrd = (insn >> 12) & 0xf;
1617 rd0 = (insn >> 16) & 0xf;
1618 rd1 = (insn >> 0) & 0xf;
1619 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1620 if (insn & (1 << 21)) {
1621 if (insn & (1 << 20))
1622 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1623 else
1624 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1625 } else {
1626 if (insn & (1 << 20))
1627 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1628 else
1629 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1630 }
18c9b560
AZ
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 break;
1634 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1643 if (!(insn & (1 << 20))) {
e677137d
PB
1644 iwmmxt_load_reg(cpu_V1, wrd);
1645 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1646 }
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 break;
1650 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 16) & 0xf;
1653 rd1 = (insn >> 0) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 switch ((insn >> 22) & 3) {
1656 case 0:
1657 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1658 break;
1659 case 1:
1660 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1661 break;
1662 case 2:
1663 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1664 break;
1665 case 3:
1666 return 1;
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1671 break;
1672 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1673 wrd = (insn >> 12) & 0xf;
1674 rd0 = (insn >> 16) & 0xf;
1675 rd1 = (insn >> 0) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1677 if (insn & (1 << 22)) {
1678 if (insn & (1 << 20))
1679 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1680 else
1681 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1682 } else {
1683 if (insn & (1 << 20))
1684 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1685 else
1686 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1687 }
18c9b560
AZ
1688 gen_op_iwmmxt_movq_wRn_M0(wrd);
1689 gen_op_iwmmxt_set_mup();
1690 gen_op_iwmmxt_set_cup();
1691 break;
1692 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1697 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1698 tcg_gen_andi_i32(tmp, tmp, 7);
1699 iwmmxt_load_reg(cpu_V1, rd1);
1700 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1701 tcg_temp_free_i32(tmp);
18c9b560
AZ
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 break;
1705 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1706 if (((insn >> 6) & 3) == 3)
1707 return 1;
18c9b560
AZ
1708 rd = (insn >> 12) & 0xf;
1709 wrd = (insn >> 16) & 0xf;
da6b5335 1710 tmp = load_reg(s, rd);
18c9b560
AZ
1711 gen_op_iwmmxt_movq_M0_wRn(wrd);
1712 switch ((insn >> 6) & 3) {
1713 case 0:
da6b5335
FN
1714 tmp2 = tcg_const_i32(0xff);
1715 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1716 break;
1717 case 1:
da6b5335
FN
1718 tmp2 = tcg_const_i32(0xffff);
1719 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1720 break;
1721 case 2:
da6b5335
FN
1722 tmp2 = tcg_const_i32(0xffffffff);
1723 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1724 break;
da6b5335
FN
1725 default:
1726 TCGV_UNUSED(tmp2);
1727 TCGV_UNUSED(tmp3);
18c9b560 1728 }
da6b5335
FN
1729 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1730 tcg_temp_free(tmp3);
1731 tcg_temp_free(tmp2);
7d1b0095 1732 tcg_temp_free_i32(tmp);
18c9b560
AZ
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1737 rd = (insn >> 12) & 0xf;
1738 wrd = (insn >> 16) & 0xf;
da6b5335 1739 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1740 return 1;
1741 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1742 tmp = tcg_temp_new_i32();
18c9b560
AZ
1743 switch ((insn >> 22) & 3) {
1744 case 0:
da6b5335
FN
1745 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1746 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1747 if (insn & 8) {
1748 tcg_gen_ext8s_i32(tmp, tmp);
1749 } else {
1750 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1751 }
1752 break;
1753 case 1:
da6b5335
FN
1754 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1755 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1756 if (insn & 8) {
1757 tcg_gen_ext16s_i32(tmp, tmp);
1758 } else {
1759 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1760 }
1761 break;
1762 case 2:
da6b5335
FN
1763 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1764 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1765 break;
18c9b560 1766 }
da6b5335 1767 store_reg(s, rd, tmp);
18c9b560
AZ
1768 break;
1769 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1770 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1771 return 1;
da6b5335 1772 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1773 switch ((insn >> 22) & 3) {
1774 case 0:
da6b5335 1775 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1776 break;
1777 case 1:
da6b5335 1778 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1779 break;
1780 case 2:
da6b5335 1781 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1782 break;
18c9b560 1783 }
da6b5335
FN
1784 tcg_gen_shli_i32(tmp, tmp, 28);
1785 gen_set_nzcv(tmp);
7d1b0095 1786 tcg_temp_free_i32(tmp);
18c9b560
AZ
1787 break;
1788 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1789 if (((insn >> 6) & 3) == 3)
1790 return 1;
18c9b560
AZ
1791 rd = (insn >> 12) & 0xf;
1792 wrd = (insn >> 16) & 0xf;
da6b5335 1793 tmp = load_reg(s, rd);
18c9b560
AZ
1794 switch ((insn >> 6) & 3) {
1795 case 0:
da6b5335 1796 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1797 break;
1798 case 1:
da6b5335 1799 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1800 break;
1801 case 2:
da6b5335 1802 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1803 break;
18c9b560 1804 }
7d1b0095 1805 tcg_temp_free_i32(tmp);
18c9b560
AZ
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1810 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1811 return 1;
da6b5335 1812 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1813 tmp2 = tcg_temp_new_i32();
da6b5335 1814 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1815 switch ((insn >> 22) & 3) {
1816 case 0:
1817 for (i = 0; i < 7; i ++) {
da6b5335
FN
1818 tcg_gen_shli_i32(tmp2, tmp2, 4);
1819 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1820 }
1821 break;
1822 case 1:
1823 for (i = 0; i < 3; i ++) {
da6b5335
FN
1824 tcg_gen_shli_i32(tmp2, tmp2, 8);
1825 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1826 }
1827 break;
1828 case 2:
da6b5335
FN
1829 tcg_gen_shli_i32(tmp2, tmp2, 16);
1830 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1831 break;
18c9b560 1832 }
da6b5335 1833 gen_set_nzcv(tmp);
7d1b0095
PM
1834 tcg_temp_free_i32(tmp2);
1835 tcg_temp_free_i32(tmp);
18c9b560
AZ
1836 break;
1837 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 switch ((insn >> 22) & 3) {
1842 case 0:
e677137d 1843 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1844 break;
1845 case 1:
e677137d 1846 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1847 break;
1848 case 2:
e677137d 1849 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1850 break;
1851 case 3:
1852 return 1;
1853 }
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 break;
1857 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1858 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1859 return 1;
da6b5335 1860 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1861 tmp2 = tcg_temp_new_i32();
da6b5335 1862 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1863 switch ((insn >> 22) & 3) {
1864 case 0:
1865 for (i = 0; i < 7; i ++) {
da6b5335
FN
1866 tcg_gen_shli_i32(tmp2, tmp2, 4);
1867 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1868 }
1869 break;
1870 case 1:
1871 for (i = 0; i < 3; i ++) {
da6b5335
FN
1872 tcg_gen_shli_i32(tmp2, tmp2, 8);
1873 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1874 }
1875 break;
1876 case 2:
da6b5335
FN
1877 tcg_gen_shli_i32(tmp2, tmp2, 16);
1878 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1879 break;
18c9b560 1880 }
da6b5335 1881 gen_set_nzcv(tmp);
7d1b0095
PM
1882 tcg_temp_free_i32(tmp2);
1883 tcg_temp_free_i32(tmp);
18c9b560
AZ
1884 break;
1885 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1886 rd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
da6b5335 1888 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1889 return 1;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1891 tmp = tcg_temp_new_i32();
18c9b560
AZ
1892 switch ((insn >> 22) & 3) {
1893 case 0:
da6b5335 1894 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1895 break;
1896 case 1:
da6b5335 1897 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1898 break;
1899 case 2:
da6b5335 1900 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1901 break;
18c9b560 1902 }
da6b5335 1903 store_reg(s, rd, tmp);
18c9b560
AZ
1904 break;
1905 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1906 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
1911 switch ((insn >> 22) & 3) {
1912 case 0:
1913 if (insn & (1 << 21))
1914 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1915 else
1916 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1917 break;
1918 case 1:
1919 if (insn & (1 << 21))
1920 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1923 break;
1924 case 2:
1925 if (insn & (1 << 21))
1926 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1927 else
1928 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1929 break;
1930 case 3:
1931 return 1;
1932 }
1933 gen_op_iwmmxt_movq_wRn_M0(wrd);
1934 gen_op_iwmmxt_set_mup();
1935 gen_op_iwmmxt_set_cup();
1936 break;
1937 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1938 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1939 wrd = (insn >> 12) & 0xf;
1940 rd0 = (insn >> 16) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_unpacklsb_M0();
1946 else
1947 gen_op_iwmmxt_unpacklub_M0();
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_unpacklsw_M0();
1952 else
1953 gen_op_iwmmxt_unpackluw_M0();
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_unpacklsl_M0();
1958 else
1959 gen_op_iwmmxt_unpacklul_M0();
1960 break;
1961 case 3:
1962 return 1;
1963 }
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1969 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpackhsb_M0();
1977 else
1978 gen_op_iwmmxt_unpackhub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpackhsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackhuw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpackhsl_M0();
1989 else
1990 gen_op_iwmmxt_unpackhul_M0();
1991 break;
1992 case 3:
1993 return 1;
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2000 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
18c9b560
AZ
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2006 tmp = tcg_temp_new_i32();
da6b5335 2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2008 tcg_temp_free_i32(tmp);
18c9b560 2009 return 1;
da6b5335 2010 }
18c9b560 2011 switch ((insn >> 22) & 3) {
18c9b560 2012 case 1:
477955bd 2013 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2014 break;
2015 case 2:
477955bd 2016 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2017 break;
2018 case 3:
477955bd 2019 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2020 break;
2021 }
7d1b0095 2022 tcg_temp_free_i32(tmp);
18c9b560
AZ
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2028 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
18c9b560
AZ
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2034 tmp = tcg_temp_new_i32();
da6b5335 2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2036 tcg_temp_free_i32(tmp);
18c9b560 2037 return 1;
da6b5335 2038 }
18c9b560 2039 switch ((insn >> 22) & 3) {
18c9b560 2040 case 1:
477955bd 2041 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2042 break;
2043 case 2:
477955bd 2044 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2045 break;
2046 case 3:
477955bd 2047 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2048 break;
2049 }
7d1b0095 2050 tcg_temp_free_i32(tmp);
18c9b560
AZ
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2056 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
18c9b560
AZ
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2062 tmp = tcg_temp_new_i32();
da6b5335 2063 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2064 tcg_temp_free_i32(tmp);
18c9b560 2065 return 1;
da6b5335 2066 }
18c9b560 2067 switch ((insn >> 22) & 3) {
18c9b560 2068 case 1:
477955bd 2069 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2070 break;
2071 case 2:
477955bd 2072 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2073 break;
2074 case 3:
477955bd 2075 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 }
7d1b0095 2078 tcg_temp_free_i32(tmp);
18c9b560
AZ
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2084 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2085 if (((insn >> 22) & 3) == 0)
2086 return 1;
18c9b560
AZ
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2090 tmp = tcg_temp_new_i32();
18c9b560 2091 switch ((insn >> 22) & 3) {
18c9b560 2092 case 1:
da6b5335 2093 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2094 tcg_temp_free_i32(tmp);
18c9b560 2095 return 1;
da6b5335 2096 }
477955bd 2097 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2098 break;
2099 case 2:
da6b5335 2100 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2101 tcg_temp_free_i32(tmp);
18c9b560 2102 return 1;
da6b5335 2103 }
477955bd 2104 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2105 break;
2106 case 3:
da6b5335 2107 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2108 tcg_temp_free_i32(tmp);
18c9b560 2109 return 1;
da6b5335 2110 }
477955bd 2111 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2112 break;
2113 }
7d1b0095 2114 tcg_temp_free_i32(tmp);
18c9b560
AZ
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2120 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 rd1 = (insn >> 0) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 switch ((insn >> 22) & 3) {
2126 case 0:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_minub_M0_wRn(rd1);
2131 break;
2132 case 1:
2133 if (insn & (1 << 21))
2134 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2135 else
2136 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2137 break;
2138 case 2:
2139 if (insn & (1 << 21))
2140 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2141 else
2142 gen_op_iwmmxt_minul_M0_wRn(rd1);
2143 break;
2144 case 3:
2145 return 1;
2146 }
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2151 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2182 case 0x402: case 0x502: case 0x602: case 0x702:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2187 tmp = tcg_const_i32((insn >> 20) & 3);
2188 iwmmxt_load_reg(cpu_V1, rd1);
2189 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2190 tcg_temp_free(tmp);
18c9b560
AZ
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 break;
2194 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2195 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2196 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2197 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 20) & 0xf) {
2203 case 0x0:
2204 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2205 break;
2206 case 0x1:
2207 gen_op_iwmmxt_subub_M0_wRn(rd1);
2208 break;
2209 case 0x3:
2210 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2211 break;
2212 case 0x4:
2213 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2214 break;
2215 case 0x5:
2216 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2217 break;
2218 case 0x7:
2219 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2220 break;
2221 case 0x8:
2222 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2223 break;
2224 case 0x9:
2225 gen_op_iwmmxt_subul_M0_wRn(rd1);
2226 break;
2227 case 0xb:
2228 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2229 break;
2230 default:
2231 return 1;
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2236 break;
2237 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2238 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2239 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2240 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2244 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2245 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2246 tcg_temp_free(tmp);
18c9b560
AZ
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2252 case 0x418: case 0x518: case 0x618: case 0x718:
2253 case 0x818: case 0x918: case 0xa18: case 0xb18:
2254 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 rd1 = (insn >> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 20) & 0xf) {
2260 case 0x0:
2261 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2262 break;
2263 case 0x1:
2264 gen_op_iwmmxt_addub_M0_wRn(rd1);
2265 break;
2266 case 0x3:
2267 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2268 break;
2269 case 0x4:
2270 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2271 break;
2272 case 0x5:
2273 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2274 break;
2275 case 0x7:
2276 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2277 break;
2278 case 0x8:
2279 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2280 break;
2281 case 0x9:
2282 gen_op_iwmmxt_addul_M0_wRn(rd1);
2283 break;
2284 case 0xb:
2285 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2286 break;
2287 default:
2288 return 1;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2295 case 0x408: case 0x508: case 0x608: case 0x708:
2296 case 0x808: case 0x908: case 0xa08: case 0xb08:
2297 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2298 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2299 return 1;
18c9b560
AZ
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2304 switch ((insn >> 22) & 3) {
18c9b560
AZ
2305 case 1:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2308 else
2309 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2310 break;
2311 case 2:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_packul_M0_wRn(rd1);
2316 break;
2317 case 3:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2322 break;
2323 }
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
2328 case 0x201: case 0x203: case 0x205: case 0x207:
2329 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2330 case 0x211: case 0x213: case 0x215: case 0x217:
2331 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2332 wrd = (insn >> 5) & 0xf;
2333 rd0 = (insn >> 12) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 if (rd0 == 0xf || rd1 == 0xf)
2336 return 1;
2337 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2338 tmp = load_reg(s, rd0);
2339 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2340 switch ((insn >> 16) & 0xf) {
2341 case 0x0: /* TMIA */
da6b5335 2342 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2343 break;
2344 case 0x8: /* TMIAPH */
da6b5335 2345 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2346 break;
2347 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2348 if (insn & (1 << 16))
da6b5335 2349 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2350 if (insn & (1 << 17))
da6b5335
FN
2351 tcg_gen_shri_i32(tmp2, tmp2, 16);
2352 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2353 break;
2354 default:
7d1b0095
PM
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
18c9b560
AZ
2357 return 1;
2358 }
7d1b0095
PM
2359 tcg_temp_free_i32(tmp2);
2360 tcg_temp_free_i32(tmp);
18c9b560
AZ
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 break;
2364 default:
2365 return 1;
2366 }
2367
2368 return 0;
2369}
2370
a1c7273b 2371/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560
AZ
2372 (ie. an undefined instruction). */
2373static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2374{
2375 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2376 TCGv tmp, tmp2;
18c9b560
AZ
2377
2378 if ((insn & 0x0ff00f10) == 0x0e200010) {
2379 /* Multiply with Internal Accumulate Format */
2380 rd0 = (insn >> 12) & 0xf;
2381 rd1 = insn & 0xf;
2382 acc = (insn >> 5) & 7;
2383
2384 if (acc != 0)
2385 return 1;
2386
3a554c0f
FN
2387 tmp = load_reg(s, rd0);
2388 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2389 switch ((insn >> 16) & 0xf) {
2390 case 0x0: /* MIA */
3a554c0f 2391 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2392 break;
2393 case 0x8: /* MIAPH */
3a554c0f 2394 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2395 break;
2396 case 0xc: /* MIABB */
2397 case 0xd: /* MIABT */
2398 case 0xe: /* MIATB */
2399 case 0xf: /* MIATT */
18c9b560 2400 if (insn & (1 << 16))
3a554c0f 2401 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2402 if (insn & (1 << 17))
3a554c0f
FN
2403 tcg_gen_shri_i32(tmp2, tmp2, 16);
2404 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2405 break;
2406 default:
2407 return 1;
2408 }
7d1b0095
PM
2409 tcg_temp_free_i32(tmp2);
2410 tcg_temp_free_i32(tmp);
18c9b560
AZ
2411
2412 gen_op_iwmmxt_movq_wRn_M0(acc);
2413 return 0;
2414 }
2415
2416 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2417 /* Internal Accumulator Access Format */
2418 rdhi = (insn >> 16) & 0xf;
2419 rdlo = (insn >> 12) & 0xf;
2420 acc = insn & 7;
2421
2422 if (acc != 0)
2423 return 1;
2424
2425 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2426 iwmmxt_load_reg(cpu_V0, acc);
2427 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2428 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2429 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2430 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2431 } else { /* MAR */
3a554c0f
FN
2432 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2433 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2434 }
2435 return 0;
2436 }
2437
2438 return 1;
2439}
2440
c1713132
AZ
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined. */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;   /* ARM register operand */
    uint32_t cp = (insn >> 8) & 0xf;    /* coprocessor number */
    /* Generic coprocessor accesses always UNDEF from user mode.  */
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        /* MRC: coprocessor -> ARM register, via the board's cp_read hook.  */
        if (!env->cp[cp].cp_read)
            return 1;
        /* Make the PC visible to the helper in case it raises an exception. */
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        /* MCR: ARM register -> coprocessor, via the board's cp_write hook.  */
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}
2473
/* Return nonzero if this cp15 access is permitted from user mode
 * (i.e. it should not UNDEF).  Only a handful of registers are
 * user-accessible: some v7 performance monitor registers and the
 * read-only TLS registers.
 */
static int cp15_user_ok(CPUState *env, uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;   /* CRn field */
    int cpm = insn & 0xf;           /* CRm field */
    /* Combined opcode: opc2 in bits [2:0], opc1 in bits [5:3].  */
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
        /* Performance monitor registers fall into three categories:
         * (a) always UNDEF in usermode
         * (b) UNDEF only if PMUSERENR.EN is 0
         * (c) always read OK and UNDEF on write (PMUSERENR only)
         */
        if ((cpm == 12 && (op < 6)) ||
            (cpm == 13 && (op < 3))) {
            /* Category (b): gated on the PMUSERENR enable bit.  */
            return env->cp15.c9_pmuserenr;
        } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
            /* PMUSERENR, read only */
            return 1;
        }
        return 0;
    }

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  Read-write user TLS (op 2) is always OK;
         * the read-only TLS register (op 3) only permits loads.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    return 0;
}
2503
3f26c122
RV
/* Handle MRC/MCR accesses to the V6K TLS registers (cp15, CRn=13, CRm=0,
 * opc2 2..4) directly, without going through the generic cp15 helpers.
 * Returns nonzero if the access was handled here; zero means the caller
 * should fall through to the generic cp15 path.
 */
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;   /* CRn field */
    int cpm = insn & 0xf;           /* CRm field */
    /* Combined opcode: opc2 in bits [2:0], opc1 in bits [5:3].  */
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    /* TLS registers only exist from V6K onwards.  */
    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        /* MRC: read the selected TLS register into rd.  */
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            /* Not a TLS opcode; let the generic path handle it.  */
            return 0;
        }
        store_reg(s, rd, tmp);

    } else {
        /* MCR: write rd to the selected TLS register.  */
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            /* Not a TLS opcode; free the temp and defer to generic path. */
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
2552
b5ff1b31
FB
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined. */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* We special case a number of cp15 instructions which were used
     * for things which are real instructions in ARMv7.  This allows
     * them to work in linux-user mode which doesn't provide functional
     * get_cp15/set_cp15 helpers, and is more efficient anyway.
     */
    switch ((insn & 0x0fff0fff)) {
    case 0x0e070f90:
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (IS_USER(s)) {
            return 1;
        }
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    case 0x0e070f58:
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise continue to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
        break;
    case 0x0e070f3d:
        /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
        if (arm_feature(env, ARM_FEATURE_V6)) {
            return IS_USER(s) ? 1 : 0;
        }
        break;
    case 0x0e070f95: /* 0,c7,c5,4 : ISB */
    case 0x0e070f9a: /* 0,c7,c10,4: DSB */
    case 0x0e070fba: /* 0,c7,c10,5: DMB */
        /* Barriers in both v6 and v7 */
        if (arm_feature(env, ARM_FEATURE_V6)) {
            return 0;
        }
        break;
    default:
        break;
    }

    /* All remaining cp15 accesses UNDEF from user mode unless the
     * register is specifically user-accessible.  */
    if (IS_USER(s) && !cp15_user_ok(env, insn)) {
        return 1;
    }

    rd = (insn >> 12) & 0xf;

    /* V6K TLS registers are handled inline; nonzero means done.  */
    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        /* MRC via the generic get_cp15 helper.  */
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        /* MCR via the generic set_cp15 helper.  */
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
2660
9ee6e8bb
PB
/* Helpers for decoding VFP register numbers from an instruction word.
 * A VFP register number is split across the insn: four bits in one field
 * plus a single extra bit elsewhere.  For single-precision the extra bit
 * is the LOW bit of the register number; for double-precision it is the
 * HIGH bit (and is only valid when VFP3's 32 D registers are present,
 * otherwise a set high bit makes the insn UNDEF via the early return).
 */
/* Shift right by n, where a negative n means shift left.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* D (destination), N and M operand register decoders.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2680
4373f3ce
PB
2681/* Move between integer and VFP cores. */
2682static TCGv gen_vfp_mrs(void)
2683{
7d1b0095 2684 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2685 tcg_gen_mov_i32(tmp, cpu_F0s);
2686 return tmp;
2687}
2688
2689static void gen_vfp_msr(TCGv tmp)
2690{
2691 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2692 tcg_temp_free_i32(tmp);
4373f3ce
PB
2693}
2694
ad69471c
PB
2695static void gen_neon_dup_u8(TCGv var, int shift)
2696{
7d1b0095 2697 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2698 if (shift)
2699 tcg_gen_shri_i32(var, var, shift);
86831435 2700 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2701 tcg_gen_shli_i32(tmp, var, 8);
2702 tcg_gen_or_i32(var, var, tmp);
2703 tcg_gen_shli_i32(tmp, var, 16);
2704 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2705 tcg_temp_free_i32(tmp);
ad69471c
PB
2706}
2707
2708static void gen_neon_dup_low16(TCGv var)
2709{
7d1b0095 2710 TCGv tmp = tcg_temp_new_i32();
86831435 2711 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2712 tcg_gen_shli_i32(tmp, var, 16);
2713 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2714 tcg_temp_free_i32(tmp);
ad69471c
PB
2715}
2716
2717static void gen_neon_dup_high16(TCGv var)
2718{
7d1b0095 2719 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2720 tcg_gen_andi_i32(var, var, 0xffff0000);
2721 tcg_gen_shri_i32(tmp, var, 16);
2722 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2723 tcg_temp_free_i32(tmp);
ad69471c
PB
2724}
2725
8e18cde3
PM
2726static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2727{
2728 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2729 TCGv tmp;
2730 switch (size) {
2731 case 0:
2732 tmp = gen_ld8u(addr, IS_USER(s));
2733 gen_neon_dup_u8(tmp, 0);
2734 break;
2735 case 1:
2736 tmp = gen_ld16u(addr, IS_USER(s));
2737 gen_neon_dup_low16(tmp);
2738 break;
2739 case 2:
2740 tmp = gen_ld32(addr, IS_USER(s));
2741 break;
2742 default: /* Avoid compiler warnings. */
2743 abort();
2744 }
2745 return tmp;
2746}
2747
a1c7273b 2748/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95
FB
2749 (ie. an undefined instruction). */
2750static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2751{
2752 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2753 int dp, veclen;
312eea9f 2754 TCGv addr;
4373f3ce 2755 TCGv tmp;
ad69471c 2756 TCGv tmp2;
b7bcbe95 2757
40f137e1
PB
2758 if (!arm_feature(env, ARM_FEATURE_VFP))
2759 return 1;
2760
5df8bac1 2761 if (!s->vfp_enabled) {
9ee6e8bb 2762 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2763 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2764 return 1;
2765 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2766 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2767 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2768 return 1;
2769 }
b7bcbe95
FB
2770 dp = ((insn & 0xf00) == 0xb00);
2771 switch ((insn >> 24) & 0xf) {
2772 case 0xe:
2773 if (insn & (1 << 4)) {
2774 /* single register transfer */
b7bcbe95
FB
2775 rd = (insn >> 12) & 0xf;
2776 if (dp) {
9ee6e8bb
PB
2777 int size;
2778 int pass;
2779
2780 VFP_DREG_N(rn, insn);
2781 if (insn & 0xf)
b7bcbe95 2782 return 1;
9ee6e8bb
PB
2783 if (insn & 0x00c00060
2784 && !arm_feature(env, ARM_FEATURE_NEON))
2785 return 1;
2786
2787 pass = (insn >> 21) & 1;
2788 if (insn & (1 << 22)) {
2789 size = 0;
2790 offset = ((insn >> 5) & 3) * 8;
2791 } else if (insn & (1 << 5)) {
2792 size = 1;
2793 offset = (insn & (1 << 6)) ? 16 : 0;
2794 } else {
2795 size = 2;
2796 offset = 0;
2797 }
18c9b560 2798 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2799 /* vfp->arm */
ad69471c 2800 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2801 switch (size) {
2802 case 0:
9ee6e8bb 2803 if (offset)
ad69471c 2804 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2805 if (insn & (1 << 23))
ad69471c 2806 gen_uxtb(tmp);
9ee6e8bb 2807 else
ad69471c 2808 gen_sxtb(tmp);
9ee6e8bb
PB
2809 break;
2810 case 1:
9ee6e8bb
PB
2811 if (insn & (1 << 23)) {
2812 if (offset) {
ad69471c 2813 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2814 } else {
ad69471c 2815 gen_uxth(tmp);
9ee6e8bb
PB
2816 }
2817 } else {
2818 if (offset) {
ad69471c 2819 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2820 } else {
ad69471c 2821 gen_sxth(tmp);
9ee6e8bb
PB
2822 }
2823 }
2824 break;
2825 case 2:
9ee6e8bb
PB
2826 break;
2827 }
ad69471c 2828 store_reg(s, rd, tmp);
b7bcbe95
FB
2829 } else {
2830 /* arm->vfp */
ad69471c 2831 tmp = load_reg(s, rd);
9ee6e8bb
PB
2832 if (insn & (1 << 23)) {
2833 /* VDUP */
2834 if (size == 0) {
ad69471c 2835 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2836 } else if (size == 1) {
ad69471c 2837 gen_neon_dup_low16(tmp);
9ee6e8bb 2838 }
cbbccffc 2839 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2840 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2841 tcg_gen_mov_i32(tmp2, tmp);
2842 neon_store_reg(rn, n, tmp2);
2843 }
2844 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2845 } else {
2846 /* VMOV */
2847 switch (size) {
2848 case 0:
ad69471c
PB
2849 tmp2 = neon_load_reg(rn, pass);
2850 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2851 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2852 break;
2853 case 1:
ad69471c
PB
2854 tmp2 = neon_load_reg(rn, pass);
2855 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2856 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2857 break;
2858 case 2:
9ee6e8bb
PB
2859 break;
2860 }
ad69471c 2861 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2862 }
b7bcbe95 2863 }
9ee6e8bb
PB
2864 } else { /* !dp */
2865 if ((insn & 0x6f) != 0x00)
2866 return 1;
2867 rn = VFP_SREG_N(insn);
18c9b560 2868 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2869 /* vfp->arm */
2870 if (insn & (1 << 21)) {
2871 /* system register */
40f137e1 2872 rn >>= 1;
9ee6e8bb 2873
b7bcbe95 2874 switch (rn) {
40f137e1 2875 case ARM_VFP_FPSID:
4373f3ce 2876 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2877 VFP3 restricts all id registers to privileged
2878 accesses. */
2879 if (IS_USER(s)
2880 && arm_feature(env, ARM_FEATURE_VFP3))
2881 return 1;
4373f3ce 2882 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2883 break;
40f137e1 2884 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2885 if (IS_USER(s))
2886 return 1;
4373f3ce 2887 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2888 break;
40f137e1
PB
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2891 /* Not present in VFP3. */
2892 if (IS_USER(s)
2893 || arm_feature(env, ARM_FEATURE_VFP3))
2894 return 1;
4373f3ce 2895 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2896 break;
40f137e1 2897 case ARM_VFP_FPSCR:
601d70b9 2898 if (rd == 15) {
4373f3ce
PB
2899 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2900 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2901 } else {
7d1b0095 2902 tmp = tcg_temp_new_i32();
4373f3ce
PB
2903 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2904 }
b7bcbe95 2905 break;
9ee6e8bb
PB
2906 case ARM_VFP_MVFR0:
2907 case ARM_VFP_MVFR1:
2908 if (IS_USER(s)
2909 || !arm_feature(env, ARM_FEATURE_VFP3))
2910 return 1;
4373f3ce 2911 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2912 break;
b7bcbe95
FB
2913 default:
2914 return 1;
2915 }
2916 } else {
2917 gen_mov_F0_vreg(0, rn);
4373f3ce 2918 tmp = gen_vfp_mrs();
b7bcbe95
FB
2919 }
2920 if (rd == 15) {
b5ff1b31 2921 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2922 gen_set_nzcv(tmp);
7d1b0095 2923 tcg_temp_free_i32(tmp);
4373f3ce
PB
2924 } else {
2925 store_reg(s, rd, tmp);
2926 }
b7bcbe95
FB
2927 } else {
2928 /* arm->vfp */
4373f3ce 2929 tmp = load_reg(s, rd);
b7bcbe95 2930 if (insn & (1 << 21)) {
40f137e1 2931 rn >>= 1;
b7bcbe95
FB
2932 /* system register */
2933 switch (rn) {
40f137e1 2934 case ARM_VFP_FPSID:
9ee6e8bb
PB
2935 case ARM_VFP_MVFR0:
2936 case ARM_VFP_MVFR1:
b7bcbe95
FB
2937 /* Writes are ignored. */
2938 break;
40f137e1 2939 case ARM_VFP_FPSCR:
4373f3ce 2940 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2941 tcg_temp_free_i32(tmp);
b5ff1b31 2942 gen_lookup_tb(s);
b7bcbe95 2943 break;
40f137e1 2944 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2945 if (IS_USER(s))
2946 return 1;
71b3c3de
JR
2947 /* TODO: VFP subarchitecture support.
2948 * For now, keep the EN bit only */
2949 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2950 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2951 gen_lookup_tb(s);
2952 break;
2953 case ARM_VFP_FPINST:
2954 case ARM_VFP_FPINST2:
4373f3ce 2955 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2956 break;
b7bcbe95
FB
2957 default:
2958 return 1;
2959 }
2960 } else {
4373f3ce 2961 gen_vfp_msr(tmp);
b7bcbe95
FB
2962 gen_mov_vreg_F0(0, rn);
2963 }
2964 }
2965 }
2966 } else {
2967 /* data processing */
2968 /* The opcode is in bits 23, 21, 20 and 6. */
2969 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2970 if (dp) {
2971 if (op == 15) {
2972 /* rn is opcode */
2973 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2974 } else {
2975 /* rn is register number */
9ee6e8bb 2976 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2977 }
2978
04595bf6 2979 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2980 /* Integer or single precision destination. */
9ee6e8bb 2981 rd = VFP_SREG_D(insn);
b7bcbe95 2982 } else {
9ee6e8bb 2983 VFP_DREG_D(rd, insn);
b7bcbe95 2984 }
04595bf6
PM
2985 if (op == 15 &&
2986 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2987 /* VCVT from int is always from S reg regardless of dp bit.
2988 * VCVT with immediate frac_bits has same format as SREG_M
2989 */
2990 rm = VFP_SREG_M(insn);
b7bcbe95 2991 } else {
9ee6e8bb 2992 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2993 }
2994 } else {
9ee6e8bb 2995 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2996 if (op == 15 && rn == 15) {
2997 /* Double precision destination. */
9ee6e8bb
PB
2998 VFP_DREG_D(rd, insn);
2999 } else {
3000 rd = VFP_SREG_D(insn);
3001 }
04595bf6
PM
3002 /* NB that we implicitly rely on the encoding for the frac_bits
3003 * in VCVT of fixed to float being the same as that of an SREG_M
3004 */
9ee6e8bb 3005 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3006 }
3007
69d1fc22 3008 veclen = s->vec_len;
b7bcbe95
FB
3009 if (op == 15 && rn > 3)
3010 veclen = 0;
3011
3012 /* Shut up compiler warnings. */
3013 delta_m = 0;
3014 delta_d = 0;
3015 bank_mask = 0;
3b46e624 3016
b7bcbe95
FB
3017 if (veclen > 0) {
3018 if (dp)
3019 bank_mask = 0xc;
3020 else
3021 bank_mask = 0x18;
3022
3023 /* Figure out what type of vector operation this is. */
3024 if ((rd & bank_mask) == 0) {
3025 /* scalar */
3026 veclen = 0;
3027 } else {
3028 if (dp)
69d1fc22 3029 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3030 else
69d1fc22 3031 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3032
3033 if ((rm & bank_mask) == 0) {
3034 /* mixed scalar/vector */
3035 delta_m = 0;
3036 } else {
3037 /* vector */
3038 delta_m = delta_d;
3039 }
3040 }
3041 }
3042
3043 /* Load the initial operands. */
3044 if (op == 15) {
3045 switch (rn) {
3046 case 16:
3047 case 17:
3048 /* Integer source */
3049 gen_mov_F0_vreg(0, rm);
3050 break;
3051 case 8:
3052 case 9:
3053 /* Compare */
3054 gen_mov_F0_vreg(dp, rd);
3055 gen_mov_F1_vreg(dp, rm);
3056 break;
3057 case 10:
3058 case 11:
3059 /* Compare with zero */
3060 gen_mov_F0_vreg(dp, rd);
3061 gen_vfp_F1_ld0(dp);
3062 break;
9ee6e8bb
PB
3063 case 20:
3064 case 21:
3065 case 22:
3066 case 23:
644ad806
PB
3067 case 28:
3068 case 29:
3069 case 30:
3070 case 31:
9ee6e8bb
PB
3071 /* Source and destination the same. */
3072 gen_mov_F0_vreg(dp, rd);
3073 break;
6e0c0ed1
PM
3074 case 4:
3075 case 5:
3076 case 6:
3077 case 7:
3078 /* VCVTB, VCVTT: only present with the halfprec extension,
3079 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3080 */
3081 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3082 return 1;
3083 }
3084 /* Otherwise fall through */
b7bcbe95
FB
3085 default:
3086 /* One source operand. */
3087 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3088 break;
b7bcbe95
FB
3089 }
3090 } else {
3091 /* Two source operands. */
3092 gen_mov_F0_vreg(dp, rn);
3093 gen_mov_F1_vreg(dp, rm);
3094 }
3095
3096 for (;;) {
3097 /* Perform the calculation. */
3098 switch (op) {
605a6aed
PM
3099 case 0: /* VMLA: fd + (fn * fm) */
3100 /* Note that order of inputs to the add matters for NaNs */
3101 gen_vfp_F1_mul(dp);
3102 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3103 gen_vfp_add(dp);
3104 break;
605a6aed 3105 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3106 gen_vfp_mul(dp);
605a6aed
PM
3107 gen_vfp_F1_neg(dp);
3108 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3109 gen_vfp_add(dp);
3110 break;
605a6aed
PM
3111 case 2: /* VNMLS: -fd + (fn * fm) */
3112 /* Note that it isn't valid to replace (-A + B) with (B - A)
3113 * or similar plausible looking simplifications
3114 * because this will give wrong results for NaNs.
3115 */
3116 gen_vfp_F1_mul(dp);
3117 gen_mov_F0_vreg(dp, rd);
3118 gen_vfp_neg(dp);
3119 gen_vfp_add(dp);
b7bcbe95 3120 break;
605a6aed 3121 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3122 gen_vfp_mul(dp);
605a6aed
PM
3123 gen_vfp_F1_neg(dp);
3124 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3125 gen_vfp_neg(dp);
605a6aed 3126 gen_vfp_add(dp);
b7bcbe95
FB
3127 break;
3128 case 4: /* mul: fn * fm */
3129 gen_vfp_mul(dp);
3130 break;
3131 case 5: /* nmul: -(fn * fm) */
3132 gen_vfp_mul(dp);
3133 gen_vfp_neg(dp);
3134 break;
3135 case 6: /* add: fn + fm */
3136 gen_vfp_add(dp);
3137 break;
3138 case 7: /* sub: fn - fm */
3139 gen_vfp_sub(dp);
3140 break;
3141 case 8: /* div: fn / fm */
3142 gen_vfp_div(dp);
3143 break;
9ee6e8bb
PB
3144 case 14: /* fconst */
3145 if (!arm_feature(env, ARM_FEATURE_VFP3))
3146 return 1;
3147
3148 n = (insn << 12) & 0x80000000;
3149 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3150 if (dp) {
3151 if (i & 0x40)
3152 i |= 0x3f80;
3153 else
3154 i |= 0x4000;
3155 n |= i << 16;
4373f3ce 3156 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3157 } else {
3158 if (i & 0x40)
3159 i |= 0x780;
3160 else
3161 i |= 0x800;
3162 n |= i << 19;
5b340b51 3163 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3164 }
9ee6e8bb 3165 break;
b7bcbe95
FB
3166 case 15: /* extension space */
3167 switch (rn) {
3168 case 0: /* cpy */
3169 /* no-op */
3170 break;
3171 case 1: /* abs */
3172 gen_vfp_abs(dp);
3173 break;
3174 case 2: /* neg */
3175 gen_vfp_neg(dp);
3176 break;
3177 case 3: /* sqrt */
3178 gen_vfp_sqrt(dp);
3179 break;
60011498 3180 case 4: /* vcvtb.f32.f16 */
60011498
PB
3181 tmp = gen_vfp_mrs();
3182 tcg_gen_ext16u_i32(tmp, tmp);
3183 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3184 tcg_temp_free_i32(tmp);
60011498
PB
3185 break;
3186 case 5: /* vcvtt.f32.f16 */
60011498
PB
3187 tmp = gen_vfp_mrs();
3188 tcg_gen_shri_i32(tmp, tmp, 16);
3189 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3190 tcg_temp_free_i32(tmp);
60011498
PB
3191 break;
3192 case 6: /* vcvtb.f16.f32 */
7d1b0095 3193 tmp = tcg_temp_new_i32();
60011498
PB
3194 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3195 gen_mov_F0_vreg(0, rd);
3196 tmp2 = gen_vfp_mrs();
3197 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3198 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3199 tcg_temp_free_i32(tmp2);
60011498
PB
3200 gen_vfp_msr(tmp);
3201 break;
3202 case 7: /* vcvtt.f16.f32 */
7d1b0095 3203 tmp = tcg_temp_new_i32();
60011498
PB
3204 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3205 tcg_gen_shli_i32(tmp, tmp, 16);
3206 gen_mov_F0_vreg(0, rd);
3207 tmp2 = gen_vfp_mrs();
3208 tcg_gen_ext16u_i32(tmp2, tmp2);
3209 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3210 tcg_temp_free_i32(tmp2);
60011498
PB
3211 gen_vfp_msr(tmp);
3212 break;
b7bcbe95
FB
3213 case 8: /* cmp */
3214 gen_vfp_cmp(dp);
3215 break;
3216 case 9: /* cmpe */
3217 gen_vfp_cmpe(dp);
3218 break;
3219 case 10: /* cmpz */
3220 gen_vfp_cmp(dp);
3221 break;
3222 case 11: /* cmpez */
3223 gen_vfp_F1_ld0(dp);
3224 gen_vfp_cmpe(dp);
3225 break;
3226 case 15: /* single<->double conversion */
3227 if (dp)
4373f3ce 3228 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3229 else
4373f3ce 3230 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3231 break;
3232 case 16: /* fuito */
5500b06c 3233 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3234 break;
3235 case 17: /* fsito */
5500b06c 3236 gen_vfp_sito(dp, 0);
b7bcbe95 3237 break;
9ee6e8bb
PB
3238 case 20: /* fshto */
3239 if (!arm_feature(env, ARM_FEATURE_VFP3))
3240 return 1;
5500b06c 3241 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3242 break;
3243 case 21: /* fslto */
3244 if (!arm_feature(env, ARM_FEATURE_VFP3))
3245 return 1;
5500b06c 3246 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3247 break;
3248 case 22: /* fuhto */
3249 if (!arm_feature(env, ARM_FEATURE_VFP3))
3250 return 1;
5500b06c 3251 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3252 break;
3253 case 23: /* fulto */
3254 if (!arm_feature(env, ARM_FEATURE_VFP3))
3255 return 1;
5500b06c 3256 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3257 break;
b7bcbe95 3258 case 24: /* ftoui */
5500b06c 3259 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3260 break;
3261 case 25: /* ftouiz */
5500b06c 3262 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3263 break;
3264 case 26: /* ftosi */
5500b06c 3265 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3266 break;
3267 case 27: /* ftosiz */
5500b06c 3268 gen_vfp_tosiz(dp, 0);
b7bcbe95 3269 break;
9ee6e8bb
PB
3270 case 28: /* ftosh */
3271 if (!arm_feature(env, ARM_FEATURE_VFP3))
3272 return 1;
5500b06c 3273 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3274 break;
3275 case 29: /* ftosl */
3276 if (!arm_feature(env, ARM_FEATURE_VFP3))
3277 return 1;
5500b06c 3278 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3279 break;
3280 case 30: /* ftouh */
3281 if (!arm_feature(env, ARM_FEATURE_VFP3))
3282 return 1;
5500b06c 3283 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3284 break;
3285 case 31: /* ftoul */
3286 if (!arm_feature(env, ARM_FEATURE_VFP3))
3287 return 1;
5500b06c 3288 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3289 break;
b7bcbe95 3290 default: /* undefined */
b7bcbe95
FB
3291 return 1;
3292 }
3293 break;
3294 default: /* undefined */
b7bcbe95
FB
3295 return 1;
3296 }
3297
3298 /* Write back the result. */
3299 if (op == 15 && (rn >= 8 && rn <= 11))
3300 ; /* Comparison, do nothing. */
04595bf6
PM
3301 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3302 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3303 gen_mov_vreg_F0(0, rd);
3304 else if (op == 15 && rn == 15)
3305 /* conversion */
3306 gen_mov_vreg_F0(!dp, rd);
3307 else
3308 gen_mov_vreg_F0(dp, rd);
3309
3310 /* break out of the loop if we have finished */
3311 if (veclen == 0)
3312 break;
3313
3314 if (op == 15 && delta_m == 0) {
3315 /* single source one-many */
3316 while (veclen--) {
3317 rd = ((rd + delta_d) & (bank_mask - 1))
3318 | (rd & bank_mask);
3319 gen_mov_vreg_F0(dp, rd);
3320 }
3321 break;
3322 }
3323 /* Setup the next operands. */
3324 veclen--;
3325 rd = ((rd + delta_d) & (bank_mask - 1))
3326 | (rd & bank_mask);
3327
3328 if (op == 15) {
3329 /* One source operand. */
3330 rm = ((rm + delta_m) & (bank_mask - 1))
3331 | (rm & bank_mask);
3332 gen_mov_F0_vreg(dp, rm);
3333 } else {
3334 /* Two source operands. */
3335 rn = ((rn + delta_d) & (bank_mask - 1))
3336 | (rn & bank_mask);
3337 gen_mov_F0_vreg(dp, rn);
3338 if (delta_m) {
3339 rm = ((rm + delta_m) & (bank_mask - 1))
3340 | (rm & bank_mask);
3341 gen_mov_F1_vreg(dp, rm);
3342 }
3343 }
3344 }
3345 }
3346 break;
3347 case 0xc:
3348 case 0xd:
8387da81 3349 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3350 /* two-register transfer */
3351 rn = (insn >> 16) & 0xf;
3352 rd = (insn >> 12) & 0xf;
3353 if (dp) {
9ee6e8bb
PB
3354 VFP_DREG_M(rm, insn);
3355 } else {
3356 rm = VFP_SREG_M(insn);
3357 }
b7bcbe95 3358
18c9b560 3359 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3360 /* vfp->arm */
3361 if (dp) {
4373f3ce
PB
3362 gen_mov_F0_vreg(0, rm * 2);
3363 tmp = gen_vfp_mrs();
3364 store_reg(s, rd, tmp);
3365 gen_mov_F0_vreg(0, rm * 2 + 1);
3366 tmp = gen_vfp_mrs();
3367 store_reg(s, rn, tmp);
b7bcbe95
FB
3368 } else {
3369 gen_mov_F0_vreg(0, rm);
4373f3ce 3370 tmp = gen_vfp_mrs();
8387da81 3371 store_reg(s, rd, tmp);
b7bcbe95 3372 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3373 tmp = gen_vfp_mrs();
8387da81 3374 store_reg(s, rn, tmp);
b7bcbe95
FB
3375 }
3376 } else {
3377 /* arm->vfp */
3378 if (dp) {
4373f3ce
PB
3379 tmp = load_reg(s, rd);
3380 gen_vfp_msr(tmp);
3381 gen_mov_vreg_F0(0, rm * 2);
3382 tmp = load_reg(s, rn);
3383 gen_vfp_msr(tmp);
3384 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3385 } else {
8387da81 3386 tmp = load_reg(s, rd);
4373f3ce 3387 gen_vfp_msr(tmp);
b7bcbe95 3388 gen_mov_vreg_F0(0, rm);
8387da81 3389 tmp = load_reg(s, rn);
4373f3ce 3390 gen_vfp_msr(tmp);
b7bcbe95
FB
3391 gen_mov_vreg_F0(0, rm + 1);
3392 }
3393 }
3394 } else {
3395 /* Load/store */
3396 rn = (insn >> 16) & 0xf;
3397 if (dp)
9ee6e8bb 3398 VFP_DREG_D(rd, insn);
b7bcbe95 3399 else
9ee6e8bb 3400 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3401 if ((insn & 0x01200000) == 0x01000000) {
3402 /* Single load/store */
3403 offset = (insn & 0xff) << 2;
3404 if ((insn & (1 << 23)) == 0)
3405 offset = -offset;
934814f1
PM
3406 if (s->thumb && rn == 15) {
3407 /* This is actually UNPREDICTABLE */
3408 addr = tcg_temp_new_i32();
3409 tcg_gen_movi_i32(addr, s->pc & ~2);
3410 } else {
3411 addr = load_reg(s, rn);
3412 }
312eea9f 3413 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3414 if (insn & (1 << 20)) {
312eea9f 3415 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3416 gen_mov_vreg_F0(dp, rd);
3417 } else {
3418 gen_mov_F0_vreg(dp, rd);
312eea9f 3419 gen_vfp_st(s, dp, addr);
b7bcbe95 3420 }
7d1b0095 3421 tcg_temp_free_i32(addr);
b7bcbe95
FB
3422 } else {
3423 /* load/store multiple */
934814f1 3424 int w = insn & (1 << 21);
b7bcbe95
FB
3425 if (dp)
3426 n = (insn >> 1) & 0x7f;
3427 else
3428 n = insn & 0xff;
3429
934814f1
PM
3430 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3431 /* P == U , W == 1 => UNDEF */
3432 return 1;
3433 }
3434 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3435 /* UNPREDICTABLE cases for bad immediates: we choose to
3436 * UNDEF to avoid generating huge numbers of TCG ops
3437 */
3438 return 1;
3439 }
3440 if (rn == 15 && w) {
3441 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3442 return 1;
3443 }
3444
3445 if (s->thumb && rn == 15) {
3446 /* This is actually UNPREDICTABLE */
3447 addr = tcg_temp_new_i32();
3448 tcg_gen_movi_i32(addr, s->pc & ~2);
3449 } else {
3450 addr = load_reg(s, rn);
3451 }
b7bcbe95 3452 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3453 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3454
3455 if (dp)
3456 offset = 8;
3457 else
3458 offset = 4;
3459 for (i = 0; i < n; i++) {
18c9b560 3460 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3461 /* load */
312eea9f 3462 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3463 gen_mov_vreg_F0(dp, rd + i);
3464 } else {
3465 /* store */
3466 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3467 gen_vfp_st(s, dp, addr);
b7bcbe95 3468 }
312eea9f 3469 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3470 }
934814f1 3471 if (w) {
b7bcbe95
FB
3472 /* writeback */
3473 if (insn & (1 << 24))
3474 offset = -offset * n;
3475 else if (dp && (insn & 1))
3476 offset = 4;
3477 else
3478 offset = 0;
3479
3480 if (offset != 0)
312eea9f
FN
3481 tcg_gen_addi_i32(addr, addr, offset);
3482 store_reg(s, rn, addr);
3483 } else {
7d1b0095 3484 tcg_temp_free_i32(addr);
b7bcbe95
FB
3485 }
3486 }
3487 }
3488 break;
3489 default:
3490 /* Should never happen. */
3491 return 1;
3492 }
3493 return 0;
3494}
3495
/* Emit a jump to 'dest'.  If the target is within the same guest page as
 * the start of the current TB we can chain TBs directly (goto_tb slot 'n',
 * n is 0 or 1); otherwise fall back to setting PC and exiting to the
 * main loop, which will look the destination TB up.
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* Same page: direct chaining is safe.  PC must still be written
         * before exit_tb in case the link is not yet patched.  */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        /* Low bits of the return value encode which goto_tb slot to patch. */
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
/* Emit an unconditional jump to 'dest', preferring direct TB chaining.
 * Under single-stepping we instead emit an indirect branch so the debug
 * exception machinery still fires after the jump.
 */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;  /* keep the Thumb bit set in the branch target */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
d9ba4830 3524static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3525{
ee097184 3526 if (x)
d9ba4830 3527 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3528 else
d9ba4830 3529 gen_sxth(t0);
ee097184 3530 if (y)
d9ba4830 3531 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3532 else
d9ba4830
PB
3533 gen_sxth(t1);
3534 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3535}
3536
/* Return the mask of PSR bits set by a MSR instruction.
 * 'flags' is the 4-bit field selector from the instruction: each bit
 * enables one byte of the PSR (bit 0 -> bits [7:0] ... bit 3 -> [31:24]).
 * The raw mask is then trimmed down to the bits that actually exist on
 * this CPU, and to the bits the current mode is allowed to write.
 */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality*/
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits (T, IT, J...) unless writing the SPSR:
     * an MSR to CPSR must not change them behind the translator's back.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead.
 * Writes the bits of t0 selected by 'mask' into either the SPSR of the
 * current mode (spsr nonzero: read-modify-write of the saved PSR field)
 * or the live CPSR (via gen_set_cpsr, which handles side effects).
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);      /* clear selected bits */
        tcg_gen_andi_i32(t0, t0, mask);         /* keep only new bits  */
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR changes may affect how subsequent code is translated. */
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.
 * Immediate-operand convenience wrapper around gen_set_psr().
 */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);  /* consumes tmp */
}
/* Generate an old-style exception return. Marks pc as dead.
 * Writes 'pc' to r15 and restores the whole CPSR from the SPSR
 * (mask 0xffffffff), then forces the translator to stop this TB.
 */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.
 * Unlike gen_exception_return the new PC and CPSR come from memory
 * (RFE-style); both TCG temps are consumed here.
 */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
/* Sync the Thumb IT-block state (condexec bits) from the DisasContext
 * back into the CPU state, so an exception taken mid-IT-block sees the
 * correct value.  No-op when not inside an IT block.
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack cond[3:0] and the remaining mask into the
         * architectural encoding (mask already advanced by one step). */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
/* Raise exception 'excp' for the instruction currently being translated.
 * 'offset' is the byte length of that instruction, i.e. how far s->pc has
 * already advanced past it, so the exception PC points at the insn itself.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);            /* keep IT state consistent */
    gen_set_pc_im(s->pc - offset);  /* rewind to the faulting insn */
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
/* Handle the NOP-compatible hint space (NOP/YIELD/WFE/WFI/SEV).
 * Only WFI has an effect here: it halts translation and lets the CPU
 * wait for an interrupt.  Everything else is treated as a NOP.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
/* Shorthand argument list for 2-operand 64-bit Neon ops: dest and first
 * source are both cpu_V0, second source is cpu_V1.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
/* Elementwise Neon add, t0 += t1, for element size 0 (u8), 1 (u16) or
 * 2 (u32).  Wrapping add, so signedness is irrelevant.  Other sizes are
 * decoder bugs and abort.
 */
static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}
dd8fbd78 3667static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3668{
3669 switch (size) {
dd8fbd78
FN
3670 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3671 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3672 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3673 default: return;
3674 }
3675}
3676
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
/* (With one 32-bit element per register half, "pairwise" max/min over a
 * pair of registers degenerates to the plain elementwise helper.)  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
/* Dispatch a Neon integer helper that needs cpu_env, keyed on
 * (size << 1) | u: size selects 8/16/32-bit elements, u selects
 * unsigned.  Operates on tmp/tmp2 in the enclosing scope; expands to
 * 'return 1' (UNDEF) for 64-bit element encodings.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
/* Same dispatch as GEN_NEON_INTEGER_OP_ENV but for helpers that do not
 * take cpu_env.  Keyed on (size << 1) | u; uses tmp/tmp2 from the
 * enclosing scope and returns 1 (UNDEF) for unsupported encodings.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3728
/* Load 32 bits from Neon scratch slot 'scratch' in the CPU state into a
 * fresh TCG temp.  Caller owns (and must free) the returned temp.
 */
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
3735
/* Store 'var' into Neon scratch slot 'scratch' in the CPU state.
 * Consumes (frees) var.
 */
static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
/* Fetch a Neon scalar operand and duplicate it across a 32-bit value.
 * 'reg' is the scalar index encoding: low bits select the D register,
 * reg >> 4 the word within it; for 16-bit scalars bit 3 picks the high
 * or low halfword, which is then broadcast to both halves.
 * Returns a fresh temp owned by the caller.
 */
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        /* 32-bit scalar: no duplication needed within the word. */
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3757
/* Emit a Neon VUZP (unzip/de-interleave) of registers rd and rm.
 * 'q' selects quad (128-bit) vs double (64-bit) operation, 'size' the
 * element width.  Returns 1 for the invalid !q && size==2 encoding
 * (64-bit VUZP.32 is a VTRN and is handled elsewhere), else 0.
 * The helpers take register *indices*, passed as const temps.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
3796
/* Emit a Neon VZIP (interleave) of registers rd and rm; mirror image of
 * gen_neon_unzip().  Returns 1 for the invalid !q && size==2 encoding,
 * else 0.  Register indices are passed to the helpers as const temps.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
3835
/* One step of VTRN.8: transpose byte lanes between t0 and t1.
 * After this, t0 holds {t1.b2, t0.b2, t1.b0, t0.b0}-style interleaving
 * of the even bytes and t1 the odd bytes (in-place, via a scratch).
 */
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 << 8) masked to odd bytes, merged with even bytes of t1. */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 >> 8) masked to even bytes, merged with odd bytes of t0. */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3857
/* One step of VTRN.16: swap the high halfword of t0 with the low
 * halfword of t1 (16-bit lane transpose between the two words).
 */
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = low half of t1 placed above low half of t0's partner:
     * rd = (t0 << 16) | (t1 & 0xffff). */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = (t1 >> 16) | (t0 & 0xffff0000). */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3876
3877
/* Decode table for the "load/store multiple structures" forms of the
 * Neon element load/store instructions, indexed by the op field
 * (insn bits [11:8], valid values 0..10 — see disas_neon_ls_insn):
 *   nregs      - number of D registers transferred
 *   interleave - element interleaving factor in memory
 *   spacing    - register-number step between transferred registers
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3895
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Three major forms, selected by insn bit 23 and the size field:
     - bit 23 clear: load/store multiple structures (VLDn/VSTn, all lanes
       of the registers), driven by the neon_ls_element_type table;
     - bit 23 set, size==3: load single element to all lanes (VLDn dup);
     - bit 23 set, otherwise: load/store a single element to one lane.
   Post-indexed addressing: rm==15 means no writeback, rm==13 means
   writeback by the transfer size, any other rm adds that register.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved layouts each register restarts from the
             * base with an element-sized offset rather than running on
             * sequentially.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access. */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Two 32-bit passes per D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two halfword accesses assembled into one word. */
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into one word. */
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;  /* total bytes transferred, for writeback */
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert loaded element into the existing word. */
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Post-indexed writeback of the base register. */
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            /* rm == 13: increment by the transfer size. */
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4203
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.
 * Per-bit: dest = (t & c) | (f & ~c).
 */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4211
a7812ae4 4212static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4213{
4214 switch (size) {
4215 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4216 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4217 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4218 default: abort();
4219 }
4220}
4221
/* Narrow the 64-bit value src into dest with signed saturation
 * (signed VQMOVN). size selects the destination element width:
 * 0 => 8-bit, 1 => 16-bit, 2 => 32-bit. cpu_env is passed so the
 * helper can record saturation in CPU state (presumably the QC
 * flag -- confirm in the helper implementation).
 */
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
4231
/* Narrow the 64-bit value src into dest with unsigned saturation
 * (unsigned VQMOVN). size selects the destination element width:
 * 0 => 8-bit, 1 => 16-bit, 2 => 32-bit. cpu_env is passed so the
 * helper can record saturation in CPU state (presumably the QC
 * flag -- confirm in the helper implementation).
 */
static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
4241
/* Narrow the signed 64-bit value src into dest with saturation to an
 * unsigned result (VQMOVUN). size selects the destination element
 * width: 0 => 8-bit, 1 => 16-bit, 2 => 32-bit. cpu_env is passed so
 * the helper can record saturation in CPU state (presumably the QC
 * flag -- confirm in the helper implementation).
 */
static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
4251
/* Apply a variable shift to var as part of a shift-and-narrow sequence.
 * q selects the rounding helper variants (rshl) over the plain ones
 * (shl); u selects unsigned vs signed elements. Only 16- and 32-bit
 * element sizes (1 and 2) are legal here; any other size aborts.
 */
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        /* Rounding shifts. */
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        /* Plain (truncating) shifts. */
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
4285
/* Widen the contents of the 32-bit src into the 64-bit dest,
 * zero-extending when u is set, sign-extending otherwise. size is the
 * source element width (0 => 8-bit, 1 => 16-bit, 2 => 32-bit); for
 * sizes 0 and 1 the helpers widen packed sub-elements. Consumes
 * (frees) src.
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    /* The source temporary is dead after widening. */
    tcg_temp_free_i32(src);
}
4305
/* Packed 64-bit addition with element width 16, 32 or 64 bits depending
 * on size (0/1/2). Operands and destination come from the CPU_V001
 * macro, defined elsewhere -- by its name presumably expanding to
 * cpu_V0, cpu_V0, cpu_V1 (i.e. cpu_V0 += cpu_V1); confirm at the
 * macro definition.
 */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
4315
/* Packed 64-bit subtraction with element width 16, 32 or 64 bits
 * depending on size (0/1/2). Operands and destination come from the
 * CPU_V001 macro, defined elsewhere -- presumably cpu_V0 -= cpu_V1;
 * confirm at the macro definition.
 */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
4325
/* Negate each element of the 64-bit value var in place. Note the size
 * encoding is the widened one: 0 => 16-bit, 1 => 32-bit, 2 => 64-bit
 * elements (one step wider than the narrow-size encoding used by most
 * other helpers here).
 */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}
4335
/* op0 = saturating signed addition of op0 and op1 (64-bit values).
 * size 1 => packed 32-bit elements, size 2 => a single 64-bit element;
 * no other sizes are valid. cpu_env is passed so the helper can record
 * saturation in CPU state (presumably the QC flag -- confirm in the
 * helper implementation).
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
4344
/* Widening multiply: dest (64-bit) = a * b, dispatched on
 * ((size << 1) | u) where size is the source element width
 * (0 => 8-bit, 1 => 16-bit, 2 => 32-bit) and u selects unsigned.
 * The 8- and 16-bit cases use packed-element helpers; the 32-bit
 * cases are a single scalar multiply via gen_mul[su]_i64_i32.
 * Consumes (frees) a and b in all cases.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64 signed multiply via a 64-bit temporary. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        /* 32x32->64 unsigned multiply. */
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
4374
c33171c7
PM
4375static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4376{
4377 if (op) {
4378 if (u) {
4379 gen_neon_unarrow_sats(size, dest, src);
4380 } else {
4381 gen_neon_narrow(size, dest, src);
4382 }
4383 } else {
4384 if (u) {
4385 gen_neon_narrow_satu(size, dest, src);
4386 } else {
4387 gen_neon_narrow_sats(size, dest, src);
4388 }
4389 }
4390}
4391
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Each entry in this array has bit n set if the insn allows size value
 * n (otherwise it will UNDEF). Unallocated op values (e.g. 24 and 25,
 * which have no #define above) are left zero by the designated
 * initializers and so always fail the size check and UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4459
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13. Gaps in the numbering (3, 6, 7, 21, 29, etc.) are
 * unallocated encodings; they have no entry in neon_2rm_sizes below
 * and therefore UNDEF.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4509static int neon_2rm_is_float_op(int op)
4510{
4511 /* Return true if this neon 2reg-misc op is float-to-float */
4512 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4513 op >= NEON_2RM_VRECPE_F);
4514}
4515
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * Common masks: 0x7 = all integer sizes (8/16/32-bit), 0x1 = 8-bit
 * only, 0x2 = 16-bit only, 0x4 = 32-bit only (the float ops),
 * 0x3 = 8- or 16-bit.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4566
9ee6e8bb
PB
4567/* Translate a NEON data processing instruction. Return nonzero if the
4568 instruction is invalid.
ad69471c
PB
4569 We process data in a mixture of 32-bit and 64-bit chunks.
4570 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4571
9ee6e8bb
PB
4572static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4573{
4574 int op;
4575 int q;
4576 int rd, rn, rm;
4577 int size;
4578 int shift;
4579 int pass;
4580 int count;
4581 int pairwise;
4582 int u;
ca9a32e4 4583 uint32_t imm, mask;
b75263d6 4584 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4585 TCGv_i64 tmp64;
9ee6e8bb 4586
5df8bac1 4587 if (!s->vfp_enabled)
9ee6e8bb
PB
4588 return 1;
4589 q = (insn & (1 << 6)) != 0;
4590 u = (insn >> 24) & 1;
4591 VFP_DREG_D(rd, insn);
4592 VFP_DREG_N(rn, insn);
4593 VFP_DREG_M(rm, insn);
4594 size = (insn >> 20) & 3;
4595 if ((insn & (1 << 23)) == 0) {
4596 /* Three register same length. */
4597 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4598 /* Catch invalid op and bad size combinations: UNDEF */
4599 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4600 return 1;
4601 }
25f84f79
PM
4602 /* All insns of this form UNDEF for either this condition or the
4603 * superset of cases "Q==1"; we catch the latter later.
4604 */
4605 if (q && ((rd | rn | rm) & 1)) {
4606 return 1;
4607 }
62698be3
PM
4608 if (size == 3 && op != NEON_3R_LOGIC) {
4609 /* 64-bit element instructions. */
9ee6e8bb 4610 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4611 neon_load_reg64(cpu_V0, rn + pass);
4612 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4613 switch (op) {
62698be3 4614 case NEON_3R_VQADD:
9ee6e8bb 4615 if (u) {
02da0b2d
PM
4616 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4617 cpu_V0, cpu_V1);
2c0262af 4618 } else {
02da0b2d
PM
4619 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4620 cpu_V0, cpu_V1);
2c0262af 4621 }
9ee6e8bb 4622 break;
62698be3 4623 case NEON_3R_VQSUB:
9ee6e8bb 4624 if (u) {
02da0b2d
PM
4625 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4626 cpu_V0, cpu_V1);
ad69471c 4627 } else {
02da0b2d
PM
4628 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4629 cpu_V0, cpu_V1);
ad69471c
PB
4630 }
4631 break;
62698be3 4632 case NEON_3R_VSHL:
ad69471c
PB
4633 if (u) {
4634 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4635 } else {
4636 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4637 }
4638 break;
62698be3 4639 case NEON_3R_VQSHL:
ad69471c 4640 if (u) {
02da0b2d
PM
4641 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4642 cpu_V1, cpu_V0);
ad69471c 4643 } else {
02da0b2d
PM
4644 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4645 cpu_V1, cpu_V0);
ad69471c
PB
4646 }
4647 break;
62698be3 4648 case NEON_3R_VRSHL:
ad69471c
PB
4649 if (u) {
4650 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4651 } else {
ad69471c
PB
4652 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4653 }
4654 break;
62698be3 4655 case NEON_3R_VQRSHL:
ad69471c 4656 if (u) {
02da0b2d
PM
4657 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4658 cpu_V1, cpu_V0);
ad69471c 4659 } else {
02da0b2d
PM
4660 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4661 cpu_V1, cpu_V0);
1e8d4eec 4662 }
9ee6e8bb 4663 break;
62698be3 4664 case NEON_3R_VADD_VSUB:
9ee6e8bb 4665 if (u) {
ad69471c 4666 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4667 } else {
ad69471c 4668 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4669 }
4670 break;
4671 default:
4672 abort();
2c0262af 4673 }
ad69471c 4674 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4675 }
9ee6e8bb 4676 return 0;
2c0262af 4677 }
25f84f79 4678 pairwise = 0;
9ee6e8bb 4679 switch (op) {
62698be3
PM
4680 case NEON_3R_VSHL:
4681 case NEON_3R_VQSHL:
4682 case NEON_3R_VRSHL:
4683 case NEON_3R_VQRSHL:
9ee6e8bb 4684 {
ad69471c
PB
4685 int rtmp;
4686 /* Shift instruction operands are reversed. */
4687 rtmp = rn;
9ee6e8bb 4688 rn = rm;
ad69471c 4689 rm = rtmp;
9ee6e8bb 4690 }
2c0262af 4691 break;
25f84f79
PM
4692 case NEON_3R_VPADD:
4693 if (u) {
4694 return 1;
4695 }
4696 /* Fall through */
62698be3
PM
4697 case NEON_3R_VPMAX:
4698 case NEON_3R_VPMIN:
9ee6e8bb 4699 pairwise = 1;
2c0262af 4700 break;
25f84f79
PM
4701 case NEON_3R_FLOAT_ARITH:
4702 pairwise = (u && size < 2); /* if VPADD (float) */
4703 break;
4704 case NEON_3R_FLOAT_MINMAX:
4705 pairwise = u; /* if VPMIN/VPMAX (float) */
4706 break;
4707 case NEON_3R_FLOAT_CMP:
4708 if (!u && size) {
4709 /* no encoding for U=0 C=1x */
4710 return 1;
4711 }
4712 break;
4713 case NEON_3R_FLOAT_ACMP:
4714 if (!u) {
4715 return 1;
4716 }
4717 break;
4718 case NEON_3R_VRECPS_VRSQRTS:
4719 if (u) {
4720 return 1;
4721 }
2c0262af 4722 break;
25f84f79
PM
4723 case NEON_3R_VMUL:
4724 if (u && (size != 0)) {
4725 /* UNDEF on invalid size for polynomial subcase */
4726 return 1;
4727 }
2c0262af 4728 break;
9ee6e8bb 4729 default:
2c0262af 4730 break;
9ee6e8bb 4731 }
dd8fbd78 4732
25f84f79
PM
4733 if (pairwise && q) {
4734 /* All the pairwise insns UNDEF if Q is set */
4735 return 1;
4736 }
4737
9ee6e8bb
PB
4738 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4739
4740 if (pairwise) {
4741 /* Pairwise. */
a5a14945
JR
4742 if (pass < 1) {
4743 tmp = neon_load_reg(rn, 0);
4744 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4745 } else {
a5a14945
JR
4746 tmp = neon_load_reg(rm, 0);
4747 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4748 }
4749 } else {
4750 /* Elementwise. */
dd8fbd78
FN
4751 tmp = neon_load_reg(rn, pass);
4752 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4753 }
4754 switch (op) {
62698be3 4755 case NEON_3R_VHADD:
9ee6e8bb
PB
4756 GEN_NEON_INTEGER_OP(hadd);
4757 break;
62698be3 4758 case NEON_3R_VQADD:
02da0b2d 4759 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4760 break;
62698be3 4761 case NEON_3R_VRHADD:
9ee6e8bb 4762 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4763 break;
62698be3 4764 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4765 switch ((u << 2) | size) {
4766 case 0: /* VAND */
dd8fbd78 4767 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4768 break;
4769 case 1: /* BIC */
f669df27 4770 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4771 break;
4772 case 2: /* VORR */
dd8fbd78 4773 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4774 break;
4775 case 3: /* VORN */
f669df27 4776 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4777 break;
4778 case 4: /* VEOR */
dd8fbd78 4779 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4780 break;
4781 case 5: /* VBSL */
dd8fbd78
FN
4782 tmp3 = neon_load_reg(rd, pass);
4783 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4784 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4785 break;
4786 case 6: /* VBIT */
dd8fbd78
FN
4787 tmp3 = neon_load_reg(rd, pass);
4788 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4789 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4790 break;
4791 case 7: /* VBIF */
dd8fbd78
FN
4792 tmp3 = neon_load_reg(rd, pass);
4793 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4794 tcg_temp_free_i32(tmp3);
9ee6e8bb 4795 break;
2c0262af
FB
4796 }
4797 break;
62698be3 4798 case NEON_3R_VHSUB:
9ee6e8bb
PB
4799 GEN_NEON_INTEGER_OP(hsub);
4800 break;
62698be3 4801 case NEON_3R_VQSUB:
02da0b2d 4802 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4803 break;
62698be3 4804 case NEON_3R_VCGT:
9ee6e8bb
PB
4805 GEN_NEON_INTEGER_OP(cgt);
4806 break;
62698be3 4807 case NEON_3R_VCGE:
9ee6e8bb
PB
4808 GEN_NEON_INTEGER_OP(cge);
4809 break;
62698be3 4810 case NEON_3R_VSHL:
ad69471c 4811 GEN_NEON_INTEGER_OP(shl);
2c0262af 4812 break;
62698be3 4813 case NEON_3R_VQSHL:
02da0b2d 4814 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4815 break;
62698be3 4816 case NEON_3R_VRSHL:
ad69471c 4817 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4818 break;
62698be3 4819 case NEON_3R_VQRSHL:
02da0b2d 4820 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4821 break;
62698be3 4822 case NEON_3R_VMAX:
9ee6e8bb
PB
4823 GEN_NEON_INTEGER_OP(max);
4824 break;
62698be3 4825 case NEON_3R_VMIN:
9ee6e8bb
PB
4826 GEN_NEON_INTEGER_OP(min);
4827 break;
62698be3 4828 case NEON_3R_VABD:
9ee6e8bb
PB
4829 GEN_NEON_INTEGER_OP(abd);
4830 break;
62698be3 4831 case NEON_3R_VABA:
9ee6e8bb 4832 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4833 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4834 tmp2 = neon_load_reg(rd, pass);
4835 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4836 break;
62698be3 4837 case NEON_3R_VADD_VSUB:
9ee6e8bb 4838 if (!u) { /* VADD */
62698be3 4839 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4840 } else { /* VSUB */
4841 switch (size) {
dd8fbd78
FN
4842 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4843 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4844 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4845 default: abort();
9ee6e8bb
PB
4846 }
4847 }
4848 break;
62698be3 4849 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4850 if (!u) { /* VTST */
4851 switch (size) {
dd8fbd78
FN
4852 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4853 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4854 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4855 default: abort();
9ee6e8bb
PB
4856 }
4857 } else { /* VCEQ */
4858 switch (size) {
dd8fbd78
FN
4859 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4860 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4861 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4862 default: abort();
9ee6e8bb
PB
4863 }
4864 }
4865 break;
62698be3 4866 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4867 switch (size) {
dd8fbd78
FN
4868 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4869 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4870 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4871 default: abort();
9ee6e8bb 4872 }
7d1b0095 4873 tcg_temp_free_i32(tmp2);
dd8fbd78 4874 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4875 if (u) { /* VMLS */
dd8fbd78 4876 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4877 } else { /* VMLA */
dd8fbd78 4878 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4879 }
4880 break;
62698be3 4881 case NEON_3R_VMUL:
9ee6e8bb 4882 if (u) { /* polynomial */
dd8fbd78 4883 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4884 } else { /* Integer */
4885 switch (size) {
dd8fbd78
FN
4886 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4887 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4888 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4889 default: abort();
9ee6e8bb
PB
4890 }
4891 }
4892 break;
62698be3 4893 case NEON_3R_VPMAX:
9ee6e8bb
PB
4894 GEN_NEON_INTEGER_OP(pmax);
4895 break;
62698be3 4896 case NEON_3R_VPMIN:
9ee6e8bb
PB
4897 GEN_NEON_INTEGER_OP(pmin);
4898 break;
62698be3 4899 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4900 if (!u) { /* VQDMULH */
4901 switch (size) {
02da0b2d
PM
4902 case 1:
4903 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4904 break;
4905 case 2:
4906 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4907 break;
62698be3 4908 default: abort();
9ee6e8bb 4909 }
62698be3 4910 } else { /* VQRDMULH */
9ee6e8bb 4911 switch (size) {
02da0b2d
PM
4912 case 1:
4913 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4914 break;
4915 case 2:
4916 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4917 break;
62698be3 4918 default: abort();
9ee6e8bb
PB
4919 }
4920 }
4921 break;
62698be3 4922 case NEON_3R_VPADD:
9ee6e8bb 4923 switch (size) {
dd8fbd78
FN
4924 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4925 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4926 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4927 default: abort();
9ee6e8bb
PB
4928 }
4929 break;
62698be3 4930 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4931 {
4932 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4933 switch ((u << 2) | size) {
4934 case 0: /* VADD */
aa47cfdd
PM
4935 case 4: /* VPADD */
4936 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4937 break;
4938 case 2: /* VSUB */
aa47cfdd 4939 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4940 break;
4941 case 6: /* VABD */
aa47cfdd 4942 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4943 break;
4944 default:
62698be3 4945 abort();
9ee6e8bb 4946 }
aa47cfdd 4947 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4948 break;
aa47cfdd 4949 }
62698be3 4950 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4951 {
4952 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4953 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4954 if (!u) {
7d1b0095 4955 tcg_temp_free_i32(tmp2);
dd8fbd78 4956 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4957 if (size == 0) {
aa47cfdd 4958 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4959 } else {
aa47cfdd 4960 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4961 }
4962 }
aa47cfdd 4963 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4964 break;
aa47cfdd 4965 }
62698be3 4966 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4967 {
4968 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4969 if (!u) {
aa47cfdd 4970 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4971 } else {
aa47cfdd
PM
4972 if (size == 0) {
4973 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4974 } else {
4975 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4976 }
b5ff1b31 4977 }
aa47cfdd 4978 tcg_temp_free_ptr(fpstatus);
2c0262af 4979 break;
aa47cfdd 4980 }
62698be3 4981 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4982 {
4983 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4984 if (size == 0) {
4985 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4986 } else {
4987 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4988 }
4989 tcg_temp_free_ptr(fpstatus);
2c0262af 4990 break;
aa47cfdd 4991 }
62698be3 4992 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4993 {
4994 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4995 if (size == 0) {
4996 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4997 } else {
4998 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4999 }
5000 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5001 break;
aa47cfdd 5002 }
62698be3 5003 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 5004 if (size == 0)
dd8fbd78 5005 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 5006 else
dd8fbd78 5007 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 5008 break;
9ee6e8bb
PB
5009 default:
5010 abort();
2c0262af 5011 }
7d1b0095 5012 tcg_temp_free_i32(tmp2);
dd8fbd78 5013
9ee6e8bb
PB
5014 /* Save the result. For elementwise operations we can put it
5015 straight into the destination register. For pairwise operations
5016 we have to be careful to avoid clobbering the source operands. */
5017 if (pairwise && rd == rm) {
dd8fbd78 5018 neon_store_scratch(pass, tmp);
9ee6e8bb 5019 } else {
dd8fbd78 5020 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5021 }
5022
5023 } /* for pass */
5024 if (pairwise && rd == rm) {
5025 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5026 tmp = neon_load_scratch(pass);
5027 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5028 }
5029 }
ad69471c 5030 /* End of 3 register same size operations. */
9ee6e8bb
PB
5031 } else if (insn & (1 << 4)) {
5032 if ((insn & 0x00380080) != 0) {
5033 /* Two registers and shift. */
5034 op = (insn >> 8) & 0xf;
5035 if (insn & (1 << 7)) {
cc13115b
PM
5036 /* 64-bit shift. */
5037 if (op > 7) {
5038 return 1;
5039 }
9ee6e8bb
PB
5040 size = 3;
5041 } else {
5042 size = 2;
5043 while ((insn & (1 << (size + 19))) == 0)
5044 size--;
5045 }
5046 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5047 /* To avoid excessive dumplication of ops we implement shift
5048 by immediate using the variable shift operations. */
5049 if (op < 8) {
5050 /* Shift by immediate:
5051 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5052 if (q && ((rd | rm) & 1)) {
5053 return 1;
5054 }
5055 if (!u && (op == 4 || op == 6)) {
5056 return 1;
5057 }
9ee6e8bb
PB
5058 /* Right shifts are encoded as N - shift, where N is the
5059 element size in bits. */
5060 if (op <= 4)
5061 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5062 if (size == 3) {
5063 count = q + 1;
5064 } else {
5065 count = q ? 4: 2;
5066 }
5067 switch (size) {
5068 case 0:
5069 imm = (uint8_t) shift;
5070 imm |= imm << 8;
5071 imm |= imm << 16;
5072 break;
5073 case 1:
5074 imm = (uint16_t) shift;
5075 imm |= imm << 16;
5076 break;
5077 case 2:
5078 case 3:
5079 imm = shift;
5080 break;
5081 default:
5082 abort();
5083 }
5084
5085 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5086 if (size == 3) {
5087 neon_load_reg64(cpu_V0, rm + pass);
5088 tcg_gen_movi_i64(cpu_V1, imm);
5089 switch (op) {
5090 case 0: /* VSHR */
5091 case 1: /* VSRA */
5092 if (u)
5093 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5094 else
ad69471c 5095 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5096 break;
ad69471c
PB
5097 case 2: /* VRSHR */
5098 case 3: /* VRSRA */
5099 if (u)
5100 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5101 else
ad69471c 5102 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5103 break;
ad69471c 5104 case 4: /* VSRI */
ad69471c
PB
5105 case 5: /* VSHL, VSLI */
5106 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5107 break;
0322b26e 5108 case 6: /* VQSHLU */
02da0b2d
PM
5109 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5110 cpu_V0, cpu_V1);
ad69471c 5111 break;
0322b26e
PM
5112 case 7: /* VQSHL */
5113 if (u) {
02da0b2d 5114 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5115 cpu_V0, cpu_V1);
5116 } else {
02da0b2d 5117 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5118 cpu_V0, cpu_V1);
5119 }
9ee6e8bb 5120 break;
9ee6e8bb 5121 }
ad69471c
PB
5122 if (op == 1 || op == 3) {
5123 /* Accumulate. */
5371cb81 5124 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5125 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5126 } else if (op == 4 || (op == 5 && u)) {
5127 /* Insert */
923e6509
CL
5128 neon_load_reg64(cpu_V1, rd + pass);
5129 uint64_t mask;
5130 if (shift < -63 || shift > 63) {
5131 mask = 0;
5132 } else {
5133 if (op == 4) {
5134 mask = 0xffffffffffffffffull >> -shift;
5135 } else {
5136 mask = 0xffffffffffffffffull << shift;
5137 }
5138 }
5139 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5140 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5141 }
5142 neon_store_reg64(cpu_V0, rd + pass);
5143 } else { /* size < 3 */
5144 /* Operands in T0 and T1. */
dd8fbd78 5145 tmp = neon_load_reg(rm, pass);
7d1b0095 5146 tmp2 = tcg_temp_new_i32();
dd8fbd78 5147 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5148 switch (op) {
5149 case 0: /* VSHR */
5150 case 1: /* VSRA */
5151 GEN_NEON_INTEGER_OP(shl);
5152 break;
5153 case 2: /* VRSHR */
5154 case 3: /* VRSRA */
5155 GEN_NEON_INTEGER_OP(rshl);
5156 break;
5157 case 4: /* VSRI */
ad69471c
PB
5158 case 5: /* VSHL, VSLI */
5159 switch (size) {
dd8fbd78
FN
5160 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5161 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5162 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5163 default: abort();
ad69471c
PB
5164 }
5165 break;
0322b26e 5166 case 6: /* VQSHLU */
ad69471c 5167 switch (size) {
0322b26e 5168 case 0:
02da0b2d
PM
5169 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5170 tmp, tmp2);
0322b26e
PM
5171 break;
5172 case 1:
02da0b2d
PM
5173 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5174 tmp, tmp2);
0322b26e
PM
5175 break;
5176 case 2:
02da0b2d
PM
5177 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5178 tmp, tmp2);
0322b26e
PM
5179 break;
5180 default:
cc13115b 5181 abort();
ad69471c
PB
5182 }
5183 break;
0322b26e 5184 case 7: /* VQSHL */
02da0b2d 5185 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5186 break;
ad69471c 5187 }
7d1b0095 5188 tcg_temp_free_i32(tmp2);
ad69471c
PB
5189
5190 if (op == 1 || op == 3) {
5191 /* Accumulate. */
dd8fbd78 5192 tmp2 = neon_load_reg(rd, pass);
5371cb81 5193 gen_neon_add(size, tmp, tmp2);
7d1b0095 5194 tcg_temp_free_i32(tmp2);
ad69471c
PB
5195 } else if (op == 4 || (op == 5 && u)) {
5196 /* Insert */
5197 switch (size) {
5198 case 0:
5199 if (op == 4)
ca9a32e4 5200 mask = 0xff >> -shift;
ad69471c 5201 else
ca9a32e4
JR
5202 mask = (uint8_t)(0xff << shift);
5203 mask |= mask << 8;
5204 mask |= mask << 16;
ad69471c
PB
5205 break;
5206 case 1:
5207 if (op == 4)
ca9a32e4 5208 mask = 0xffff >> -shift;
ad69471c 5209 else
ca9a32e4
JR
5210 mask = (uint16_t)(0xffff << shift);
5211 mask |= mask << 16;
ad69471c
PB
5212 break;
5213 case 2:
ca9a32e4
JR
5214 if (shift < -31 || shift > 31) {
5215 mask = 0;
5216 } else {
5217 if (op == 4)
5218 mask = 0xffffffffu >> -shift;
5219 else
5220 mask = 0xffffffffu << shift;
5221 }
ad69471c
PB
5222 break;
5223 default:
5224 abort();
5225 }
dd8fbd78 5226 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5227 tcg_gen_andi_i32(tmp, tmp, mask);
5228 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5229 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5230 tcg_temp_free_i32(tmp2);
ad69471c 5231 }
dd8fbd78 5232 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5233 }
5234 } /* for pass */
5235 } else if (op < 10) {
ad69471c 5236 /* Shift by immediate and narrow:
9ee6e8bb 5237 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5238 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5239 if (rm & 1) {
5240 return 1;
5241 }
9ee6e8bb
PB
5242 shift = shift - (1 << (size + 3));
5243 size++;
92cdfaeb 5244 if (size == 3) {
a7812ae4 5245 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5246 neon_load_reg64(cpu_V0, rm);
5247 neon_load_reg64(cpu_V1, rm + 1);
5248 for (pass = 0; pass < 2; pass++) {
5249 TCGv_i64 in;
5250 if (pass == 0) {
5251 in = cpu_V0;
5252 } else {
5253 in = cpu_V1;
5254 }
ad69471c 5255 if (q) {
0b36f4cd 5256 if (input_unsigned) {
92cdfaeb 5257 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5258 } else {
92cdfaeb 5259 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5260 }
ad69471c 5261 } else {
0b36f4cd 5262 if (input_unsigned) {
92cdfaeb 5263 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5264 } else {
92cdfaeb 5265 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5266 }
ad69471c 5267 }
7d1b0095 5268 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5269 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5270 neon_store_reg(rd, pass, tmp);
5271 } /* for pass */
5272 tcg_temp_free_i64(tmp64);
5273 } else {
5274 if (size == 1) {
5275 imm = (uint16_t)shift;
5276 imm |= imm << 16;
2c0262af 5277 } else {
92cdfaeb
PM
5278 /* size == 2 */
5279 imm = (uint32_t)shift;
5280 }
5281 tmp2 = tcg_const_i32(imm);
5282 tmp4 = neon_load_reg(rm + 1, 0);
5283 tmp5 = neon_load_reg(rm + 1, 1);
5284 for (pass = 0; pass < 2; pass++) {
5285 if (pass == 0) {
5286 tmp = neon_load_reg(rm, 0);
5287 } else {
5288 tmp = tmp4;
5289 }
0b36f4cd
CL
5290 gen_neon_shift_narrow(size, tmp, tmp2, q,
5291 input_unsigned);
92cdfaeb
PM
5292 if (pass == 0) {
5293 tmp3 = neon_load_reg(rm, 1);
5294 } else {
5295 tmp3 = tmp5;
5296 }
0b36f4cd
CL
5297 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5298 input_unsigned);
36aa55dc 5299 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5300 tcg_temp_free_i32(tmp);
5301 tcg_temp_free_i32(tmp3);
5302 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5303 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5304 neon_store_reg(rd, pass, tmp);
5305 } /* for pass */
c6067f04 5306 tcg_temp_free_i32(tmp2);
b75263d6 5307 }
9ee6e8bb 5308 } else if (op == 10) {
cc13115b
PM
5309 /* VSHLL, VMOVL */
5310 if (q || (rd & 1)) {
9ee6e8bb 5311 return 1;
cc13115b 5312 }
ad69471c
PB
5313 tmp = neon_load_reg(rm, 0);
5314 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5315 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5316 if (pass == 1)
5317 tmp = tmp2;
5318
5319 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5320
9ee6e8bb
PB
5321 if (shift != 0) {
5322 /* The shift is less than the width of the source
ad69471c
PB
5323 type, so we can just shift the whole register. */
5324 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5325 /* Widen the result of shift: we need to clear
5326 * the potential overflow bits resulting from
5327 * left bits of the narrow input appearing as
5328 * right bits of left the neighbour narrow
5329 * input. */
ad69471c
PB
5330 if (size < 2 || !u) {
5331 uint64_t imm64;
5332 if (size == 0) {
5333 imm = (0xffu >> (8 - shift));
5334 imm |= imm << 16;
acdf01ef 5335 } else if (size == 1) {
ad69471c 5336 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5337 } else {
5338 /* size == 2 */
5339 imm = 0xffffffff >> (32 - shift);
5340 }
5341 if (size < 2) {
5342 imm64 = imm | (((uint64_t)imm) << 32);
5343 } else {
5344 imm64 = imm;
9ee6e8bb 5345 }
acdf01ef 5346 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5347 }
5348 }
ad69471c 5349 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5350 }
f73534a5 5351 } else if (op >= 14) {
9ee6e8bb 5352 /* VCVT fixed-point. */
cc13115b
PM
5353 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5354 return 1;
5355 }
f73534a5
PM
5356 /* We have already masked out the must-be-1 top bit of imm6,
5357 * hence this 32-shift where the ARM ARM has 64-imm6.
5358 */
5359 shift = 32 - shift;
9ee6e8bb 5360 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5361 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5362 if (!(op & 1)) {
9ee6e8bb 5363 if (u)
5500b06c 5364 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5365 else
5500b06c 5366 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5367 } else {
5368 if (u)
5500b06c 5369 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5370 else
5500b06c 5371 gen_vfp_tosl(0, shift, 1);
2c0262af 5372 }
4373f3ce 5373 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5374 }
5375 } else {
9ee6e8bb
PB
5376 return 1;
5377 }
5378 } else { /* (insn & 0x00380080) == 0 */
5379 int invert;
7d80fee5
PM
5380 if (q && (rd & 1)) {
5381 return 1;
5382 }
9ee6e8bb
PB
5383
5384 op = (insn >> 8) & 0xf;
5385 /* One register and immediate. */
5386 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5387 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5388 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5389 * We choose to not special-case this and will behave as if a
5390 * valid constant encoding of 0 had been given.
5391 */
9ee6e8bb
PB
5392 switch (op) {
5393 case 0: case 1:
5394 /* no-op */
5395 break;
5396 case 2: case 3:
5397 imm <<= 8;
5398 break;
5399 case 4: case 5:
5400 imm <<= 16;
5401 break;
5402 case 6: case 7:
5403 imm <<= 24;
5404 break;
5405 case 8: case 9:
5406 imm |= imm << 16;
5407 break;
5408 case 10: case 11:
5409 imm = (imm << 8) | (imm << 24);
5410 break;
5411 case 12:
8e31209e 5412 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5413 break;
5414 case 13:
5415 imm = (imm << 16) | 0xffff;
5416 break;
5417 case 14:
5418 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5419 if (invert)
5420 imm = ~imm;
5421 break;
5422 case 15:
7d80fee5
PM
5423 if (invert) {
5424 return 1;
5425 }
9ee6e8bb
PB
5426 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5427 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5428 break;
5429 }
5430 if (invert)
5431 imm = ~imm;
5432
9ee6e8bb
PB
5433 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5434 if (op & 1 && op < 12) {
ad69471c 5435 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5436 if (invert) {
5437 /* The immediate value has already been inverted, so
5438 BIC becomes AND. */
ad69471c 5439 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5440 } else {
ad69471c 5441 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5442 }
9ee6e8bb 5443 } else {
ad69471c 5444 /* VMOV, VMVN. */
7d1b0095 5445 tmp = tcg_temp_new_i32();
9ee6e8bb 5446 if (op == 14 && invert) {
a5a14945 5447 int n;
ad69471c
PB
5448 uint32_t val;
5449 val = 0;
9ee6e8bb
PB
5450 for (n = 0; n < 4; n++) {
5451 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5452 val |= 0xff << (n * 8);
9ee6e8bb 5453 }
ad69471c
PB
5454 tcg_gen_movi_i32(tmp, val);
5455 } else {
5456 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5457 }
9ee6e8bb 5458 }
ad69471c 5459 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5460 }
5461 }
e4b3861d 5462 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5463 if (size != 3) {
5464 op = (insn >> 8) & 0xf;
5465 if ((insn & (1 << 6)) == 0) {
5466 /* Three registers of different lengths. */
5467 int src1_wide;
5468 int src2_wide;
5469 int prewiden;
695272dc
PM
5470 /* undefreq: bit 0 : UNDEF if size != 0
5471 * bit 1 : UNDEF if size == 0
5472 * bit 2 : UNDEF if U == 1
5473 * Note that [1:0] set implies 'always UNDEF'
5474 */
5475 int undefreq;
5476 /* prewiden, src1_wide, src2_wide, undefreq */
5477 static const int neon_3reg_wide[16][4] = {
5478 {1, 0, 0, 0}, /* VADDL */
5479 {1, 1, 0, 0}, /* VADDW */
5480 {1, 0, 0, 0}, /* VSUBL */
5481 {1, 1, 0, 0}, /* VSUBW */
5482 {0, 1, 1, 0}, /* VADDHN */
5483 {0, 0, 0, 0}, /* VABAL */
5484 {0, 1, 1, 0}, /* VSUBHN */
5485 {0, 0, 0, 0}, /* VABDL */
5486 {0, 0, 0, 0}, /* VMLAL */
5487 {0, 0, 0, 6}, /* VQDMLAL */
5488 {0, 0, 0, 0}, /* VMLSL */
5489 {0, 0, 0, 6}, /* VQDMLSL */
5490 {0, 0, 0, 0}, /* Integer VMULL */
5491 {0, 0, 0, 2}, /* VQDMULL */
5492 {0, 0, 0, 5}, /* Polynomial VMULL */
5493 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5494 };
5495
5496 prewiden = neon_3reg_wide[op][0];
5497 src1_wide = neon_3reg_wide[op][1];
5498 src2_wide = neon_3reg_wide[op][2];
695272dc 5499 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5500
695272dc
PM
5501 if (((undefreq & 1) && (size != 0)) ||
5502 ((undefreq & 2) && (size == 0)) ||
5503 ((undefreq & 4) && u)) {
5504 return 1;
5505 }
5506 if ((src1_wide && (rn & 1)) ||
5507 (src2_wide && (rm & 1)) ||
5508 (!src2_wide && (rd & 1))) {
ad69471c 5509 return 1;
695272dc 5510 }
ad69471c 5511
9ee6e8bb
PB
5512 /* Avoid overlapping operands. Wide source operands are
5513 always aligned so will never overlap with wide
5514 destinations in problematic ways. */
8f8e3aa4 5515 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5516 tmp = neon_load_reg(rm, 1);
5517 neon_store_scratch(2, tmp);
8f8e3aa4 5518 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5519 tmp = neon_load_reg(rn, 1);
5520 neon_store_scratch(2, tmp);
9ee6e8bb 5521 }
a50f5b91 5522 TCGV_UNUSED(tmp3);
9ee6e8bb 5523 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5524 if (src1_wide) {
5525 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5526 TCGV_UNUSED(tmp);
9ee6e8bb 5527 } else {
ad69471c 5528 if (pass == 1 && rd == rn) {
dd8fbd78 5529 tmp = neon_load_scratch(2);
9ee6e8bb 5530 } else {
ad69471c
PB
5531 tmp = neon_load_reg(rn, pass);
5532 }
5533 if (prewiden) {
5534 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5535 }
5536 }
ad69471c
PB
5537 if (src2_wide) {
5538 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5539 TCGV_UNUSED(tmp2);
9ee6e8bb 5540 } else {
ad69471c 5541 if (pass == 1 && rd == rm) {
dd8fbd78 5542 tmp2 = neon_load_scratch(2);
9ee6e8bb 5543 } else {
ad69471c
PB
5544 tmp2 = neon_load_reg(rm, pass);
5545 }
5546 if (prewiden) {
5547 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5548 }
9ee6e8bb
PB
5549 }
5550 switch (op) {
5551 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5552 gen_neon_addl(size);
9ee6e8bb 5553 break;
79b0e534 5554 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5555 gen_neon_subl(size);
9ee6e8bb
PB
5556 break;
5557 case 5: case 7: /* VABAL, VABDL */
5558 switch ((size << 1) | u) {
ad69471c
PB
5559 case 0:
5560 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5561 break;
5562 case 1:
5563 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5564 break;
5565 case 2:
5566 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5567 break;
5568 case 3:
5569 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5570 break;
5571 case 4:
5572 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5573 break;
5574 case 5:
5575 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5576 break;
9ee6e8bb
PB
5577 default: abort();
5578 }
7d1b0095
PM
5579 tcg_temp_free_i32(tmp2);
5580 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5581 break;
5582 case 8: case 9: case 10: case 11: case 12: case 13:
5583 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5584 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5585 break;
5586 case 14: /* Polynomial VMULL */
e5ca24cb 5587 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5588 tcg_temp_free_i32(tmp2);
5589 tcg_temp_free_i32(tmp);
e5ca24cb 5590 break;
695272dc
PM
5591 default: /* 15 is RESERVED: caught earlier */
5592 abort();
9ee6e8bb 5593 }
ebcd88ce
PM
5594 if (op == 13) {
5595 /* VQDMULL */
5596 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5597 neon_store_reg64(cpu_V0, rd + pass);
5598 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5599 /* Accumulate. */
ebcd88ce 5600 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5601 switch (op) {
4dc064e6
PM
5602 case 10: /* VMLSL */
5603 gen_neon_negl(cpu_V0, size);
5604 /* Fall through */
5605 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5606 gen_neon_addl(size);
9ee6e8bb
PB
5607 break;
5608 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5609 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5610 if (op == 11) {
5611 gen_neon_negl(cpu_V0, size);
5612 }
ad69471c
PB
5613 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5614 break;
9ee6e8bb
PB
5615 default:
5616 abort();
5617 }
ad69471c 5618 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5619 } else if (op == 4 || op == 6) {
5620 /* Narrowing operation. */
7d1b0095 5621 tmp = tcg_temp_new_i32();
79b0e534 5622 if (!u) {
9ee6e8bb 5623 switch (size) {
ad69471c
PB
5624 case 0:
5625 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5626 break;
5627 case 1:
5628 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5629 break;
5630 case 2:
5631 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5632 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5633 break;
9ee6e8bb
PB
5634 default: abort();
5635 }
5636 } else {
5637 switch (size) {
ad69471c
PB
5638 case 0:
5639 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5640 break;
5641 case 1:
5642 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5643 break;
5644 case 2:
5645 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5646 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5647 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5648 break;
9ee6e8bb
PB
5649 default: abort();
5650 }
5651 }
ad69471c
PB
5652 if (pass == 0) {
5653 tmp3 = tmp;
5654 } else {
5655 neon_store_reg(rd, 0, tmp3);
5656 neon_store_reg(rd, 1, tmp);
5657 }
9ee6e8bb
PB
5658 } else {
5659 /* Write back the result. */
ad69471c 5660 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5661 }
5662 }
5663 } else {
3e3326df
PM
5664 /* Two registers and a scalar. NB that for ops of this form
5665 * the ARM ARM labels bit 24 as Q, but it is in our variable
5666 * 'u', not 'q'.
5667 */
5668 if (size == 0) {
5669 return 1;
5670 }
9ee6e8bb 5671 switch (op) {
9ee6e8bb 5672 case 1: /* Float VMLA scalar */
9ee6e8bb 5673 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5674 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5675 if (size == 1) {
5676 return 1;
5677 }
5678 /* fall through */
5679 case 0: /* Integer VMLA scalar */
5680 case 4: /* Integer VMLS scalar */
5681 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5682 case 12: /* VQDMULH scalar */
5683 case 13: /* VQRDMULH scalar */
3e3326df
PM
5684 if (u && ((rd | rn) & 1)) {
5685 return 1;
5686 }
dd8fbd78
FN
5687 tmp = neon_get_scalar(size, rm);
5688 neon_store_scratch(0, tmp);
9ee6e8bb 5689 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5690 tmp = neon_load_scratch(0);
5691 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5692 if (op == 12) {
5693 if (size == 1) {
02da0b2d 5694 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5695 } else {
02da0b2d 5696 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5697 }
5698 } else if (op == 13) {
5699 if (size == 1) {
02da0b2d 5700 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5701 } else {
02da0b2d 5702 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5703 }
5704 } else if (op & 1) {
aa47cfdd
PM
5705 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5706 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5707 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5708 } else {
5709 switch (size) {
dd8fbd78
FN
5710 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5711 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5712 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5713 default: abort();
9ee6e8bb
PB
5714 }
5715 }
7d1b0095 5716 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5717 if (op < 8) {
5718 /* Accumulate. */
dd8fbd78 5719 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5720 switch (op) {
5721 case 0:
dd8fbd78 5722 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5723 break;
5724 case 1:
aa47cfdd
PM
5725 {
5726 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5727 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5728 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5729 break;
aa47cfdd 5730 }
9ee6e8bb 5731 case 4:
dd8fbd78 5732 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5733 break;
5734 case 5:
aa47cfdd
PM
5735 {
5736 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5737 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5738 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5739 break;
aa47cfdd 5740 }
9ee6e8bb
PB
5741 default:
5742 abort();
5743 }
7d1b0095 5744 tcg_temp_free_i32(tmp2);
9ee6e8bb 5745 }
dd8fbd78 5746 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5747 }
5748 break;
9ee6e8bb 5749 case 3: /* VQDMLAL scalar */
9ee6e8bb 5750 case 7: /* VQDMLSL scalar */
9ee6e8bb 5751 case 11: /* VQDMULL scalar */
3e3326df 5752 if (u == 1) {
ad69471c 5753 return 1;
3e3326df
PM
5754 }
5755 /* fall through */
5756 case 2: /* VMLAL sclar */
5757 case 6: /* VMLSL scalar */
5758 case 10: /* VMULL scalar */
5759 if (rd & 1) {
5760 return 1;
5761 }
dd8fbd78 5762 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5763 /* We need a copy of tmp2 because gen_neon_mull
5764 * deletes it during pass 0. */
7d1b0095 5765 tmp4 = tcg_temp_new_i32();
c6067f04 5766 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5767 tmp3 = neon_load_reg(rn, 1);
ad69471c 5768
9ee6e8bb 5769 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5770 if (pass == 0) {
5771 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5772 } else {
dd8fbd78 5773 tmp = tmp3;
c6067f04 5774 tmp2 = tmp4;
9ee6e8bb 5775 }
ad69471c 5776 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5777 if (op != 11) {
5778 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5779 }
9ee6e8bb 5780 switch (op) {
4dc064e6
PM
5781 case 6:
5782 gen_neon_negl(cpu_V0, size);
5783 /* Fall through */
5784 case 2:
ad69471c 5785 gen_neon_addl(size);
9ee6e8bb
PB
5786 break;
5787 case 3: case 7:
ad69471c 5788 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5789 if (op == 7) {
5790 gen_neon_negl(cpu_V0, size);
5791 }
ad69471c 5792 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5793 break;
5794 case 10:
5795 /* no-op */
5796 break;
5797 case 11:
ad69471c 5798 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5799 break;
5800 default:
5801 abort();
5802 }
ad69471c 5803 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5804 }
dd8fbd78 5805
dd8fbd78 5806
9ee6e8bb
PB
5807 break;
5808 default: /* 14 and 15 are RESERVED */
5809 return 1;
5810 }
5811 }
5812 } else { /* size == 3 */
5813 if (!u) {
5814 /* Extract. */
9ee6e8bb 5815 imm = (insn >> 8) & 0xf;
ad69471c
PB
5816
5817 if (imm > 7 && !q)
5818 return 1;
5819
52579ea1
PM
5820 if (q && ((rd | rn | rm) & 1)) {
5821 return 1;
5822 }
5823
ad69471c
PB
5824 if (imm == 0) {
5825 neon_load_reg64(cpu_V0, rn);
5826 if (q) {
5827 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5828 }
ad69471c
PB
5829 } else if (imm == 8) {
5830 neon_load_reg64(cpu_V0, rn + 1);
5831 if (q) {
5832 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5833 }
ad69471c 5834 } else if (q) {
a7812ae4 5835 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5836 if (imm < 8) {
5837 neon_load_reg64(cpu_V0, rn);
a7812ae4 5838 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5839 } else {
5840 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5841 neon_load_reg64(tmp64, rm);
ad69471c
PB
5842 }
5843 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5844 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5845 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5846 if (imm < 8) {
5847 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5848 } else {
ad69471c
PB
5849 neon_load_reg64(cpu_V1, rm + 1);
5850 imm -= 8;
9ee6e8bb 5851 }
ad69471c 5852 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5853 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5854 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5855 tcg_temp_free_i64(tmp64);
ad69471c 5856 } else {
a7812ae4 5857 /* BUGFIX */
ad69471c 5858 neon_load_reg64(cpu_V0, rn);
a7812ae4 5859 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5860 neon_load_reg64(cpu_V1, rm);
a7812ae4 5861 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5862 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5863 }
5864 neon_store_reg64(cpu_V0, rd);
5865 if (q) {
5866 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5867 }
5868 } else if ((insn & (1 << 11)) == 0) {
5869 /* Two register misc. */
5870 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5871 size = (insn >> 18) & 3;
600b828c
PM
5872 /* UNDEF for unknown op values and bad op-size combinations */
5873 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5874 return 1;
5875 }
fc2a9b37
PM
5876 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5877 q && ((rm | rd) & 1)) {
5878 return 1;
5879 }
9ee6e8bb 5880 switch (op) {
600b828c 5881 case NEON_2RM_VREV64:
9ee6e8bb 5882 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5883 tmp = neon_load_reg(rm, pass * 2);
5884 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5885 switch (size) {
dd8fbd78
FN
5886 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5887 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5888 case 2: /* no-op */ break;
5889 default: abort();
5890 }
dd8fbd78 5891 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5892 if (size == 2) {
dd8fbd78 5893 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5894 } else {
9ee6e8bb 5895 switch (size) {
dd8fbd78
FN
5896 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5897 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5898 default: abort();
5899 }
dd8fbd78 5900 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5901 }
5902 }
5903 break;
600b828c
PM
5904 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5905 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5906 for (pass = 0; pass < q + 1; pass++) {
5907 tmp = neon_load_reg(rm, pass * 2);
5908 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5909 tmp = neon_load_reg(rm, pass * 2 + 1);
5910 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5911 switch (size) {
5912 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5913 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5914 case 2: tcg_gen_add_i64(CPU_V001); break;
5915 default: abort();
5916 }
600b828c 5917 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5918 /* Accumulate. */
ad69471c
PB
5919 neon_load_reg64(cpu_V1, rd + pass);
5920 gen_neon_addl(size);
9ee6e8bb 5921 }
ad69471c 5922 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5923 }
5924 break;
600b828c 5925 case NEON_2RM_VTRN:
9ee6e8bb 5926 if (size == 2) {
a5a14945 5927 int n;
9ee6e8bb 5928 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5929 tmp = neon_load_reg(rm, n);
5930 tmp2 = neon_load_reg(rd, n + 1);
5931 neon_store_reg(rm, n, tmp2);
5932 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5933 }
5934 } else {
5935 goto elementwise;
5936 }
5937 break;
600b828c 5938 case NEON_2RM_VUZP:
02acedf9 5939 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5940 return 1;
9ee6e8bb
PB
5941 }
5942 break;
600b828c 5943 case NEON_2RM_VZIP:
d68a6f3a 5944 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5945 return 1;
9ee6e8bb
PB
5946 }
5947 break;
600b828c
PM
5948 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5949 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5950 if (rm & 1) {
5951 return 1;
5952 }
a50f5b91 5953 TCGV_UNUSED(tmp2);
9ee6e8bb 5954 for (pass = 0; pass < 2; pass++) {
ad69471c 5955 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5956 tmp = tcg_temp_new_i32();
600b828c
PM
5957 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5958 tmp, cpu_V0);
ad69471c
PB
5959 if (pass == 0) {
5960 tmp2 = tmp;
5961 } else {
5962 neon_store_reg(rd, 0, tmp2);
5963 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5964 }
9ee6e8bb
PB
5965 }
5966 break;
600b828c 5967 case NEON_2RM_VSHLL:
fc2a9b37 5968 if (q || (rd & 1)) {
9ee6e8bb 5969 return 1;
600b828c 5970 }
ad69471c
PB
5971 tmp = neon_load_reg(rm, 0);
5972 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5973 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5974 if (pass == 1)
5975 tmp = tmp2;
5976 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5977 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5978 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5979 }
5980 break;
600b828c 5981 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5982 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5983 q || (rm & 1)) {
5984 return 1;
5985 }
7d1b0095
PM
5986 tmp = tcg_temp_new_i32();
5987 tmp2 = tcg_temp_new_i32();
60011498 5988 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5989 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5990 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5991 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5992 tcg_gen_shli_i32(tmp2, tmp2, 16);
5993 tcg_gen_or_i32(tmp2, tmp2, tmp);
5994 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5995 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5996 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5997 neon_store_reg(rd, 0, tmp2);
7d1b0095 5998 tmp2 = tcg_temp_new_i32();
2d981da7 5999 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6000 tcg_gen_shli_i32(tmp2, tmp2, 16);
6001 tcg_gen_or_i32(tmp2, tmp2, tmp);
6002 neon_store_reg(rd, 1, tmp2);
7d1b0095 6003 tcg_temp_free_i32(tmp);
60011498 6004 break;
600b828c 6005 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6006 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6007 q || (rd & 1)) {
6008 return 1;
6009 }
7d1b0095 6010 tmp3 = tcg_temp_new_i32();
60011498
PB
6011 tmp = neon_load_reg(rm, 0);
6012 tmp2 = neon_load_reg(rm, 1);
6013 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6014 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6015 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6016 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6017 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6018 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6019 tcg_temp_free_i32(tmp);
60011498 6020 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6021 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6022 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6023 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6024 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6025 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6026 tcg_temp_free_i32(tmp2);
6027 tcg_temp_free_i32(tmp3);
60011498 6028 break;
9ee6e8bb
PB
6029 default:
6030 elementwise:
6031 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6032 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6033 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6034 neon_reg_offset(rm, pass));
dd8fbd78 6035 TCGV_UNUSED(tmp);
9ee6e8bb 6036 } else {
dd8fbd78 6037 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6038 }
6039 switch (op) {
600b828c 6040 case NEON_2RM_VREV32:
9ee6e8bb 6041 switch (size) {
dd8fbd78
FN
6042 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6043 case 1: gen_swap_half(tmp); break;
600b828c 6044 default: abort();
9ee6e8bb
PB
6045 }
6046 break;
600b828c 6047 case NEON_2RM_VREV16:
dd8fbd78 6048 gen_rev16(tmp);
9ee6e8bb 6049 break;
600b828c 6050 case NEON_2RM_VCLS:
9ee6e8bb 6051 switch (size) {
dd8fbd78
FN
6052 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6053 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6054 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6055 default: abort();
9ee6e8bb
PB
6056 }
6057 break;
600b828c 6058 case NEON_2RM_VCLZ:
9ee6e8bb 6059 switch (size) {
dd8fbd78
FN
6060 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6061 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6062 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6063 default: abort();
9ee6e8bb
PB
6064 }
6065 break;
600b828c 6066 case NEON_2RM_VCNT:
dd8fbd78 6067 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6068 break;
600b828c 6069 case NEON_2RM_VMVN:
dd8fbd78 6070 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6071 break;
600b828c 6072 case NEON_2RM_VQABS:
9ee6e8bb 6073 switch (size) {
02da0b2d
PM
6074 case 0:
6075 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6076 break;
6077 case 1:
6078 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6079 break;
6080 case 2:
6081 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6082 break;
600b828c 6083 default: abort();
9ee6e8bb
PB
6084 }
6085 break;
600b828c 6086 case NEON_2RM_VQNEG:
9ee6e8bb 6087 switch (size) {
02da0b2d
PM
6088 case 0:
6089 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6090 break;
6091 case 1:
6092 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6093 break;
6094 case 2:
6095 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6096 break;
600b828c 6097 default: abort();
9ee6e8bb
PB
6098 }
6099 break;
600b828c 6100 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6101 tmp2 = tcg_const_i32(0);
9ee6e8bb 6102 switch(size) {
dd8fbd78
FN
6103 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6104 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6105 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6106 default: abort();
9ee6e8bb 6107 }
dd8fbd78 6108 tcg_temp_free(tmp2);
600b828c 6109 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6110 tcg_gen_not_i32(tmp, tmp);
600b828c 6111 }
9ee6e8bb 6112 break;
600b828c 6113 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6114 tmp2 = tcg_const_i32(0);
9ee6e8bb 6115 switch(size) {
dd8fbd78
FN
6116 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6117 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6118 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6119 default: abort();
9ee6e8bb 6120 }
dd8fbd78 6121 tcg_temp_free(tmp2);
600b828c 6122 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6123 tcg_gen_not_i32(tmp, tmp);
600b828c 6124 }
9ee6e8bb 6125 break;
600b828c 6126 case NEON_2RM_VCEQ0:
dd8fbd78 6127 tmp2 = tcg_const_i32(0);
9ee6e8bb 6128 switch(size) {
dd8fbd78
FN
6129 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6130 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6131 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6132 default: abort();
9ee6e8bb 6133 }
dd8fbd78 6134 tcg_temp_free(tmp2);
9ee6e8bb 6135 break;
600b828c 6136 case NEON_2RM_VABS:
9ee6e8bb 6137 switch(size) {
dd8fbd78
FN
6138 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6139 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6140 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6141 default: abort();
9ee6e8bb
PB
6142 }
6143 break;
600b828c 6144 case NEON_2RM_VNEG:
dd8fbd78
FN
6145 tmp2 = tcg_const_i32(0);
6146 gen_neon_rsb(size, tmp, tmp2);
6147 tcg_temp_free(tmp2);
9ee6e8bb 6148 break;
600b828c 6149 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6150 {
6151 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6152 tmp2 = tcg_const_i32(0);
aa47cfdd 6153 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6154 tcg_temp_free(tmp2);
aa47cfdd 6155 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6156 break;
aa47cfdd 6157 }
600b828c 6158 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6159 {
6160 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6161 tmp2 = tcg_const_i32(0);
aa47cfdd 6162 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6163 tcg_temp_free(tmp2);
aa47cfdd 6164 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6165 break;
aa47cfdd 6166 }
600b828c 6167 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6168 {
6169 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6170 tmp2 = tcg_const_i32(0);
aa47cfdd 6171 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6172 tcg_temp_free(tmp2);
aa47cfdd 6173 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6174 break;
aa47cfdd 6175 }
600b828c 6176 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6177 {
6178 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6179 tmp2 = tcg_const_i32(0);
aa47cfdd 6180 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6181 tcg_temp_free(tmp2);
aa47cfdd 6182 tcg_temp_free_ptr(fpstatus);
0e326109 6183 break;
aa47cfdd 6184 }
600b828c 6185 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6186 {
6187 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6188 tmp2 = tcg_const_i32(0);
aa47cfdd 6189 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6190 tcg_temp_free(tmp2);
aa47cfdd 6191 tcg_temp_free_ptr(fpstatus);
0e326109 6192 break;
aa47cfdd 6193 }
600b828c 6194 case NEON_2RM_VABS_F:
4373f3ce 6195 gen_vfp_abs(0);
9ee6e8bb 6196 break;
600b828c 6197 case NEON_2RM_VNEG_F:
4373f3ce 6198 gen_vfp_neg(0);
9ee6e8bb 6199 break;
600b828c 6200 case NEON_2RM_VSWP:
dd8fbd78
FN
6201 tmp2 = neon_load_reg(rd, pass);
6202 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6203 break;
600b828c 6204 case NEON_2RM_VTRN:
dd8fbd78 6205 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6206 switch (size) {
dd8fbd78
FN
6207 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6208 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6209 default: abort();
9ee6e8bb 6210 }
dd8fbd78 6211 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6212 break;
600b828c 6213 case NEON_2RM_VRECPE:
dd8fbd78 6214 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6215 break;
600b828c 6216 case NEON_2RM_VRSQRTE:
dd8fbd78 6217 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6218 break;
600b828c 6219 case NEON_2RM_VRECPE_F:
4373f3ce 6220 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6221 break;
600b828c 6222 case NEON_2RM_VRSQRTE_F:
4373f3ce 6223 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6224 break;
600b828c 6225 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6226 gen_vfp_sito(0, 1);
9ee6e8bb 6227 break;
600b828c 6228 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6229 gen_vfp_uito(0, 1);
9ee6e8bb 6230 break;
600b828c 6231 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6232 gen_vfp_tosiz(0, 1);
9ee6e8bb 6233 break;
600b828c 6234 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6235 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6236 break;
6237 default:
600b828c
PM
6238 /* Reserved op values were caught by the
6239 * neon_2rm_sizes[] check earlier.
6240 */
6241 abort();
9ee6e8bb 6242 }
600b828c 6243 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6244 tcg_gen_st_f32(cpu_F0s, cpu_env,
6245 neon_reg_offset(rd, pass));
9ee6e8bb 6246 } else {
dd8fbd78 6247 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6248 }
6249 }
6250 break;
6251 }
6252 } else if ((insn & (1 << 10)) == 0) {
6253 /* VTBL, VTBX. */
56907d77
PM
6254 int n = ((insn >> 8) & 3) + 1;
6255 if ((rn + n) > 32) {
6256 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6257 * helper function running off the end of the register file.
6258 */
6259 return 1;
6260 }
6261 n <<= 3;
9ee6e8bb 6262 if (insn & (1 << 6)) {
8f8e3aa4 6263 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6264 } else {
7d1b0095 6265 tmp = tcg_temp_new_i32();
8f8e3aa4 6266 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6267 }
8f8e3aa4 6268 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6269 tmp4 = tcg_const_i32(rn);
6270 tmp5 = tcg_const_i32(n);
6271 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6272 tcg_temp_free_i32(tmp);
9ee6e8bb 6273 if (insn & (1 << 6)) {
8f8e3aa4 6274 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6275 } else {
7d1b0095 6276 tmp = tcg_temp_new_i32();
8f8e3aa4 6277 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6278 }
8f8e3aa4 6279 tmp3 = neon_load_reg(rm, 1);
b75263d6 6280 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6281 tcg_temp_free_i32(tmp5);
6282 tcg_temp_free_i32(tmp4);
8f8e3aa4 6283 neon_store_reg(rd, 0, tmp2);
3018f259 6284 neon_store_reg(rd, 1, tmp3);
7d1b0095 6285 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6286 } else if ((insn & 0x380) == 0) {
6287 /* VDUP */
133da6aa
JR
6288 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6289 return 1;
6290 }
9ee6e8bb 6291 if (insn & (1 << 19)) {
dd8fbd78 6292 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6293 } else {
dd8fbd78 6294 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6295 }
6296 if (insn & (1 << 16)) {
dd8fbd78 6297 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6298 } else if (insn & (1 << 17)) {
6299 if ((insn >> 18) & 1)
dd8fbd78 6300 gen_neon_dup_high16(tmp);
9ee6e8bb 6301 else
dd8fbd78 6302 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6303 }
6304 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6305 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6306 tcg_gen_mov_i32(tmp2, tmp);
6307 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6308 }
7d1b0095 6309 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6310 } else {
6311 return 1;
6312 }
6313 }
6314 }
6315 return 0;
6316}
6317
fe1479c3
PB
6318static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6319{
6320 int crn = (insn >> 16) & 0xf;
6321 int crm = insn & 0xf;
6322 int op1 = (insn >> 21) & 7;
6323 int op2 = (insn >> 5) & 7;
6324 int rt = (insn >> 12) & 0xf;
6325 TCGv tmp;
6326
ca27c052
PM
6327 /* Minimal set of debug registers, since we don't support debug */
6328 if (op1 == 0 && crn == 0 && op2 == 0) {
6329 switch (crm) {
6330 case 0:
6331 /* DBGDIDR: just RAZ. In particular this means the
6332 * "debug architecture version" bits will read as
6333 * a reserved value, which should cause Linux to
6334 * not try to use the debug hardware.
6335 */
6336 tmp = tcg_const_i32(0);
6337 store_reg(s, rt, tmp);
6338 return 0;
6339 case 1:
6340 case 2:
6341 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6342 * don't implement memory mapped debug components
6343 */
6344 if (ENABLE_ARCH_7) {
6345 tmp = tcg_const_i32(0);
6346 store_reg(s, rt, tmp);
6347 return 0;
6348 }
6349 break;
6350 default:
6351 break;
6352 }
6353 }
6354
fe1479c3
PB
6355 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6356 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6357 /* TEECR */
6358 if (IS_USER(s))
6359 return 1;
6360 tmp = load_cpu_field(teecr);
6361 store_reg(s, rt, tmp);
6362 return 0;
6363 }
6364 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6365 /* TEEHBR */
6366 if (IS_USER(s) && (env->teecr & 1))
6367 return 1;
6368 tmp = load_cpu_field(teehbr);
6369 store_reg(s, rt, tmp);
6370 return 0;
6371 }
6372 }
fe1479c3
PB
6373 return 1;
6374}
6375
6376static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6377{
6378 int crn = (insn >> 16) & 0xf;
6379 int crm = insn & 0xf;
6380 int op1 = (insn >> 21) & 7;
6381 int op2 = (insn >> 5) & 7;
6382 int rt = (insn >> 12) & 0xf;
6383 TCGv tmp;
6384
6385 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6386 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6387 /* TEECR */
6388 if (IS_USER(s))
6389 return 1;
6390 tmp = load_reg(s, rt);
6391 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6392 tcg_temp_free_i32(tmp);
fe1479c3
PB
6393 return 0;
6394 }
6395 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6396 /* TEEHBR */
6397 if (IS_USER(s) && (env->teecr & 1))
6398 return 1;
6399 tmp = load_reg(s, rt);
6400 store_cpu_field(tmp, teehbr);
6401 return 0;
6402 }
6403 }
fe1479c3
PB
6404 return 1;
6405}
6406
9ee6e8bb
PB
6407static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6408{
6409 int cpnum;
6410
6411 cpnum = (insn >> 8) & 0xf;
6412 if (arm_feature(env, ARM_FEATURE_XSCALE)
6413 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6414 return 1;
6415
6416 switch (cpnum) {
6417 case 0:
6418 case 1:
6419 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6420 return disas_iwmmxt_insn(env, s, insn);
6421 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6422 return disas_dsp_insn(env, s, insn);
6423 }
6424 return 1;
6425 case 10:
6426 case 11:
6427 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6428 case 14:
6429 /* Coprocessors 7-15 are architecturally reserved by ARM.
6430 Unfortunately Intel decided to ignore this. */
6431 if (arm_feature(env, ARM_FEATURE_XSCALE))
6432 goto board;
6433 if (insn & (1 << 20))
6434 return disas_cp14_read(env, s, insn);
6435 else
6436 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6437 case 15:
6438 return disas_cp15_insn (env, s, insn);
6439 default:
fe1479c3 6440 board:
9ee6e8bb
PB
6441 /* Unknown coprocessor. See if the board has hooked it. */
6442 return disas_cp_insn (env, s, insn);
6443 }
6444}
6445
5e3f878a
PB
6446
6447/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6448static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6449{
6450 TCGv tmp;
7d1b0095 6451 tmp = tcg_temp_new_i32();
5e3f878a
PB
6452 tcg_gen_trunc_i64_i32(tmp, val);
6453 store_reg(s, rlow, tmp);
7d1b0095 6454 tmp = tcg_temp_new_i32();
5e3f878a
PB
6455 tcg_gen_shri_i64(val, val, 32);
6456 tcg_gen_trunc_i64_i32(tmp, val);
6457 store_reg(s, rhigh, tmp);
6458}
6459
6460/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6461static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6462{
a7812ae4 6463 TCGv_i64 tmp;
5e3f878a
PB
6464 TCGv tmp2;
6465
36aa55dc 6466 /* Load value and extend to 64 bits. */
a7812ae4 6467 tmp = tcg_temp_new_i64();
5e3f878a
PB
6468 tmp2 = load_reg(s, rlow);
6469 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6470 tcg_temp_free_i32(tmp2);
5e3f878a 6471 tcg_gen_add_i64(val, val, tmp);
b75263d6 6472 tcg_temp_free_i64(tmp);
5e3f878a
PB
6473}
6474
6475/* load and add a 64-bit value from a register pair. */
a7812ae4 6476static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6477{
a7812ae4 6478 TCGv_i64 tmp;
36aa55dc
PB
6479 TCGv tmpl;
6480 TCGv tmph;
5e3f878a
PB
6481
6482 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6483 tmpl = load_reg(s, rlow);
6484 tmph = load_reg(s, rhigh);
a7812ae4 6485 tmp = tcg_temp_new_i64();
36aa55dc 6486 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6487 tcg_temp_free_i32(tmpl);
6488 tcg_temp_free_i32(tmph);
5e3f878a 6489 tcg_gen_add_i64(val, val, tmp);
b75263d6 6490 tcg_temp_free_i64(tmp);
5e3f878a
PB
6491}
6492
6493/* Set N and Z flags from a 64-bit value. */
a7812ae4 6494static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6495{
7d1b0095 6496 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6497 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6498 gen_logic_CC(tmp);
7d1b0095 6499 tcg_temp_free_i32(tmp);
5e3f878a
PB
6500}
6501
426f5abc
PB
6502/* Load/Store exclusive instructions are implemented by remembering
6503 the value/address loaded, and seeing if these are the same
6504 when the store is performed. This should be is sufficient to implement
6505 the architecturally mandated semantics, and avoids having to monitor
6506 regular stores.
6507
6508 In system emulation mode only one CPU will be running at once, so
6509 this sequence is effectively atomic. In user emulation mode we
6510 throw an exception and handle the atomic operation elsewhere. */
6511static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6512 TCGv addr, int size)
6513{
6514 TCGv tmp;
6515
6516 switch (size) {
6517 case 0:
6518 tmp = gen_ld8u(addr, IS_USER(s));
6519 break;
6520 case 1:
6521 tmp = gen_ld16u(addr, IS_USER(s));
6522 break;
6523 case 2:
6524 case 3:
6525 tmp = gen_ld32(addr, IS_USER(s));
6526 break;
6527 default:
6528 abort();
6529 }
6530 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6531 store_reg(s, rt, tmp);
6532 if (size == 3) {
7d1b0095 6533 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6534 tcg_gen_addi_i32(tmp2, addr, 4);
6535 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6536 tcg_temp_free_i32(tmp2);
426f5abc
PB
6537 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6538 store_reg(s, rt2, tmp);
6539 }
6540 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6541}
6542
6543static void gen_clrex(DisasContext *s)
6544{
6545 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6546}
6547
6548#ifdef CONFIG_USER_ONLY
6549static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6550 TCGv addr, int size)
6551{
6552 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6553 tcg_gen_movi_i32(cpu_exclusive_info,
6554 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6555 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6556}
6557#else
6558static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6559 TCGv addr, int size)
6560{
6561 TCGv tmp;
6562 int done_label;
6563 int fail_label;
6564
6565 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6566 [addr] = {Rt};
6567 {Rd} = 0;
6568 } else {
6569 {Rd} = 1;
6570 } */
6571 fail_label = gen_new_label();
6572 done_label = gen_new_label();
6573 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6574 switch (size) {
6575 case 0:
6576 tmp = gen_ld8u(addr, IS_USER(s));
6577 break;
6578 case 1:
6579 tmp = gen_ld16u(addr, IS_USER(s));
6580 break;
6581 case 2:
6582 case 3:
6583 tmp = gen_ld32(addr, IS_USER(s));
6584 break;
6585 default:
6586 abort();
6587 }
6588 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6589 tcg_temp_free_i32(tmp);
426f5abc 6590 if (size == 3) {
7d1b0095 6591 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6592 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6593 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6594 tcg_temp_free_i32(tmp2);
426f5abc 6595 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6596 tcg_temp_free_i32(tmp);
426f5abc
PB
6597 }
6598 tmp = load_reg(s, rt);
6599 switch (size) {
6600 case 0:
6601 gen_st8(tmp, addr, IS_USER(s));
6602 break;
6603 case 1:
6604 gen_st16(tmp, addr, IS_USER(s));
6605 break;
6606 case 2:
6607 case 3:
6608 gen_st32(tmp, addr, IS_USER(s));
6609 break;
6610 default:
6611 abort();
6612 }
6613 if (size == 3) {
6614 tcg_gen_addi_i32(addr, addr, 4);
6615 tmp = load_reg(s, rt2);
6616 gen_st32(tmp, addr, IS_USER(s));
6617 }
6618 tcg_gen_movi_i32(cpu_R[rd], 0);
6619 tcg_gen_br(done_label);
6620 gen_set_label(fail_label);
6621 tcg_gen_movi_i32(cpu_R[rd], 1);
6622 gen_set_label(done_label);
6623 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6624}
6625#endif
6626
9ee6e8bb
PB
6627static void disas_arm_insn(CPUState * env, DisasContext *s)
6628{
6629 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6630 TCGv tmp;
3670669c 6631 TCGv tmp2;
6ddbc6e4 6632 TCGv tmp3;
b0109805 6633 TCGv addr;
a7812ae4 6634 TCGv_i64 tmp64;
9ee6e8bb
PB
6635
6636 insn = ldl_code(s->pc);
6637 s->pc += 4;
6638
6639 /* M variants do not implement ARM mode. */
6640 if (IS_M(env))
6641 goto illegal_op;
6642 cond = insn >> 28;
6643 if (cond == 0xf){
be5e7a76
DES
6644 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6645 * choose to UNDEF. In ARMv5 and above the space is used
6646 * for miscellaneous unconditional instructions.
6647 */
6648 ARCH(5);
6649
9ee6e8bb
PB
6650 /* Unconditional instructions. */
6651 if (((insn >> 25) & 7) == 1) {
6652 /* NEON Data processing. */
6653 if (!arm_feature(env, ARM_FEATURE_NEON))
6654 goto illegal_op;
6655
6656 if (disas_neon_data_insn(env, s, insn))
6657 goto illegal_op;
6658 return;
6659 }
6660 if ((insn & 0x0f100000) == 0x04000000) {
6661 /* NEON load/store. */
6662 if (!arm_feature(env, ARM_FEATURE_NEON))
6663 goto illegal_op;
6664
6665 if (disas_neon_ls_insn(env, s, insn))
6666 goto illegal_op;
6667 return;
6668 }
3d185e5d
PM
6669 if (((insn & 0x0f30f000) == 0x0510f000) ||
6670 ((insn & 0x0f30f010) == 0x0710f000)) {
6671 if ((insn & (1 << 22)) == 0) {
6672 /* PLDW; v7MP */
6673 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6674 goto illegal_op;
6675 }
6676 }
6677 /* Otherwise PLD; v5TE+ */
be5e7a76 6678 ARCH(5TE);
3d185e5d
PM
6679 return;
6680 }
6681 if (((insn & 0x0f70f000) == 0x0450f000) ||
6682 ((insn & 0x0f70f010) == 0x0650f000)) {
6683 ARCH(7);
6684 return; /* PLI; V7 */
6685 }
6686 if (((insn & 0x0f700000) == 0x04100000) ||
6687 ((insn & 0x0f700010) == 0x06100000)) {
6688 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6689 goto illegal_op;
6690 }
6691 return; /* v7MP: Unallocated memory hint: must NOP */
6692 }
6693
6694 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6695 ARCH(6);
6696 /* setend */
6697 if (insn & (1 << 9)) {
6698 /* BE8 mode not implemented. */
6699 goto illegal_op;
6700 }
6701 return;
6702 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6703 switch ((insn >> 4) & 0xf) {
6704 case 1: /* clrex */
6705 ARCH(6K);
426f5abc 6706 gen_clrex(s);
9ee6e8bb
PB
6707 return;
6708 case 4: /* dsb */
6709 case 5: /* dmb */
6710 case 6: /* isb */
6711 ARCH(7);
6712 /* We don't emulate caches so these are a no-op. */
6713 return;
6714 default:
6715 goto illegal_op;
6716 }
6717 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6718 /* srs */
c67b6b71 6719 int32_t offset;
9ee6e8bb
PB
6720 if (IS_USER(s))
6721 goto illegal_op;
6722 ARCH(6);
6723 op1 = (insn & 0x1f);
7d1b0095 6724 addr = tcg_temp_new_i32();
39ea3d4e
PM
6725 tmp = tcg_const_i32(op1);
6726 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6727 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6728 i = (insn >> 23) & 3;
6729 switch (i) {
6730 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6731 case 1: offset = 0; break; /* IA */
6732 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6733 case 3: offset = 4; break; /* IB */
6734 default: abort();
6735 }
6736 if (offset)
b0109805
PB
6737 tcg_gen_addi_i32(addr, addr, offset);
6738 tmp = load_reg(s, 14);
6739 gen_st32(tmp, addr, 0);
c67b6b71 6740 tmp = load_cpu_field(spsr);
b0109805
PB
6741 tcg_gen_addi_i32(addr, addr, 4);
6742 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6743 if (insn & (1 << 21)) {
6744 /* Base writeback. */
6745 switch (i) {
6746 case 0: offset = -8; break;
c67b6b71
FN
6747 case 1: offset = 4; break;
6748 case 2: offset = -4; break;
9ee6e8bb
PB
6749 case 3: offset = 0; break;
6750 default: abort();
6751 }
6752 if (offset)
c67b6b71 6753 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6754 tmp = tcg_const_i32(op1);
6755 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6756 tcg_temp_free_i32(tmp);
7d1b0095 6757 tcg_temp_free_i32(addr);
b0109805 6758 } else {
7d1b0095 6759 tcg_temp_free_i32(addr);
9ee6e8bb 6760 }
a990f58f 6761 return;
ea825eee 6762 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6763 /* rfe */
c67b6b71 6764 int32_t offset;
9ee6e8bb
PB
6765 if (IS_USER(s))
6766 goto illegal_op;
6767 ARCH(6);
6768 rn = (insn >> 16) & 0xf;
b0109805 6769 addr = load_reg(s, rn);
9ee6e8bb
PB
6770 i = (insn >> 23) & 3;
6771 switch (i) {
b0109805 6772 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6773 case 1: offset = 0; break; /* IA */
6774 case 2: offset = -8; break; /* DB */
b0109805 6775 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6776 default: abort();
6777 }
6778 if (offset)
b0109805
PB
6779 tcg_gen_addi_i32(addr, addr, offset);
6780 /* Load PC into tmp and CPSR into tmp2. */
6781 tmp = gen_ld32(addr, 0);
6782 tcg_gen_addi_i32(addr, addr, 4);
6783 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6784 if (insn & (1 << 21)) {
6785 /* Base writeback. */
6786 switch (i) {
b0109805 6787 case 0: offset = -8; break;
c67b6b71
FN
6788 case 1: offset = 4; break;
6789 case 2: offset = -4; break;
b0109805 6790 case 3: offset = 0; break;
9ee6e8bb
PB
6791 default: abort();
6792 }
6793 if (offset)
b0109805
PB
6794 tcg_gen_addi_i32(addr, addr, offset);
6795 store_reg(s, rn, addr);
6796 } else {
7d1b0095 6797 tcg_temp_free_i32(addr);
9ee6e8bb 6798 }
b0109805 6799 gen_rfe(s, tmp, tmp2);
c67b6b71 6800 return;
9ee6e8bb
PB
6801 } else if ((insn & 0x0e000000) == 0x0a000000) {
6802 /* branch link and change to thumb (blx <offset>) */
6803 int32_t offset;
6804
6805 val = (uint32_t)s->pc;
7d1b0095 6806 tmp = tcg_temp_new_i32();
d9ba4830
PB
6807 tcg_gen_movi_i32(tmp, val);
6808 store_reg(s, 14, tmp);
9ee6e8bb
PB
6809 /* Sign-extend the 24-bit offset */
6810 offset = (((int32_t)insn) << 8) >> 8;
6811 /* offset * 4 + bit24 * 2 + (thumb bit) */
6812 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6813 /* pipeline offset */
6814 val += 4;
be5e7a76 6815 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6816 gen_bx_im(s, val);
9ee6e8bb
PB
6817 return;
6818 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6819 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6820 /* iWMMXt register transfer. */
6821 if (env->cp15.c15_cpar & (1 << 1))
6822 if (!disas_iwmmxt_insn(env, s, insn))
6823 return;
6824 }
6825 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6826 /* Coprocessor double register transfer. */
be5e7a76 6827 ARCH(5TE);
9ee6e8bb
PB
6828 } else if ((insn & 0x0f000010) == 0x0e000010) {
6829 /* Additional coprocessor register transfer. */
7997d92f 6830 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6831 uint32_t mask;
6832 uint32_t val;
6833 /* cps (privileged) */
6834 if (IS_USER(s))
6835 return;
6836 mask = val = 0;
6837 if (insn & (1 << 19)) {
6838 if (insn & (1 << 8))
6839 mask |= CPSR_A;
6840 if (insn & (1 << 7))
6841 mask |= CPSR_I;
6842 if (insn & (1 << 6))
6843 mask |= CPSR_F;
6844 if (insn & (1 << 18))
6845 val |= mask;
6846 }
7997d92f 6847 if (insn & (1 << 17)) {
9ee6e8bb
PB
6848 mask |= CPSR_M;
6849 val |= (insn & 0x1f);
6850 }
6851 if (mask) {
2fbac54b 6852 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6853 }
6854 return;
6855 }
6856 goto illegal_op;
6857 }
6858 if (cond != 0xe) {
6859 /* if not always execute, we generate a conditional jump to
6860 next instruction */
6861 s->condlabel = gen_new_label();
d9ba4830 6862 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6863 s->condjmp = 1;
6864 }
6865 if ((insn & 0x0f900000) == 0x03000000) {
6866 if ((insn & (1 << 21)) == 0) {
6867 ARCH(6T2);
6868 rd = (insn >> 12) & 0xf;
6869 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6870 if ((insn & (1 << 22)) == 0) {
6871 /* MOVW */
7d1b0095 6872 tmp = tcg_temp_new_i32();
5e3f878a 6873 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6874 } else {
6875 /* MOVT */
5e3f878a 6876 tmp = load_reg(s, rd);
86831435 6877 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6878 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6879 }
5e3f878a 6880 store_reg(s, rd, tmp);
9ee6e8bb
PB
6881 } else {
6882 if (((insn >> 12) & 0xf) != 0xf)
6883 goto illegal_op;
6884 if (((insn >> 16) & 0xf) == 0) {
6885 gen_nop_hint(s, insn & 0xff);
6886 } else {
6887 /* CPSR = immediate */
6888 val = insn & 0xff;
6889 shift = ((insn >> 8) & 0xf) * 2;
6890 if (shift)
6891 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6892 i = ((insn & (1 << 22)) != 0);
2fbac54b 6893 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6894 goto illegal_op;
6895 }
6896 }
6897 } else if ((insn & 0x0f900000) == 0x01000000
6898 && (insn & 0x00000090) != 0x00000090) {
6899 /* miscellaneous instructions */
6900 op1 = (insn >> 21) & 3;
6901 sh = (insn >> 4) & 0xf;
6902 rm = insn & 0xf;
6903 switch (sh) {
6904 case 0x0: /* move program status register */
6905 if (op1 & 1) {
6906 /* PSR = reg */
2fbac54b 6907 tmp = load_reg(s, rm);
9ee6e8bb 6908 i = ((op1 & 2) != 0);
2fbac54b 6909 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6910 goto illegal_op;
6911 } else {
6912 /* reg = PSR */
6913 rd = (insn >> 12) & 0xf;
6914 if (op1 & 2) {
6915 if (IS_USER(s))
6916 goto illegal_op;
d9ba4830 6917 tmp = load_cpu_field(spsr);
9ee6e8bb 6918 } else {
7d1b0095 6919 tmp = tcg_temp_new_i32();
d9ba4830 6920 gen_helper_cpsr_read(tmp);
9ee6e8bb 6921 }
d9ba4830 6922 store_reg(s, rd, tmp);
9ee6e8bb
PB
6923 }
6924 break;
6925 case 0x1:
6926 if (op1 == 1) {
6927 /* branch/exchange thumb (bx). */
be5e7a76 6928 ARCH(4T);
d9ba4830
PB
6929 tmp = load_reg(s, rm);
6930 gen_bx(s, tmp);
9ee6e8bb
PB
6931 } else if (op1 == 3) {
6932 /* clz */
be5e7a76 6933 ARCH(5);
9ee6e8bb 6934 rd = (insn >> 12) & 0xf;
1497c961
PB
6935 tmp = load_reg(s, rm);
6936 gen_helper_clz(tmp, tmp);
6937 store_reg(s, rd, tmp);
9ee6e8bb
PB
6938 } else {
6939 goto illegal_op;
6940 }
6941 break;
6942 case 0x2:
6943 if (op1 == 1) {
6944 ARCH(5J); /* bxj */
6945 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6946 tmp = load_reg(s, rm);
6947 gen_bx(s, tmp);
9ee6e8bb
PB
6948 } else {
6949 goto illegal_op;
6950 }
6951 break;
6952 case 0x3:
6953 if (op1 != 1)
6954 goto illegal_op;
6955
be5e7a76 6956 ARCH(5);
9ee6e8bb 6957 /* branch link/exchange thumb (blx) */
d9ba4830 6958 tmp = load_reg(s, rm);
7d1b0095 6959 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6960 tcg_gen_movi_i32(tmp2, s->pc);
6961 store_reg(s, 14, tmp2);
6962 gen_bx(s, tmp);
9ee6e8bb
PB
6963 break;
6964 case 0x5: /* saturating add/subtract */
be5e7a76 6965 ARCH(5TE);
9ee6e8bb
PB
6966 rd = (insn >> 12) & 0xf;
6967 rn = (insn >> 16) & 0xf;
b40d0353 6968 tmp = load_reg(s, rm);
5e3f878a 6969 tmp2 = load_reg(s, rn);
9ee6e8bb 6970 if (op1 & 2)
5e3f878a 6971 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6972 if (op1 & 1)
5e3f878a 6973 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6974 else
5e3f878a 6975 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6976 tcg_temp_free_i32(tmp2);
5e3f878a 6977 store_reg(s, rd, tmp);
9ee6e8bb 6978 break;
49e14940
AL
6979 case 7:
6980 /* SMC instruction (op1 == 3)
6981 and undefined instructions (op1 == 0 || op1 == 2)
6982 will trap */
6983 if (op1 != 1) {
6984 goto illegal_op;
6985 }
6986 /* bkpt */
be5e7a76 6987 ARCH(5);
bc4a0de0 6988 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6989 break;
6990 case 0x8: /* signed multiply */
6991 case 0xa:
6992 case 0xc:
6993 case 0xe:
be5e7a76 6994 ARCH(5TE);
9ee6e8bb
PB
6995 rs = (insn >> 8) & 0xf;
6996 rn = (insn >> 12) & 0xf;
6997 rd = (insn >> 16) & 0xf;
6998 if (op1 == 1) {
6999 /* (32 * 16) >> 16 */
5e3f878a
PB
7000 tmp = load_reg(s, rm);
7001 tmp2 = load_reg(s, rs);
9ee6e8bb 7002 if (sh & 4)
5e3f878a 7003 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7004 else
5e3f878a 7005 gen_sxth(tmp2);
a7812ae4
PB
7006 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7007 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7008 tmp = tcg_temp_new_i32();
a7812ae4 7009 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7010 tcg_temp_free_i64(tmp64);
9ee6e8bb 7011 if ((sh & 2) == 0) {
5e3f878a
PB
7012 tmp2 = load_reg(s, rn);
7013 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7014 tcg_temp_free_i32(tmp2);
9ee6e8bb 7015 }
5e3f878a 7016 store_reg(s, rd, tmp);
9ee6e8bb
PB
7017 } else {
7018 /* 16 * 16 */
5e3f878a
PB
7019 tmp = load_reg(s, rm);
7020 tmp2 = load_reg(s, rs);
7021 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7022 tcg_temp_free_i32(tmp2);
9ee6e8bb 7023 if (op1 == 2) {
a7812ae4
PB
7024 tmp64 = tcg_temp_new_i64();
7025 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7026 tcg_temp_free_i32(tmp);
a7812ae4
PB
7027 gen_addq(s, tmp64, rn, rd);
7028 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7029 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7030 } else {
7031 if (op1 == 0) {
5e3f878a
PB
7032 tmp2 = load_reg(s, rn);
7033 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7034 tcg_temp_free_i32(tmp2);
9ee6e8bb 7035 }
5e3f878a 7036 store_reg(s, rd, tmp);
9ee6e8bb
PB
7037 }
7038 }
7039 break;
7040 default:
7041 goto illegal_op;
7042 }
7043 } else if (((insn & 0x0e000000) == 0 &&
7044 (insn & 0x00000090) != 0x90) ||
7045 ((insn & 0x0e000000) == (1 << 25))) {
7046 int set_cc, logic_cc, shiftop;
7047
7048 op1 = (insn >> 21) & 0xf;
7049 set_cc = (insn >> 20) & 1;
7050 logic_cc = table_logic_cc[op1] & set_cc;
7051
7052 /* data processing instruction */
7053 if (insn & (1 << 25)) {
7054 /* immediate operand */
7055 val = insn & 0xff;
7056 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7057 if (shift) {
9ee6e8bb 7058 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7059 }
7d1b0095 7060 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7061 tcg_gen_movi_i32(tmp2, val);
7062 if (logic_cc && shift) {
7063 gen_set_CF_bit31(tmp2);
7064 }
9ee6e8bb
PB
7065 } else {
7066 /* register */
7067 rm = (insn) & 0xf;
e9bb4aa9 7068 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7069 shiftop = (insn >> 5) & 3;
7070 if (!(insn & (1 << 4))) {
7071 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7072 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7073 } else {
7074 rs = (insn >> 8) & 0xf;
8984bd2e 7075 tmp = load_reg(s, rs);
e9bb4aa9 7076 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7077 }
7078 }
7079 if (op1 != 0x0f && op1 != 0x0d) {
7080 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7081 tmp = load_reg(s, rn);
7082 } else {
7083 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7084 }
7085 rd = (insn >> 12) & 0xf;
7086 switch(op1) {
7087 case 0x00:
e9bb4aa9
JR
7088 tcg_gen_and_i32(tmp, tmp, tmp2);
7089 if (logic_cc) {
7090 gen_logic_CC(tmp);
7091 }
21aeb343 7092 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7093 break;
7094 case 0x01:
e9bb4aa9
JR
7095 tcg_gen_xor_i32(tmp, tmp, tmp2);
7096 if (logic_cc) {
7097 gen_logic_CC(tmp);
7098 }
21aeb343 7099 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7100 break;
7101 case 0x02:
7102 if (set_cc && rd == 15) {
7103 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7104 if (IS_USER(s)) {
9ee6e8bb 7105 goto illegal_op;
e9bb4aa9
JR
7106 }
7107 gen_helper_sub_cc(tmp, tmp, tmp2);
7108 gen_exception_return(s, tmp);
9ee6e8bb 7109 } else {
e9bb4aa9
JR
7110 if (set_cc) {
7111 gen_helper_sub_cc(tmp, tmp, tmp2);
7112 } else {
7113 tcg_gen_sub_i32(tmp, tmp, tmp2);
7114 }
21aeb343 7115 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7116 }
7117 break;
7118 case 0x03:
e9bb4aa9
JR
7119 if (set_cc) {
7120 gen_helper_sub_cc(tmp, tmp2, tmp);
7121 } else {
7122 tcg_gen_sub_i32(tmp, tmp2, tmp);
7123 }
21aeb343 7124 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7125 break;
7126 case 0x04:
e9bb4aa9
JR
7127 if (set_cc) {
7128 gen_helper_add_cc(tmp, tmp, tmp2);
7129 } else {
7130 tcg_gen_add_i32(tmp, tmp, tmp2);
7131 }
21aeb343 7132 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7133 break;
7134 case 0x05:
e9bb4aa9
JR
7135 if (set_cc) {
7136 gen_helper_adc_cc(tmp, tmp, tmp2);
7137 } else {
7138 gen_add_carry(tmp, tmp, tmp2);
7139 }
21aeb343 7140 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7141 break;
7142 case 0x06:
e9bb4aa9
JR
7143 if (set_cc) {
7144 gen_helper_sbc_cc(tmp, tmp, tmp2);
7145 } else {
7146 gen_sub_carry(tmp, tmp, tmp2);
7147 }
21aeb343 7148 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7149 break;
7150 case 0x07:
e9bb4aa9
JR
7151 if (set_cc) {
7152 gen_helper_sbc_cc(tmp, tmp2, tmp);
7153 } else {
7154 gen_sub_carry(tmp, tmp2, tmp);
7155 }
21aeb343 7156 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7157 break;
7158 case 0x08:
7159 if (set_cc) {
e9bb4aa9
JR
7160 tcg_gen_and_i32(tmp, tmp, tmp2);
7161 gen_logic_CC(tmp);
9ee6e8bb 7162 }
7d1b0095 7163 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7164 break;
7165 case 0x09:
7166 if (set_cc) {
e9bb4aa9
JR
7167 tcg_gen_xor_i32(tmp, tmp, tmp2);
7168 gen_logic_CC(tmp);
9ee6e8bb 7169 }
7d1b0095 7170 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7171 break;
7172 case 0x0a:
7173 if (set_cc) {
e9bb4aa9 7174 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7175 }
7d1b0095 7176 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7177 break;
7178 case 0x0b:
7179 if (set_cc) {
e9bb4aa9 7180 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7181 }
7d1b0095 7182 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7183 break;
7184 case 0x0c:
e9bb4aa9
JR
7185 tcg_gen_or_i32(tmp, tmp, tmp2);
7186 if (logic_cc) {
7187 gen_logic_CC(tmp);
7188 }
21aeb343 7189 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7190 break;
7191 case 0x0d:
7192 if (logic_cc && rd == 15) {
7193 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7194 if (IS_USER(s)) {
9ee6e8bb 7195 goto illegal_op;
e9bb4aa9
JR
7196 }
7197 gen_exception_return(s, tmp2);
9ee6e8bb 7198 } else {
e9bb4aa9
JR
7199 if (logic_cc) {
7200 gen_logic_CC(tmp2);
7201 }
21aeb343 7202 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7203 }
7204 break;
7205 case 0x0e:
f669df27 7206 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7207 if (logic_cc) {
7208 gen_logic_CC(tmp);
7209 }
21aeb343 7210 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7211 break;
7212 default:
7213 case 0x0f:
e9bb4aa9
JR
7214 tcg_gen_not_i32(tmp2, tmp2);
7215 if (logic_cc) {
7216 gen_logic_CC(tmp2);
7217 }
21aeb343 7218 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7219 break;
7220 }
e9bb4aa9 7221 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7222 tcg_temp_free_i32(tmp2);
e9bb4aa9 7223 }
9ee6e8bb
PB
7224 } else {
7225 /* other instructions */
7226 op1 = (insn >> 24) & 0xf;
7227 switch(op1) {
7228 case 0x0:
7229 case 0x1:
7230 /* multiplies, extra load/stores */
7231 sh = (insn >> 5) & 3;
7232 if (sh == 0) {
7233 if (op1 == 0x0) {
7234 rd = (insn >> 16) & 0xf;
7235 rn = (insn >> 12) & 0xf;
7236 rs = (insn >> 8) & 0xf;
7237 rm = (insn) & 0xf;
7238 op1 = (insn >> 20) & 0xf;
7239 switch (op1) {
7240 case 0: case 1: case 2: case 3: case 6:
7241 /* 32 bit mul */
5e3f878a
PB
7242 tmp = load_reg(s, rs);
7243 tmp2 = load_reg(s, rm);
7244 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7245 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7246 if (insn & (1 << 22)) {
7247 /* Subtract (mls) */
7248 ARCH(6T2);
5e3f878a
PB
7249 tmp2 = load_reg(s, rn);
7250 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7251 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7252 } else if (insn & (1 << 21)) {
7253 /* Add */
5e3f878a
PB
7254 tmp2 = load_reg(s, rn);
7255 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7256 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7257 }
7258 if (insn & (1 << 20))
5e3f878a
PB
7259 gen_logic_CC(tmp);
7260 store_reg(s, rd, tmp);
9ee6e8bb 7261 break;
8aac08b1
AJ
7262 case 4:
7263 /* 64 bit mul double accumulate (UMAAL) */
7264 ARCH(6);
7265 tmp = load_reg(s, rs);
7266 tmp2 = load_reg(s, rm);
7267 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7268 gen_addq_lo(s, tmp64, rn);
7269 gen_addq_lo(s, tmp64, rd);
7270 gen_storeq_reg(s, rn, rd, tmp64);
7271 tcg_temp_free_i64(tmp64);
7272 break;
7273 case 8: case 9: case 10: case 11:
7274 case 12: case 13: case 14: case 15:
7275 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7276 tmp = load_reg(s, rs);
7277 tmp2 = load_reg(s, rm);
8aac08b1 7278 if (insn & (1 << 22)) {
a7812ae4 7279 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7280 } else {
a7812ae4 7281 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7282 }
7283 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7284 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7285 }
8aac08b1 7286 if (insn & (1 << 20)) {
a7812ae4 7287 gen_logicq_cc(tmp64);
8aac08b1 7288 }
a7812ae4 7289 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7290 tcg_temp_free_i64(tmp64);
9ee6e8bb 7291 break;
8aac08b1
AJ
7292 default:
7293 goto illegal_op;
9ee6e8bb
PB
7294 }
7295 } else {
7296 rn = (insn >> 16) & 0xf;
7297 rd = (insn >> 12) & 0xf;
7298 if (insn & (1 << 23)) {
7299 /* load/store exclusive */
86753403
PB
7300 op1 = (insn >> 21) & 0x3;
7301 if (op1)
a47f43d2 7302 ARCH(6K);
86753403
PB
7303 else
7304 ARCH(6);
3174f8e9 7305 addr = tcg_temp_local_new_i32();
98a46317 7306 load_reg_var(s, addr, rn);
9ee6e8bb 7307 if (insn & (1 << 20)) {
86753403
PB
7308 switch (op1) {
7309 case 0: /* ldrex */
426f5abc 7310 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7311 break;
7312 case 1: /* ldrexd */
426f5abc 7313 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7314 break;
7315 case 2: /* ldrexb */
426f5abc 7316 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7317 break;
7318 case 3: /* ldrexh */
426f5abc 7319 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7320 break;
7321 default:
7322 abort();
7323 }
9ee6e8bb
PB
7324 } else {
7325 rm = insn & 0xf;
86753403
PB
7326 switch (op1) {
7327 case 0: /* strex */
426f5abc 7328 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7329 break;
7330 case 1: /* strexd */
502e64fe 7331 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7332 break;
7333 case 2: /* strexb */
426f5abc 7334 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7335 break;
7336 case 3: /* strexh */
426f5abc 7337 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7338 break;
7339 default:
7340 abort();
7341 }
9ee6e8bb 7342 }
3174f8e9 7343 tcg_temp_free(addr);
9ee6e8bb
PB
7344 } else {
7345 /* SWP instruction */
7346 rm = (insn) & 0xf;
7347
8984bd2e
PB
7348 /* ??? This is not really atomic. However we know
7349 we never have multiple CPUs running in parallel,
7350 so it is good enough. */
7351 addr = load_reg(s, rn);
7352 tmp = load_reg(s, rm);
9ee6e8bb 7353 if (insn & (1 << 22)) {
8984bd2e
PB
7354 tmp2 = gen_ld8u(addr, IS_USER(s));
7355 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7356 } else {
8984bd2e
PB
7357 tmp2 = gen_ld32(addr, IS_USER(s));
7358 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7359 }
7d1b0095 7360 tcg_temp_free_i32(addr);
8984bd2e 7361 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7362 }
7363 }
7364 } else {
7365 int address_offset;
7366 int load;
7367 /* Misc load/store */
7368 rn = (insn >> 16) & 0xf;
7369 rd = (insn >> 12) & 0xf;
b0109805 7370 addr = load_reg(s, rn);
9ee6e8bb 7371 if (insn & (1 << 24))
b0109805 7372 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7373 address_offset = 0;
7374 if (insn & (1 << 20)) {
7375 /* load */
7376 switch(sh) {
7377 case 1:
b0109805 7378 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7379 break;
7380 case 2:
b0109805 7381 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7382 break;
7383 default:
7384 case 3:
b0109805 7385 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7386 break;
7387 }
7388 load = 1;
7389 } else if (sh & 2) {
be5e7a76 7390 ARCH(5TE);
9ee6e8bb
PB
7391 /* doubleword */
7392 if (sh & 1) {
7393 /* store */
b0109805
PB
7394 tmp = load_reg(s, rd);
7395 gen_st32(tmp, addr, IS_USER(s));
7396 tcg_gen_addi_i32(addr, addr, 4);
7397 tmp = load_reg(s, rd + 1);
7398 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7399 load = 0;
7400 } else {
7401 /* load */
b0109805
PB
7402 tmp = gen_ld32(addr, IS_USER(s));
7403 store_reg(s, rd, tmp);
7404 tcg_gen_addi_i32(addr, addr, 4);
7405 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7406 rd++;
7407 load = 1;
7408 }
7409 address_offset = -4;
7410 } else {
7411 /* store */
b0109805
PB
7412 tmp = load_reg(s, rd);
7413 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7414 load = 0;
7415 }
7416 /* Perform base writeback before the loaded value to
7417 ensure correct behavior with overlapping index registers.
7418 ldrd with base writeback is undefined if the
7419 destination and index registers overlap. */
7420 if (!(insn & (1 << 24))) {
b0109805
PB
7421 gen_add_datah_offset(s, insn, address_offset, addr);
7422 store_reg(s, rn, addr);
9ee6e8bb
PB
7423 } else if (insn & (1 << 21)) {
7424 if (address_offset)
b0109805
PB
7425 tcg_gen_addi_i32(addr, addr, address_offset);
7426 store_reg(s, rn, addr);
7427 } else {
7d1b0095 7428 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7429 }
7430 if (load) {
7431 /* Complete the load. */
b0109805 7432 store_reg(s, rd, tmp);
9ee6e8bb
PB
7433 }
7434 }
7435 break;
7436 case 0x4:
7437 case 0x5:
7438 goto do_ldst;
7439 case 0x6:
7440 case 0x7:
7441 if (insn & (1 << 4)) {
7442 ARCH(6);
7443 /* Armv6 Media instructions. */
7444 rm = insn & 0xf;
7445 rn = (insn >> 16) & 0xf;
2c0262af 7446 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7447 rs = (insn >> 8) & 0xf;
7448 switch ((insn >> 23) & 3) {
7449 case 0: /* Parallel add/subtract. */
7450 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7451 tmp = load_reg(s, rn);
7452 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7453 sh = (insn >> 5) & 7;
7454 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7455 goto illegal_op;
6ddbc6e4 7456 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7457 tcg_temp_free_i32(tmp2);
6ddbc6e4 7458 store_reg(s, rd, tmp);
9ee6e8bb
PB
7459 break;
7460 case 1:
7461 if ((insn & 0x00700020) == 0) {
6c95676b 7462 /* Halfword pack. */
3670669c
PB
7463 tmp = load_reg(s, rn);
7464 tmp2 = load_reg(s, rm);
9ee6e8bb 7465 shift = (insn >> 7) & 0x1f;
3670669c
PB
7466 if (insn & (1 << 6)) {
7467 /* pkhtb */
22478e79
AZ
7468 if (shift == 0)
7469 shift = 31;
7470 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7471 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7472 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7473 } else {
7474 /* pkhbt */
22478e79
AZ
7475 if (shift)
7476 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7477 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7478 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7479 }
7480 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7481 tcg_temp_free_i32(tmp2);
3670669c 7482 store_reg(s, rd, tmp);
9ee6e8bb
PB
7483 } else if ((insn & 0x00200020) == 0x00200000) {
7484 /* [us]sat */
6ddbc6e4 7485 tmp = load_reg(s, rm);
9ee6e8bb
PB
7486 shift = (insn >> 7) & 0x1f;
7487 if (insn & (1 << 6)) {
7488 if (shift == 0)
7489 shift = 31;
6ddbc6e4 7490 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7491 } else {
6ddbc6e4 7492 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7493 }
7494 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7495 tmp2 = tcg_const_i32(sh);
7496 if (insn & (1 << 22))
7497 gen_helper_usat(tmp, tmp, tmp2);
7498 else
7499 gen_helper_ssat(tmp, tmp, tmp2);
7500 tcg_temp_free_i32(tmp2);
6ddbc6e4 7501 store_reg(s, rd, tmp);
9ee6e8bb
PB
7502 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7503 /* [us]sat16 */
6ddbc6e4 7504 tmp = load_reg(s, rm);
9ee6e8bb 7505 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7506 tmp2 = tcg_const_i32(sh);
7507 if (insn & (1 << 22))
7508 gen_helper_usat16(tmp, tmp, tmp2);
7509 else
7510 gen_helper_ssat16(tmp, tmp, tmp2);
7511 tcg_temp_free_i32(tmp2);
6ddbc6e4 7512 store_reg(s, rd, tmp);
9ee6e8bb
PB
7513 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7514 /* Select bytes. */
6ddbc6e4
PB
7515 tmp = load_reg(s, rn);
7516 tmp2 = load_reg(s, rm);
7d1b0095 7517 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7518 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7519 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7520 tcg_temp_free_i32(tmp3);
7521 tcg_temp_free_i32(tmp2);
6ddbc6e4 7522 store_reg(s, rd, tmp);
9ee6e8bb 7523 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7524 tmp = load_reg(s, rm);
9ee6e8bb 7525 shift = (insn >> 10) & 3;
1301f322 7526 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7527 rotate, a shift is sufficient. */
7528 if (shift != 0)
f669df27 7529 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7530 op1 = (insn >> 20) & 7;
7531 switch (op1) {
5e3f878a
PB
7532 case 0: gen_sxtb16(tmp); break;
7533 case 2: gen_sxtb(tmp); break;
7534 case 3: gen_sxth(tmp); break;
7535 case 4: gen_uxtb16(tmp); break;
7536 case 6: gen_uxtb(tmp); break;
7537 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7538 default: goto illegal_op;
7539 }
7540 if (rn != 15) {
5e3f878a 7541 tmp2 = load_reg(s, rn);
9ee6e8bb 7542 if ((op1 & 3) == 0) {
5e3f878a 7543 gen_add16(tmp, tmp2);
9ee6e8bb 7544 } else {
5e3f878a 7545 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7546 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7547 }
7548 }
6c95676b 7549 store_reg(s, rd, tmp);
9ee6e8bb
PB
7550 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7551 /* rev */
b0109805 7552 tmp = load_reg(s, rm);
9ee6e8bb
PB
7553 if (insn & (1 << 22)) {
7554 if (insn & (1 << 7)) {
b0109805 7555 gen_revsh(tmp);
9ee6e8bb
PB
7556 } else {
7557 ARCH(6T2);
b0109805 7558 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7559 }
7560 } else {
7561 if (insn & (1 << 7))
b0109805 7562 gen_rev16(tmp);
9ee6e8bb 7563 else
66896cb8 7564 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7565 }
b0109805 7566 store_reg(s, rd, tmp);
9ee6e8bb
PB
7567 } else {
7568 goto illegal_op;
7569 }
7570 break;
7571 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7572 switch ((insn >> 20) & 0x7) {
7573 case 5:
7574 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7575 /* op2 not 00x or 11x : UNDEF */
7576 goto illegal_op;
7577 }
838fa72d
AJ
7578 /* Signed multiply most significant [accumulate].
7579 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7580 tmp = load_reg(s, rm);
7581 tmp2 = load_reg(s, rs);
a7812ae4 7582 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7583
955a7dd5 7584 if (rd != 15) {
838fa72d 7585 tmp = load_reg(s, rd);
9ee6e8bb 7586 if (insn & (1 << 6)) {
838fa72d 7587 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7588 } else {
838fa72d 7589 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7590 }
7591 }
838fa72d
AJ
7592 if (insn & (1 << 5)) {
7593 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7594 }
7595 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7596 tmp = tcg_temp_new_i32();
838fa72d
AJ
7597 tcg_gen_trunc_i64_i32(tmp, tmp64);
7598 tcg_temp_free_i64(tmp64);
955a7dd5 7599 store_reg(s, rn, tmp);
41e9564d
PM
7600 break;
7601 case 0:
7602 case 4:
7603 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7604 if (insn & (1 << 7)) {
7605 goto illegal_op;
7606 }
7607 tmp = load_reg(s, rm);
7608 tmp2 = load_reg(s, rs);
9ee6e8bb 7609 if (insn & (1 << 5))
5e3f878a
PB
7610 gen_swap_half(tmp2);
7611 gen_smul_dual(tmp, tmp2);
5e3f878a 7612 if (insn & (1 << 6)) {
e1d177b9 7613 /* This subtraction cannot overflow. */
5e3f878a
PB
7614 tcg_gen_sub_i32(tmp, tmp, tmp2);
7615 } else {
e1d177b9
PM
7616 /* This addition cannot overflow 32 bits;
7617 * however it may overflow considered as a signed
7618 * operation, in which case we must set the Q flag.
7619 */
7620 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7621 }
7d1b0095 7622 tcg_temp_free_i32(tmp2);
9ee6e8bb 7623 if (insn & (1 << 22)) {
5e3f878a 7624 /* smlald, smlsld */
a7812ae4
PB
7625 tmp64 = tcg_temp_new_i64();
7626 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7627 tcg_temp_free_i32(tmp);
a7812ae4
PB
7628 gen_addq(s, tmp64, rd, rn);
7629 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7630 tcg_temp_free_i64(tmp64);
9ee6e8bb 7631 } else {
5e3f878a 7632 /* smuad, smusd, smlad, smlsd */
22478e79 7633 if (rd != 15)
9ee6e8bb 7634 {
22478e79 7635 tmp2 = load_reg(s, rd);
5e3f878a 7636 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7637 tcg_temp_free_i32(tmp2);
9ee6e8bb 7638 }
22478e79 7639 store_reg(s, rn, tmp);
9ee6e8bb 7640 }
41e9564d
PM
7641 break;
7642 default:
7643 goto illegal_op;
9ee6e8bb
PB
7644 }
7645 break;
7646 case 3:
7647 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7648 switch (op1) {
7649 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7650 ARCH(6);
7651 tmp = load_reg(s, rm);
7652 tmp2 = load_reg(s, rs);
7653 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7654 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7655 if (rd != 15) {
7656 tmp2 = load_reg(s, rd);
6ddbc6e4 7657 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7658 tcg_temp_free_i32(tmp2);
9ee6e8bb 7659 }
ded9d295 7660 store_reg(s, rn, tmp);
9ee6e8bb
PB
7661 break;
7662 case 0x20: case 0x24: case 0x28: case 0x2c:
7663 /* Bitfield insert/clear. */
7664 ARCH(6T2);
7665 shift = (insn >> 7) & 0x1f;
7666 i = (insn >> 16) & 0x1f;
7667 i = i + 1 - shift;
7668 if (rm == 15) {
7d1b0095 7669 tmp = tcg_temp_new_i32();
5e3f878a 7670 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7671 } else {
5e3f878a 7672 tmp = load_reg(s, rm);
9ee6e8bb
PB
7673 }
7674 if (i != 32) {
5e3f878a 7675 tmp2 = load_reg(s, rd);
8f8e3aa4 7676 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7677 tcg_temp_free_i32(tmp2);
9ee6e8bb 7678 }
5e3f878a 7679 store_reg(s, rd, tmp);
9ee6e8bb
PB
7680 break;
7681 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7682 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7683 ARCH(6T2);
5e3f878a 7684 tmp = load_reg(s, rm);
9ee6e8bb
PB
7685 shift = (insn >> 7) & 0x1f;
7686 i = ((insn >> 16) & 0x1f) + 1;
7687 if (shift + i > 32)
7688 goto illegal_op;
7689 if (i < 32) {
7690 if (op1 & 0x20) {
5e3f878a 7691 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7692 } else {
5e3f878a 7693 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7694 }
7695 }
5e3f878a 7696 store_reg(s, rd, tmp);
9ee6e8bb
PB
7697 break;
7698 default:
7699 goto illegal_op;
7700 }
7701 break;
7702 }
7703 break;
7704 }
7705 do_ldst:
7706 /* Check for undefined extension instructions
7707 * per the ARM Bible IE:
7708 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7709 */
7710 sh = (0xf << 20) | (0xf << 4);
7711 if (op1 == 0x7 && ((insn & sh) == sh))
7712 {
7713 goto illegal_op;
7714 }
7715 /* load/store byte/word */
7716 rn = (insn >> 16) & 0xf;
7717 rd = (insn >> 12) & 0xf;
b0109805 7718 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7719 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7720 if (insn & (1 << 24))
b0109805 7721 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7722 if (insn & (1 << 20)) {
7723 /* load */
9ee6e8bb 7724 if (insn & (1 << 22)) {
b0109805 7725 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7726 } else {
b0109805 7727 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7728 }
9ee6e8bb
PB
7729 } else {
7730 /* store */
b0109805 7731 tmp = load_reg(s, rd);
9ee6e8bb 7732 if (insn & (1 << 22))
b0109805 7733 gen_st8(tmp, tmp2, i);
9ee6e8bb 7734 else
b0109805 7735 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7736 }
7737 if (!(insn & (1 << 24))) {
b0109805
PB
7738 gen_add_data_offset(s, insn, tmp2);
7739 store_reg(s, rn, tmp2);
7740 } else if (insn & (1 << 21)) {
7741 store_reg(s, rn, tmp2);
7742 } else {
7d1b0095 7743 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7744 }
7745 if (insn & (1 << 20)) {
7746 /* Complete the load. */
be5e7a76 7747 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7748 }
7749 break;
7750 case 0x08:
7751 case 0x09:
7752 {
7753 int j, n, user, loaded_base;
b0109805 7754 TCGv loaded_var;
9ee6e8bb
PB
7755 /* load/store multiple words */
7756 /* XXX: store correct base if write back */
7757 user = 0;
7758 if (insn & (1 << 22)) {
7759 if (IS_USER(s))
7760 goto illegal_op; /* only usable in supervisor mode */
7761
7762 if ((insn & (1 << 15)) == 0)
7763 user = 1;
7764 }
7765 rn = (insn >> 16) & 0xf;
b0109805 7766 addr = load_reg(s, rn);
9ee6e8bb
PB
7767
7768 /* compute total size */
7769 loaded_base = 0;
a50f5b91 7770 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7771 n = 0;
7772 for(i=0;i<16;i++) {
7773 if (insn & (1 << i))
7774 n++;
7775 }
7776 /* XXX: test invalid n == 0 case ? */
7777 if (insn & (1 << 23)) {
7778 if (insn & (1 << 24)) {
7779 /* pre increment */
b0109805 7780 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7781 } else {
7782 /* post increment */
7783 }
7784 } else {
7785 if (insn & (1 << 24)) {
7786 /* pre decrement */
b0109805 7787 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7788 } else {
7789 /* post decrement */
7790 if (n != 1)
b0109805 7791 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7792 }
7793 }
7794 j = 0;
7795 for(i=0;i<16;i++) {
7796 if (insn & (1 << i)) {
7797 if (insn & (1 << 20)) {
7798 /* load */
b0109805 7799 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7800 if (user) {
b75263d6
JR
7801 tmp2 = tcg_const_i32(i);
7802 gen_helper_set_user_reg(tmp2, tmp);
7803 tcg_temp_free_i32(tmp2);
7d1b0095 7804 tcg_temp_free_i32(tmp);
9ee6e8bb 7805 } else if (i == rn) {
b0109805 7806 loaded_var = tmp;
9ee6e8bb
PB
7807 loaded_base = 1;
7808 } else {
be5e7a76 7809 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7810 }
7811 } else {
7812 /* store */
7813 if (i == 15) {
7814 /* special case: r15 = PC + 8 */
7815 val = (long)s->pc + 4;
7d1b0095 7816 tmp = tcg_temp_new_i32();
b0109805 7817 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7818 } else if (user) {
7d1b0095 7819 tmp = tcg_temp_new_i32();
b75263d6
JR
7820 tmp2 = tcg_const_i32(i);
7821 gen_helper_get_user_reg(tmp, tmp2);
7822 tcg_temp_free_i32(tmp2);
9ee6e8bb 7823 } else {
b0109805 7824 tmp = load_reg(s, i);
9ee6e8bb 7825 }
b0109805 7826 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7827 }
7828 j++;
7829 /* no need to add after the last transfer */
7830 if (j != n)
b0109805 7831 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7832 }
7833 }
7834 if (insn & (1 << 21)) {
7835 /* write back */
7836 if (insn & (1 << 23)) {
7837 if (insn & (1 << 24)) {
7838 /* pre increment */
7839 } else {
7840 /* post increment */
b0109805 7841 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7842 }
7843 } else {
7844 if (insn & (1 << 24)) {
7845 /* pre decrement */
7846 if (n != 1)
b0109805 7847 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7848 } else {
7849 /* post decrement */
b0109805 7850 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7851 }
7852 }
b0109805
PB
7853 store_reg(s, rn, addr);
7854 } else {
7d1b0095 7855 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7856 }
7857 if (loaded_base) {
b0109805 7858 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7859 }
7860 if ((insn & (1 << 22)) && !user) {
7861 /* Restore CPSR from SPSR. */
d9ba4830
PB
7862 tmp = load_cpu_field(spsr);
7863 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7864 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7865 s->is_jmp = DISAS_UPDATE;
7866 }
7867 }
7868 break;
7869 case 0xa:
7870 case 0xb:
7871 {
7872 int32_t offset;
7873
7874 /* branch (and link) */
7875 val = (int32_t)s->pc;
7876 if (insn & (1 << 24)) {
7d1b0095 7877 tmp = tcg_temp_new_i32();
5e3f878a
PB
7878 tcg_gen_movi_i32(tmp, val);
7879 store_reg(s, 14, tmp);
9ee6e8bb
PB
7880 }
7881 offset = (((int32_t)insn << 8) >> 8);
7882 val += (offset << 2) + 4;
7883 gen_jmp(s, val);
7884 }
7885 break;
7886 case 0xc:
7887 case 0xd:
7888 case 0xe:
7889 /* Coprocessor. */
7890 if (disas_coproc_insn(env, s, insn))
7891 goto illegal_op;
7892 break;
7893 case 0xf:
7894 /* swi */
5e3f878a 7895 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7896 s->is_jmp = DISAS_SWI;
7897 break;
7898 default:
7899 illegal_op:
bc4a0de0 7900 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7901 break;
7902 }
7903 }
7904}
7905
/* Predicate: Thumb-2 data-processing opcodes 0..7 are the logical
   operations; opcodes 8 and above are arithmetic. */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
7912
7913/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7914 then set condition code flags based on the result of the operation.
7915 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7916 to the high bit of T1.
7917 Returns zero if the opcode is valid. */
7918
7919static int
396e467c 7920gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7921{
7922 int logic_cc;
7923
7924 logic_cc = 0;
7925 switch (op) {
7926 case 0: /* and */
396e467c 7927 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7928 logic_cc = conds;
7929 break;
7930 case 1: /* bic */
f669df27 7931 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7932 logic_cc = conds;
7933 break;
7934 case 2: /* orr */
396e467c 7935 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7936 logic_cc = conds;
7937 break;
7938 case 3: /* orn */
29501f1b 7939 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7940 logic_cc = conds;
7941 break;
7942 case 4: /* eor */
396e467c 7943 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7944 logic_cc = conds;
7945 break;
7946 case 8: /* add */
7947 if (conds)
396e467c 7948 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7949 else
396e467c 7950 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7951 break;
7952 case 10: /* adc */
7953 if (conds)
396e467c 7954 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7955 else
396e467c 7956 gen_adc(t0, t1);
9ee6e8bb
PB
7957 break;
7958 case 11: /* sbc */
7959 if (conds)
396e467c 7960 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7961 else
396e467c 7962 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7963 break;
7964 case 13: /* sub */
7965 if (conds)
396e467c 7966 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7967 else
396e467c 7968 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7969 break;
7970 case 14: /* rsb */
7971 if (conds)
396e467c 7972 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7973 else
396e467c 7974 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7975 break;
7976 default: /* 5, 6, 7, 9, 12, 15. */
7977 return 1;
7978 }
7979 if (logic_cc) {
396e467c 7980 gen_logic_CC(t0);
9ee6e8bb 7981 if (shifter_out)
396e467c 7982 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7983 }
7984 return 0;
7985}
7986
7987/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7988 is not legal. */
7989static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7990{
b0109805 7991 uint32_t insn, imm, shift, offset;
9ee6e8bb 7992 uint32_t rd, rn, rm, rs;
b26eefb6 7993 TCGv tmp;
6ddbc6e4
PB
7994 TCGv tmp2;
7995 TCGv tmp3;
b0109805 7996 TCGv addr;
a7812ae4 7997 TCGv_i64 tmp64;
9ee6e8bb
PB
7998 int op;
7999 int shiftop;
8000 int conds;
8001 int logic_cc;
8002
8003 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8004 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8005 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8006 16-bit instructions to get correct prefetch abort behavior. */
8007 insn = insn_hw1;
8008 if ((insn & (1 << 12)) == 0) {
be5e7a76 8009 ARCH(5);
9ee6e8bb
PB
8010 /* Second half of blx. */
8011 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8012 tmp = load_reg(s, 14);
8013 tcg_gen_addi_i32(tmp, tmp, offset);
8014 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8015
7d1b0095 8016 tmp2 = tcg_temp_new_i32();
b0109805 8017 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8018 store_reg(s, 14, tmp2);
8019 gen_bx(s, tmp);
9ee6e8bb
PB
8020 return 0;
8021 }
8022 if (insn & (1 << 11)) {
8023 /* Second half of bl. */
8024 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8025 tmp = load_reg(s, 14);
6a0d8a1d 8026 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8027
7d1b0095 8028 tmp2 = tcg_temp_new_i32();
b0109805 8029 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8030 store_reg(s, 14, tmp2);
8031 gen_bx(s, tmp);
9ee6e8bb
PB
8032 return 0;
8033 }
8034 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8035 /* Instruction spans a page boundary. Implement it as two
8036 16-bit instructions in case the second half causes an
8037 prefetch abort. */
8038 offset = ((int32_t)insn << 21) >> 9;
396e467c 8039 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8040 return 0;
8041 }
8042 /* Fall through to 32-bit decode. */
8043 }
8044
8045 insn = lduw_code(s->pc);
8046 s->pc += 2;
8047 insn |= (uint32_t)insn_hw1 << 16;
8048
8049 if ((insn & 0xf800e800) != 0xf000e800) {
8050 ARCH(6T2);
8051 }
8052
8053 rn = (insn >> 16) & 0xf;
8054 rs = (insn >> 12) & 0xf;
8055 rd = (insn >> 8) & 0xf;
8056 rm = insn & 0xf;
8057 switch ((insn >> 25) & 0xf) {
8058 case 0: case 1: case 2: case 3:
8059 /* 16-bit instructions. Should never happen. */
8060 abort();
8061 case 4:
8062 if (insn & (1 << 22)) {
8063 /* Other load/store, table branch. */
8064 if (insn & 0x01200000) {
8065 /* Load/store doubleword. */
8066 if (rn == 15) {
7d1b0095 8067 addr = tcg_temp_new_i32();
b0109805 8068 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8069 } else {
b0109805 8070 addr = load_reg(s, rn);
9ee6e8bb
PB
8071 }
8072 offset = (insn & 0xff) * 4;
8073 if ((insn & (1 << 23)) == 0)
8074 offset = -offset;
8075 if (insn & (1 << 24)) {
b0109805 8076 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8077 offset = 0;
8078 }
8079 if (insn & (1 << 20)) {
8080 /* ldrd */
b0109805
PB
8081 tmp = gen_ld32(addr, IS_USER(s));
8082 store_reg(s, rs, tmp);
8083 tcg_gen_addi_i32(addr, addr, 4);
8084 tmp = gen_ld32(addr, IS_USER(s));
8085 store_reg(s, rd, tmp);
9ee6e8bb
PB
8086 } else {
8087 /* strd */
b0109805
PB
8088 tmp = load_reg(s, rs);
8089 gen_st32(tmp, addr, IS_USER(s));
8090 tcg_gen_addi_i32(addr, addr, 4);
8091 tmp = load_reg(s, rd);
8092 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8093 }
8094 if (insn & (1 << 21)) {
8095 /* Base writeback. */
8096 if (rn == 15)
8097 goto illegal_op;
b0109805
PB
8098 tcg_gen_addi_i32(addr, addr, offset - 4);
8099 store_reg(s, rn, addr);
8100 } else {
7d1b0095 8101 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8102 }
8103 } else if ((insn & (1 << 23)) == 0) {
8104 /* Load/store exclusive word. */
3174f8e9 8105 addr = tcg_temp_local_new();
98a46317 8106 load_reg_var(s, addr, rn);
426f5abc 8107 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8108 if (insn & (1 << 20)) {
426f5abc 8109 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8110 } else {
426f5abc 8111 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8112 }
3174f8e9 8113 tcg_temp_free(addr);
9ee6e8bb
PB
8114 } else if ((insn & (1 << 6)) == 0) {
8115 /* Table Branch. */
8116 if (rn == 15) {
7d1b0095 8117 addr = tcg_temp_new_i32();
b0109805 8118 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8119 } else {
b0109805 8120 addr = load_reg(s, rn);
9ee6e8bb 8121 }
b26eefb6 8122 tmp = load_reg(s, rm);
b0109805 8123 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8124 if (insn & (1 << 4)) {
8125 /* tbh */
b0109805 8126 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8127 tcg_temp_free_i32(tmp);
b0109805 8128 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8129 } else { /* tbb */
7d1b0095 8130 tcg_temp_free_i32(tmp);
b0109805 8131 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8132 }
7d1b0095 8133 tcg_temp_free_i32(addr);
b0109805
PB
8134 tcg_gen_shli_i32(tmp, tmp, 1);
8135 tcg_gen_addi_i32(tmp, tmp, s->pc);
8136 store_reg(s, 15, tmp);
9ee6e8bb
PB
8137 } else {
8138 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8139 ARCH(7);
9ee6e8bb 8140 op = (insn >> 4) & 0x3;
426f5abc
PB
8141 if (op == 2) {
8142 goto illegal_op;
8143 }
3174f8e9 8144 addr = tcg_temp_local_new();
98a46317 8145 load_reg_var(s, addr, rn);
9ee6e8bb 8146 if (insn & (1 << 20)) {
426f5abc 8147 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8148 } else {
426f5abc 8149 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8150 }
3174f8e9 8151 tcg_temp_free(addr);
9ee6e8bb
PB
8152 }
8153 } else {
8154 /* Load/store multiple, RFE, SRS. */
8155 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8156 /* Not available in user mode. */
b0109805 8157 if (IS_USER(s))
9ee6e8bb
PB
8158 goto illegal_op;
8159 if (insn & (1 << 20)) {
8160 /* rfe */
b0109805
PB
8161 addr = load_reg(s, rn);
8162 if ((insn & (1 << 24)) == 0)
8163 tcg_gen_addi_i32(addr, addr, -8);
8164 /* Load PC into tmp and CPSR into tmp2. */
8165 tmp = gen_ld32(addr, 0);
8166 tcg_gen_addi_i32(addr, addr, 4);
8167 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8168 if (insn & (1 << 21)) {
8169 /* Base writeback. */
b0109805
PB
8170 if (insn & (1 << 24)) {
8171 tcg_gen_addi_i32(addr, addr, 4);
8172 } else {
8173 tcg_gen_addi_i32(addr, addr, -4);
8174 }
8175 store_reg(s, rn, addr);
8176 } else {
7d1b0095 8177 tcg_temp_free_i32(addr);
9ee6e8bb 8178 }
b0109805 8179 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8180 } else {
8181 /* srs */
8182 op = (insn & 0x1f);
7d1b0095 8183 addr = tcg_temp_new_i32();
39ea3d4e
PM
8184 tmp = tcg_const_i32(op);
8185 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8186 tcg_temp_free_i32(tmp);
9ee6e8bb 8187 if ((insn & (1 << 24)) == 0) {
b0109805 8188 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8189 }
b0109805
PB
8190 tmp = load_reg(s, 14);
8191 gen_st32(tmp, addr, 0);
8192 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8193 tmp = tcg_temp_new_i32();
b0109805
PB
8194 gen_helper_cpsr_read(tmp);
8195 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8196 if (insn & (1 << 21)) {
8197 if ((insn & (1 << 24)) == 0) {
b0109805 8198 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8199 } else {
b0109805 8200 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8201 }
39ea3d4e
PM
8202 tmp = tcg_const_i32(op);
8203 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8204 tcg_temp_free_i32(tmp);
b0109805 8205 } else {
7d1b0095 8206 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8207 }
8208 }
8209 } else {
5856d44e
YO
8210 int i, loaded_base = 0;
8211 TCGv loaded_var;
9ee6e8bb 8212 /* Load/store multiple. */
b0109805 8213 addr = load_reg(s, rn);
9ee6e8bb
PB
8214 offset = 0;
8215 for (i = 0; i < 16; i++) {
8216 if (insn & (1 << i))
8217 offset += 4;
8218 }
8219 if (insn & (1 << 24)) {
b0109805 8220 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8221 }
8222
5856d44e 8223 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8224 for (i = 0; i < 16; i++) {
8225 if ((insn & (1 << i)) == 0)
8226 continue;
8227 if (insn & (1 << 20)) {
8228 /* Load. */
b0109805 8229 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8230 if (i == 15) {
b0109805 8231 gen_bx(s, tmp);
5856d44e
YO
8232 } else if (i == rn) {
8233 loaded_var = tmp;
8234 loaded_base = 1;
9ee6e8bb 8235 } else {
b0109805 8236 store_reg(s, i, tmp);
9ee6e8bb
PB
8237 }
8238 } else {
8239 /* Store. */
b0109805
PB
8240 tmp = load_reg(s, i);
8241 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8242 }
b0109805 8243 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8244 }
5856d44e
YO
8245 if (loaded_base) {
8246 store_reg(s, rn, loaded_var);
8247 }
9ee6e8bb
PB
8248 if (insn & (1 << 21)) {
8249 /* Base register writeback. */
8250 if (insn & (1 << 24)) {
b0109805 8251 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8252 }
8253 /* Fault if writeback register is in register list. */
8254 if (insn & (1 << rn))
8255 goto illegal_op;
b0109805
PB
8256 store_reg(s, rn, addr);
8257 } else {
7d1b0095 8258 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8259 }
8260 }
8261 }
8262 break;
2af9ab77
JB
8263 case 5:
8264
9ee6e8bb 8265 op = (insn >> 21) & 0xf;
2af9ab77
JB
8266 if (op == 6) {
8267 /* Halfword pack. */
8268 tmp = load_reg(s, rn);
8269 tmp2 = load_reg(s, rm);
8270 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8271 if (insn & (1 << 5)) {
8272 /* pkhtb */
8273 if (shift == 0)
8274 shift = 31;
8275 tcg_gen_sari_i32(tmp2, tmp2, shift);
8276 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8277 tcg_gen_ext16u_i32(tmp2, tmp2);
8278 } else {
8279 /* pkhbt */
8280 if (shift)
8281 tcg_gen_shli_i32(tmp2, tmp2, shift);
8282 tcg_gen_ext16u_i32(tmp, tmp);
8283 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8284 }
8285 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8286 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8287 store_reg(s, rd, tmp);
8288 } else {
2af9ab77
JB
8289 /* Data processing register constant shift. */
8290 if (rn == 15) {
7d1b0095 8291 tmp = tcg_temp_new_i32();
2af9ab77
JB
8292 tcg_gen_movi_i32(tmp, 0);
8293 } else {
8294 tmp = load_reg(s, rn);
8295 }
8296 tmp2 = load_reg(s, rm);
8297
8298 shiftop = (insn >> 4) & 3;
8299 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8300 conds = (insn & (1 << 20)) != 0;
8301 logic_cc = (conds && thumb2_logic_op(op));
8302 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8303 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8304 goto illegal_op;
7d1b0095 8305 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8306 if (rd != 15) {
8307 store_reg(s, rd, tmp);
8308 } else {
7d1b0095 8309 tcg_temp_free_i32(tmp);
2af9ab77 8310 }
3174f8e9 8311 }
9ee6e8bb
PB
8312 break;
8313 case 13: /* Misc data processing. */
8314 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8315 if (op < 4 && (insn & 0xf000) != 0xf000)
8316 goto illegal_op;
8317 switch (op) {
8318 case 0: /* Register controlled shift. */
8984bd2e
PB
8319 tmp = load_reg(s, rn);
8320 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8321 if ((insn & 0x70) != 0)
8322 goto illegal_op;
8323 op = (insn >> 21) & 3;
8984bd2e
PB
8324 logic_cc = (insn & (1 << 20)) != 0;
8325 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8326 if (logic_cc)
8327 gen_logic_CC(tmp);
21aeb343 8328 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8329 break;
8330 case 1: /* Sign/zero extend. */
5e3f878a 8331 tmp = load_reg(s, rm);
9ee6e8bb 8332 shift = (insn >> 4) & 3;
1301f322 8333 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8334 rotate, a shift is sufficient. */
8335 if (shift != 0)
f669df27 8336 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8337 op = (insn >> 20) & 7;
8338 switch (op) {
5e3f878a
PB
8339 case 0: gen_sxth(tmp); break;
8340 case 1: gen_uxth(tmp); break;
8341 case 2: gen_sxtb16(tmp); break;
8342 case 3: gen_uxtb16(tmp); break;
8343 case 4: gen_sxtb(tmp); break;
8344 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8345 default: goto illegal_op;
8346 }
8347 if (rn != 15) {
5e3f878a 8348 tmp2 = load_reg(s, rn);
9ee6e8bb 8349 if ((op >> 1) == 1) {
5e3f878a 8350 gen_add16(tmp, tmp2);
9ee6e8bb 8351 } else {
5e3f878a 8352 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8353 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8354 }
8355 }
5e3f878a 8356 store_reg(s, rd, tmp);
9ee6e8bb
PB
8357 break;
8358 case 2: /* SIMD add/subtract. */
8359 op = (insn >> 20) & 7;
8360 shift = (insn >> 4) & 7;
8361 if ((op & 3) == 3 || (shift & 3) == 3)
8362 goto illegal_op;
6ddbc6e4
PB
8363 tmp = load_reg(s, rn);
8364 tmp2 = load_reg(s, rm);
8365 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8366 tcg_temp_free_i32(tmp2);
6ddbc6e4 8367 store_reg(s, rd, tmp);
9ee6e8bb
PB
8368 break;
8369 case 3: /* Other data processing. */
8370 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8371 if (op < 4) {
8372 /* Saturating add/subtract. */
d9ba4830
PB
8373 tmp = load_reg(s, rn);
8374 tmp2 = load_reg(s, rm);
9ee6e8bb 8375 if (op & 1)
4809c612
JB
8376 gen_helper_double_saturate(tmp, tmp);
8377 if (op & 2)
d9ba4830 8378 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8379 else
d9ba4830 8380 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8381 tcg_temp_free_i32(tmp2);
9ee6e8bb 8382 } else {
d9ba4830 8383 tmp = load_reg(s, rn);
9ee6e8bb
PB
8384 switch (op) {
8385 case 0x0a: /* rbit */
d9ba4830 8386 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8387 break;
8388 case 0x08: /* rev */
66896cb8 8389 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8390 break;
8391 case 0x09: /* rev16 */
d9ba4830 8392 gen_rev16(tmp);
9ee6e8bb
PB
8393 break;
8394 case 0x0b: /* revsh */
d9ba4830 8395 gen_revsh(tmp);
9ee6e8bb
PB
8396 break;
8397 case 0x10: /* sel */
d9ba4830 8398 tmp2 = load_reg(s, rm);
7d1b0095 8399 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8400 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8401 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8402 tcg_temp_free_i32(tmp3);
8403 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8404 break;
8405 case 0x18: /* clz */
d9ba4830 8406 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8407 break;
8408 default:
8409 goto illegal_op;
8410 }
8411 }
d9ba4830 8412 store_reg(s, rd, tmp);
9ee6e8bb
PB
8413 break;
8414 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8415 op = (insn >> 4) & 0xf;
d9ba4830
PB
8416 tmp = load_reg(s, rn);
8417 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8418 switch ((insn >> 20) & 7) {
8419 case 0: /* 32 x 32 -> 32 */
d9ba4830 8420 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8421 tcg_temp_free_i32(tmp2);
9ee6e8bb 8422 if (rs != 15) {
d9ba4830 8423 tmp2 = load_reg(s, rs);
9ee6e8bb 8424 if (op)
d9ba4830 8425 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8426 else
d9ba4830 8427 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8428 tcg_temp_free_i32(tmp2);
9ee6e8bb 8429 }
9ee6e8bb
PB
8430 break;
8431 case 1: /* 16 x 16 -> 32 */
d9ba4830 8432 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8433 tcg_temp_free_i32(tmp2);
9ee6e8bb 8434 if (rs != 15) {
d9ba4830
PB
8435 tmp2 = load_reg(s, rs);
8436 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8437 tcg_temp_free_i32(tmp2);
9ee6e8bb 8438 }
9ee6e8bb
PB
8439 break;
8440 case 2: /* Dual multiply add. */
8441 case 4: /* Dual multiply subtract. */
8442 if (op)
d9ba4830
PB
8443 gen_swap_half(tmp2);
8444 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8445 if (insn & (1 << 22)) {
e1d177b9 8446 /* This subtraction cannot overflow. */
d9ba4830 8447 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8448 } else {
e1d177b9
PM
8449 /* This addition cannot overflow 32 bits;
8450 * however it may overflow considered as a signed
8451 * operation, in which case we must set the Q flag.
8452 */
8453 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8454 }
7d1b0095 8455 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8456 if (rs != 15)
8457 {
d9ba4830
PB
8458 tmp2 = load_reg(s, rs);
8459 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8460 tcg_temp_free_i32(tmp2);
9ee6e8bb 8461 }
9ee6e8bb
PB
8462 break;
8463 case 3: /* 32 * 16 -> 32msb */
8464 if (op)
d9ba4830 8465 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8466 else
d9ba4830 8467 gen_sxth(tmp2);
a7812ae4
PB
8468 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8469 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8470 tmp = tcg_temp_new_i32();
a7812ae4 8471 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8472 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8473 if (rs != 15)
8474 {
d9ba4830
PB
8475 tmp2 = load_reg(s, rs);
8476 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8477 tcg_temp_free_i32(tmp2);
9ee6e8bb 8478 }
9ee6e8bb 8479 break;
838fa72d
AJ
8480 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8481 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8482 if (rs != 15) {
838fa72d
AJ
8483 tmp = load_reg(s, rs);
8484 if (insn & (1 << 20)) {
8485 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8486 } else {
838fa72d 8487 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8488 }
2c0262af 8489 }
838fa72d
AJ
8490 if (insn & (1 << 4)) {
8491 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8492 }
8493 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8494 tmp = tcg_temp_new_i32();
838fa72d
AJ
8495 tcg_gen_trunc_i64_i32(tmp, tmp64);
8496 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8497 break;
8498 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8499 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8500 tcg_temp_free_i32(tmp2);
9ee6e8bb 8501 if (rs != 15) {
d9ba4830
PB
8502 tmp2 = load_reg(s, rs);
8503 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8504 tcg_temp_free_i32(tmp2);
5fd46862 8505 }
9ee6e8bb 8506 break;
2c0262af 8507 }
d9ba4830 8508 store_reg(s, rd, tmp);
2c0262af 8509 break;
9ee6e8bb
PB
8510 case 6: case 7: /* 64-bit multiply, Divide. */
8511 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8512 tmp = load_reg(s, rn);
8513 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8514 if ((op & 0x50) == 0x10) {
8515 /* sdiv, udiv */
8516 if (!arm_feature(env, ARM_FEATURE_DIV))
8517 goto illegal_op;
8518 if (op & 0x20)
5e3f878a 8519 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8520 else
5e3f878a 8521 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8522 tcg_temp_free_i32(tmp2);
5e3f878a 8523 store_reg(s, rd, tmp);
9ee6e8bb
PB
8524 } else if ((op & 0xe) == 0xc) {
8525 /* Dual multiply accumulate long. */
8526 if (op & 1)
5e3f878a
PB
8527 gen_swap_half(tmp2);
8528 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8529 if (op & 0x10) {
5e3f878a 8530 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8531 } else {
5e3f878a 8532 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8533 }
7d1b0095 8534 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8535 /* BUGFIX */
8536 tmp64 = tcg_temp_new_i64();
8537 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8538 tcg_temp_free_i32(tmp);
a7812ae4
PB
8539 gen_addq(s, tmp64, rs, rd);
8540 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8541 tcg_temp_free_i64(tmp64);
2c0262af 8542 } else {
9ee6e8bb
PB
8543 if (op & 0x20) {
8544 /* Unsigned 64-bit multiply */
a7812ae4 8545 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8546 } else {
9ee6e8bb
PB
8547 if (op & 8) {
8548 /* smlalxy */
5e3f878a 8549 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8550 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8551 tmp64 = tcg_temp_new_i64();
8552 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8553 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8554 } else {
8555 /* Signed 64-bit multiply */
a7812ae4 8556 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8557 }
b5ff1b31 8558 }
9ee6e8bb
PB
8559 if (op & 4) {
8560 /* umaal */
a7812ae4
PB
8561 gen_addq_lo(s, tmp64, rs);
8562 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8563 } else if (op & 0x40) {
8564 /* 64-bit accumulate. */
a7812ae4 8565 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8566 }
a7812ae4 8567 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8568 tcg_temp_free_i64(tmp64);
5fd46862 8569 }
2c0262af 8570 break;
9ee6e8bb
PB
8571 }
8572 break;
8573 case 6: case 7: case 14: case 15:
8574 /* Coprocessor. */
8575 if (((insn >> 24) & 3) == 3) {
8576 /* Translate into the equivalent ARM encoding. */
f06053e3 8577 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8578 if (disas_neon_data_insn(env, s, insn))
8579 goto illegal_op;
8580 } else {
8581 if (insn & (1 << 28))
8582 goto illegal_op;
8583 if (disas_coproc_insn (env, s, insn))
8584 goto illegal_op;
8585 }
8586 break;
8587 case 8: case 9: case 10: case 11:
8588 if (insn & (1 << 15)) {
8589 /* Branches, misc control. */
8590 if (insn & 0x5000) {
8591 /* Unconditional branch. */
8592 /* signextend(hw1[10:0]) -> offset[:12]. */
8593 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8594 /* hw1[10:0] -> offset[11:1]. */
8595 offset |= (insn & 0x7ff) << 1;
8596 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8597 offset[24:22] already have the same value because of the
8598 sign extension above. */
8599 offset ^= ((~insn) & (1 << 13)) << 10;
8600 offset ^= ((~insn) & (1 << 11)) << 11;
8601
9ee6e8bb
PB
8602 if (insn & (1 << 14)) {
8603 /* Branch and link. */
3174f8e9 8604 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8605 }
3b46e624 8606
b0109805 8607 offset += s->pc;
9ee6e8bb
PB
8608 if (insn & (1 << 12)) {
8609 /* b/bl */
b0109805 8610 gen_jmp(s, offset);
9ee6e8bb
PB
8611 } else {
8612 /* blx */
b0109805 8613 offset &= ~(uint32_t)2;
be5e7a76 8614 /* thumb2 bx, no need to check */
b0109805 8615 gen_bx_im(s, offset);
2c0262af 8616 }
9ee6e8bb
PB
8617 } else if (((insn >> 23) & 7) == 7) {
8618 /* Misc control */
8619 if (insn & (1 << 13))
8620 goto illegal_op;
8621
8622 if (insn & (1 << 26)) {
8623 /* Secure monitor call (v6Z) */
8624 goto illegal_op; /* not implemented. */
2c0262af 8625 } else {
9ee6e8bb
PB
8626 op = (insn >> 20) & 7;
8627 switch (op) {
8628 case 0: /* msr cpsr. */
8629 if (IS_M(env)) {
8984bd2e
PB
8630 tmp = load_reg(s, rn);
8631 addr = tcg_const_i32(insn & 0xff);
8632 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8633 tcg_temp_free_i32(addr);
7d1b0095 8634 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8635 gen_lookup_tb(s);
8636 break;
8637 }
8638 /* fall through */
8639 case 1: /* msr spsr. */
8640 if (IS_M(env))
8641 goto illegal_op;
2fbac54b
FN
8642 tmp = load_reg(s, rn);
8643 if (gen_set_psr(s,
9ee6e8bb 8644 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8645 op == 1, tmp))
9ee6e8bb
PB
8646 goto illegal_op;
8647 break;
8648 case 2: /* cps, nop-hint. */
8649 if (((insn >> 8) & 7) == 0) {
8650 gen_nop_hint(s, insn & 0xff);
8651 }
8652 /* Implemented as NOP in user mode. */
8653 if (IS_USER(s))
8654 break;
8655 offset = 0;
8656 imm = 0;
8657 if (insn & (1 << 10)) {
8658 if (insn & (1 << 7))
8659 offset |= CPSR_A;
8660 if (insn & (1 << 6))
8661 offset |= CPSR_I;
8662 if (insn & (1 << 5))
8663 offset |= CPSR_F;
8664 if (insn & (1 << 9))
8665 imm = CPSR_A | CPSR_I | CPSR_F;
8666 }
8667 if (insn & (1 << 8)) {
8668 offset |= 0x1f;
8669 imm |= (insn & 0x1f);
8670 }
8671 if (offset) {
2fbac54b 8672 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8673 }
8674 break;
8675 case 3: /* Special control operations. */
426f5abc 8676 ARCH(7);
9ee6e8bb
PB
8677 op = (insn >> 4) & 0xf;
8678 switch (op) {
8679 case 2: /* clrex */
426f5abc 8680 gen_clrex(s);
9ee6e8bb
PB
8681 break;
8682 case 4: /* dsb */
8683 case 5: /* dmb */
8684 case 6: /* isb */
8685 /* These execute as NOPs. */
9ee6e8bb
PB
8686 break;
8687 default:
8688 goto illegal_op;
8689 }
8690 break;
8691 case 4: /* bxj */
8692 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8693 tmp = load_reg(s, rn);
8694 gen_bx(s, tmp);
9ee6e8bb
PB
8695 break;
8696 case 5: /* Exception return. */
b8b45b68
RV
8697 if (IS_USER(s)) {
8698 goto illegal_op;
8699 }
8700 if (rn != 14 || rd != 15) {
8701 goto illegal_op;
8702 }
8703 tmp = load_reg(s, rn);
8704 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8705 gen_exception_return(s, tmp);
8706 break;
9ee6e8bb 8707 case 6: /* mrs cpsr. */
7d1b0095 8708 tmp = tcg_temp_new_i32();
9ee6e8bb 8709 if (IS_M(env)) {
8984bd2e
PB
8710 addr = tcg_const_i32(insn & 0xff);
8711 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8712 tcg_temp_free_i32(addr);
9ee6e8bb 8713 } else {
8984bd2e 8714 gen_helper_cpsr_read(tmp);
9ee6e8bb 8715 }
8984bd2e 8716 store_reg(s, rd, tmp);
9ee6e8bb
PB
8717 break;
8718 case 7: /* mrs spsr. */
8719 /* Not accessible in user mode. */
8720 if (IS_USER(s) || IS_M(env))
8721 goto illegal_op;
d9ba4830
PB
8722 tmp = load_cpu_field(spsr);
8723 store_reg(s, rd, tmp);
9ee6e8bb 8724 break;
2c0262af
FB
8725 }
8726 }
9ee6e8bb
PB
8727 } else {
8728 /* Conditional branch. */
8729 op = (insn >> 22) & 0xf;
8730 /* Generate a conditional jump to next instruction. */
8731 s->condlabel = gen_new_label();
d9ba4830 8732 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8733 s->condjmp = 1;
8734
8735 /* offset[11:1] = insn[10:0] */
8736 offset = (insn & 0x7ff) << 1;
8737 /* offset[17:12] = insn[21:16]. */
8738 offset |= (insn & 0x003f0000) >> 4;
8739 /* offset[31:20] = insn[26]. */
8740 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8741 /* offset[18] = insn[13]. */
8742 offset |= (insn & (1 << 13)) << 5;
8743 /* offset[19] = insn[11]. */
8744 offset |= (insn & (1 << 11)) << 8;
8745
8746 /* jump to the offset */
b0109805 8747 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8748 }
8749 } else {
8750 /* Data processing immediate. */
8751 if (insn & (1 << 25)) {
8752 if (insn & (1 << 24)) {
8753 if (insn & (1 << 20))
8754 goto illegal_op;
8755 /* Bitfield/Saturate. */
8756 op = (insn >> 21) & 7;
8757 imm = insn & 0x1f;
8758 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8759 if (rn == 15) {
7d1b0095 8760 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8761 tcg_gen_movi_i32(tmp, 0);
8762 } else {
8763 tmp = load_reg(s, rn);
8764 }
9ee6e8bb
PB
8765 switch (op) {
8766 case 2: /* Signed bitfield extract. */
8767 imm++;
8768 if (shift + imm > 32)
8769 goto illegal_op;
8770 if (imm < 32)
6ddbc6e4 8771 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8772 break;
8773 case 6: /* Unsigned bitfield extract. */
8774 imm++;
8775 if (shift + imm > 32)
8776 goto illegal_op;
8777 if (imm < 32)
6ddbc6e4 8778 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8779 break;
8780 case 3: /* Bitfield insert/clear. */
8781 if (imm < shift)
8782 goto illegal_op;
8783 imm = imm + 1 - shift;
8784 if (imm != 32) {
6ddbc6e4 8785 tmp2 = load_reg(s, rd);
8f8e3aa4 8786 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8787 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8788 }
8789 break;
8790 case 7:
8791 goto illegal_op;
8792 default: /* Saturate. */
9ee6e8bb
PB
8793 if (shift) {
8794 if (op & 1)
6ddbc6e4 8795 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8796 else
6ddbc6e4 8797 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8798 }
6ddbc6e4 8799 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8800 if (op & 4) {
8801 /* Unsigned. */
9ee6e8bb 8802 if ((op & 1) && shift == 0)
6ddbc6e4 8803 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8804 else
6ddbc6e4 8805 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8806 } else {
9ee6e8bb 8807 /* Signed. */
9ee6e8bb 8808 if ((op & 1) && shift == 0)
6ddbc6e4 8809 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8810 else
6ddbc6e4 8811 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8812 }
b75263d6 8813 tcg_temp_free_i32(tmp2);
9ee6e8bb 8814 break;
2c0262af 8815 }
6ddbc6e4 8816 store_reg(s, rd, tmp);
9ee6e8bb
PB
8817 } else {
8818 imm = ((insn & 0x04000000) >> 15)
8819 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8820 if (insn & (1 << 22)) {
8821 /* 16-bit immediate. */
8822 imm |= (insn >> 4) & 0xf000;
8823 if (insn & (1 << 23)) {
8824 /* movt */
5e3f878a 8825 tmp = load_reg(s, rd);
86831435 8826 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8827 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8828 } else {
9ee6e8bb 8829 /* movw */
7d1b0095 8830 tmp = tcg_temp_new_i32();
5e3f878a 8831 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8832 }
8833 } else {
9ee6e8bb
PB
8834 /* Add/sub 12-bit immediate. */
8835 if (rn == 15) {
b0109805 8836 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8837 if (insn & (1 << 23))
b0109805 8838 offset -= imm;
9ee6e8bb 8839 else
b0109805 8840 offset += imm;
7d1b0095 8841 tmp = tcg_temp_new_i32();
5e3f878a 8842 tcg_gen_movi_i32(tmp, offset);
2c0262af 8843 } else {
5e3f878a 8844 tmp = load_reg(s, rn);
9ee6e8bb 8845 if (insn & (1 << 23))
5e3f878a 8846 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8847 else
5e3f878a 8848 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8849 }
9ee6e8bb 8850 }
5e3f878a 8851 store_reg(s, rd, tmp);
191abaa2 8852 }
9ee6e8bb
PB
8853 } else {
8854 int shifter_out = 0;
8855 /* modified 12-bit immediate. */
8856 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8857 imm = (insn & 0xff);
8858 switch (shift) {
8859 case 0: /* XY */
8860 /* Nothing to do. */
8861 break;
8862 case 1: /* 00XY00XY */
8863 imm |= imm << 16;
8864 break;
8865 case 2: /* XY00XY00 */
8866 imm |= imm << 16;
8867 imm <<= 8;
8868 break;
8869 case 3: /* XYXYXYXY */
8870 imm |= imm << 16;
8871 imm |= imm << 8;
8872 break;
8873 default: /* Rotated constant. */
8874 shift = (shift << 1) | (imm >> 7);
8875 imm |= 0x80;
8876 imm = imm << (32 - shift);
8877 shifter_out = 1;
8878 break;
b5ff1b31 8879 }
7d1b0095 8880 tmp2 = tcg_temp_new_i32();
3174f8e9 8881 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8882 rn = (insn >> 16) & 0xf;
3174f8e9 8883 if (rn == 15) {
7d1b0095 8884 tmp = tcg_temp_new_i32();
3174f8e9
FN
8885 tcg_gen_movi_i32(tmp, 0);
8886 } else {
8887 tmp = load_reg(s, rn);
8888 }
9ee6e8bb
PB
8889 op = (insn >> 21) & 0xf;
8890 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8891 shifter_out, tmp, tmp2))
9ee6e8bb 8892 goto illegal_op;
7d1b0095 8893 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8894 rd = (insn >> 8) & 0xf;
8895 if (rd != 15) {
3174f8e9
FN
8896 store_reg(s, rd, tmp);
8897 } else {
7d1b0095 8898 tcg_temp_free_i32(tmp);
2c0262af 8899 }
2c0262af 8900 }
9ee6e8bb
PB
8901 }
8902 break;
8903 case 12: /* Load/store single data item. */
8904 {
8905 int postinc = 0;
8906 int writeback = 0;
b0109805 8907 int user;
9ee6e8bb
PB
8908 if ((insn & 0x01100000) == 0x01000000) {
8909 if (disas_neon_ls_insn(env, s, insn))
c1713132 8910 goto illegal_op;
9ee6e8bb
PB
8911 break;
8912 }
a2fdc890
PM
8913 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8914 if (rs == 15) {
8915 if (!(insn & (1 << 20))) {
8916 goto illegal_op;
8917 }
8918 if (op != 2) {
8919 /* Byte or halfword load space with dest == r15 : memory hints.
8920 * Catch them early so we don't emit pointless addressing code.
8921 * This space is a mix of:
8922 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8923 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8924 * cores)
8925 * unallocated hints, which must be treated as NOPs
8926 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8927 * which is easiest for the decoding logic
8928 * Some space which must UNDEF
8929 */
8930 int op1 = (insn >> 23) & 3;
8931 int op2 = (insn >> 6) & 0x3f;
8932 if (op & 2) {
8933 goto illegal_op;
8934 }
8935 if (rn == 15) {
8936 /* UNPREDICTABLE or unallocated hint */
8937 return 0;
8938 }
8939 if (op1 & 1) {
8940 return 0; /* PLD* or unallocated hint */
8941 }
8942 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8943 return 0; /* PLD* or unallocated hint */
8944 }
8945 /* UNDEF space, or an UNPREDICTABLE */
8946 return 1;
8947 }
8948 }
b0109805 8949 user = IS_USER(s);
9ee6e8bb 8950 if (rn == 15) {
7d1b0095 8951 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8952 /* PC relative. */
8953 /* s->pc has already been incremented by 4. */
8954 imm = s->pc & 0xfffffffc;
8955 if (insn & (1 << 23))
8956 imm += insn & 0xfff;
8957 else
8958 imm -= insn & 0xfff;
b0109805 8959 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8960 } else {
b0109805 8961 addr = load_reg(s, rn);
9ee6e8bb
PB
8962 if (insn & (1 << 23)) {
8963 /* Positive offset. */
8964 imm = insn & 0xfff;
b0109805 8965 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8966 } else {
9ee6e8bb 8967 imm = insn & 0xff;
2a0308c5
PM
8968 switch ((insn >> 8) & 0xf) {
8969 case 0x0: /* Shifted Register. */
9ee6e8bb 8970 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8971 if (shift > 3) {
8972 tcg_temp_free_i32(addr);
18c9b560 8973 goto illegal_op;
2a0308c5 8974 }
b26eefb6 8975 tmp = load_reg(s, rm);
9ee6e8bb 8976 if (shift)
b26eefb6 8977 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8978 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8979 tcg_temp_free_i32(tmp);
9ee6e8bb 8980 break;
2a0308c5 8981 case 0xc: /* Negative offset. */
b0109805 8982 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8983 break;
2a0308c5 8984 case 0xe: /* User privilege. */
b0109805
PB
8985 tcg_gen_addi_i32(addr, addr, imm);
8986 user = 1;
9ee6e8bb 8987 break;
2a0308c5 8988 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8989 imm = -imm;
8990 /* Fall through. */
2a0308c5 8991 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8992 postinc = 1;
8993 writeback = 1;
8994 break;
2a0308c5 8995 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8996 imm = -imm;
8997 /* Fall through. */
2a0308c5 8998 case 0xf: /* Pre-increment. */
b0109805 8999 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9000 writeback = 1;
9001 break;
9002 default:
2a0308c5 9003 tcg_temp_free_i32(addr);
b7bcbe95 9004 goto illegal_op;
9ee6e8bb
PB
9005 }
9006 }
9007 }
9ee6e8bb
PB
9008 if (insn & (1 << 20)) {
9009 /* Load. */
a2fdc890
PM
9010 switch (op) {
9011 case 0: tmp = gen_ld8u(addr, user); break;
9012 case 4: tmp = gen_ld8s(addr, user); break;
9013 case 1: tmp = gen_ld16u(addr, user); break;
9014 case 5: tmp = gen_ld16s(addr, user); break;
9015 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
9016 default:
9017 tcg_temp_free_i32(addr);
9018 goto illegal_op;
a2fdc890
PM
9019 }
9020 if (rs == 15) {
9021 gen_bx(s, tmp);
9ee6e8bb 9022 } else {
a2fdc890 9023 store_reg(s, rs, tmp);
9ee6e8bb
PB
9024 }
9025 } else {
9026 /* Store. */
b0109805 9027 tmp = load_reg(s, rs);
9ee6e8bb 9028 switch (op) {
b0109805
PB
9029 case 0: gen_st8(tmp, addr, user); break;
9030 case 1: gen_st16(tmp, addr, user); break;
9031 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
9032 default:
9033 tcg_temp_free_i32(addr);
9034 goto illegal_op;
b7bcbe95 9035 }
2c0262af 9036 }
9ee6e8bb 9037 if (postinc)
b0109805
PB
9038 tcg_gen_addi_i32(addr, addr, imm);
9039 if (writeback) {
9040 store_reg(s, rn, addr);
9041 } else {
7d1b0095 9042 tcg_temp_free_i32(addr);
b0109805 9043 }
9ee6e8bb
PB
9044 }
9045 break;
9046 default:
9047 goto illegal_op;
2c0262af 9048 }
9ee6e8bb
PB
9049 return 0;
9050illegal_op:
9051 return 1;
2c0262af
FB
9052}
9053
9ee6e8bb 9054static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
9055{
9056 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9057 int32_t offset;
9058 int i;
b26eefb6 9059 TCGv tmp;
d9ba4830 9060 TCGv tmp2;
b0109805 9061 TCGv addr;
99c475ab 9062
9ee6e8bb
PB
9063 if (s->condexec_mask) {
9064 cond = s->condexec_cond;
bedd2912
JB
9065 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9066 s->condlabel = gen_new_label();
9067 gen_test_cc(cond ^ 1, s->condlabel);
9068 s->condjmp = 1;
9069 }
9ee6e8bb
PB
9070 }
9071
b5ff1b31 9072 insn = lduw_code(s->pc);
99c475ab 9073 s->pc += 2;
b5ff1b31 9074
99c475ab
FB
9075 switch (insn >> 12) {
9076 case 0: case 1:
396e467c 9077
99c475ab
FB
9078 rd = insn & 7;
9079 op = (insn >> 11) & 3;
9080 if (op == 3) {
9081 /* add/subtract */
9082 rn = (insn >> 3) & 7;
396e467c 9083 tmp = load_reg(s, rn);
99c475ab
FB
9084 if (insn & (1 << 10)) {
9085 /* immediate */
7d1b0095 9086 tmp2 = tcg_temp_new_i32();
396e467c 9087 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9088 } else {
9089 /* reg */
9090 rm = (insn >> 6) & 7;
396e467c 9091 tmp2 = load_reg(s, rm);
99c475ab 9092 }
9ee6e8bb
PB
9093 if (insn & (1 << 9)) {
9094 if (s->condexec_mask)
396e467c 9095 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9096 else
396e467c 9097 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
9098 } else {
9099 if (s->condexec_mask)
396e467c 9100 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9101 else
396e467c 9102 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 9103 }
7d1b0095 9104 tcg_temp_free_i32(tmp2);
396e467c 9105 store_reg(s, rd, tmp);
99c475ab
FB
9106 } else {
9107 /* shift immediate */
9108 rm = (insn >> 3) & 7;
9109 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9110 tmp = load_reg(s, rm);
9111 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9112 if (!s->condexec_mask)
9113 gen_logic_CC(tmp);
9114 store_reg(s, rd, tmp);
99c475ab
FB
9115 }
9116 break;
9117 case 2: case 3:
9118 /* arithmetic large immediate */
9119 op = (insn >> 11) & 3;
9120 rd = (insn >> 8) & 0x7;
396e467c 9121 if (op == 0) { /* mov */
7d1b0095 9122 tmp = tcg_temp_new_i32();
396e467c 9123 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9124 if (!s->condexec_mask)
396e467c
FN
9125 gen_logic_CC(tmp);
9126 store_reg(s, rd, tmp);
9127 } else {
9128 tmp = load_reg(s, rd);
7d1b0095 9129 tmp2 = tcg_temp_new_i32();
396e467c
FN
9130 tcg_gen_movi_i32(tmp2, insn & 0xff);
9131 switch (op) {
9132 case 1: /* cmp */
9133 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9134 tcg_temp_free_i32(tmp);
9135 tcg_temp_free_i32(tmp2);
396e467c
FN
9136 break;
9137 case 2: /* add */
9138 if (s->condexec_mask)
9139 tcg_gen_add_i32(tmp, tmp, tmp2);
9140 else
9141 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 9142 tcg_temp_free_i32(tmp2);
396e467c
FN
9143 store_reg(s, rd, tmp);
9144 break;
9145 case 3: /* sub */
9146 if (s->condexec_mask)
9147 tcg_gen_sub_i32(tmp, tmp, tmp2);
9148 else
9149 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 9150 tcg_temp_free_i32(tmp2);
396e467c
FN
9151 store_reg(s, rd, tmp);
9152 break;
9153 }
99c475ab 9154 }
99c475ab
FB
9155 break;
9156 case 4:
9157 if (insn & (1 << 11)) {
9158 rd = (insn >> 8) & 7;
5899f386
FB
9159 /* load pc-relative. Bit 1 of PC is ignored. */
9160 val = s->pc + 2 + ((insn & 0xff) * 4);
9161 val &= ~(uint32_t)2;
7d1b0095 9162 addr = tcg_temp_new_i32();
b0109805
PB
9163 tcg_gen_movi_i32(addr, val);
9164 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9165 tcg_temp_free_i32(addr);
b0109805 9166 store_reg(s, rd, tmp);
99c475ab
FB
9167 break;
9168 }
9169 if (insn & (1 << 10)) {
9170 /* data processing extended or blx */
9171 rd = (insn & 7) | ((insn >> 4) & 8);
9172 rm = (insn >> 3) & 0xf;
9173 op = (insn >> 8) & 3;
9174 switch (op) {
9175 case 0: /* add */
396e467c
FN
9176 tmp = load_reg(s, rd);
9177 tmp2 = load_reg(s, rm);
9178 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9179 tcg_temp_free_i32(tmp2);
396e467c 9180 store_reg(s, rd, tmp);
99c475ab
FB
9181 break;
9182 case 1: /* cmp */
396e467c
FN
9183 tmp = load_reg(s, rd);
9184 tmp2 = load_reg(s, rm);
9185 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9186 tcg_temp_free_i32(tmp2);
9187 tcg_temp_free_i32(tmp);
99c475ab
FB
9188 break;
9189 case 2: /* mov/cpy */
396e467c
FN
9190 tmp = load_reg(s, rm);
9191 store_reg(s, rd, tmp);
99c475ab
FB
9192 break;
9193 case 3:/* branch [and link] exchange thumb register */
b0109805 9194 tmp = load_reg(s, rm);
99c475ab 9195 if (insn & (1 << 7)) {
be5e7a76 9196 ARCH(5);
99c475ab 9197 val = (uint32_t)s->pc | 1;
7d1b0095 9198 tmp2 = tcg_temp_new_i32();
b0109805
PB
9199 tcg_gen_movi_i32(tmp2, val);
9200 store_reg(s, 14, tmp2);
99c475ab 9201 }
be5e7a76 9202 /* already thumb, no need to check */
d9ba4830 9203 gen_bx(s, tmp);
99c475ab
FB
9204 break;
9205 }
9206 break;
9207 }
9208
9209 /* data processing register */
9210 rd = insn & 7;
9211 rm = (insn >> 3) & 7;
9212 op = (insn >> 6) & 0xf;
9213 if (op == 2 || op == 3 || op == 4 || op == 7) {
9214 /* the shift/rotate ops want the operands backwards */
9215 val = rm;
9216 rm = rd;
9217 rd = val;
9218 val = 1;
9219 } else {
9220 val = 0;
9221 }
9222
396e467c 9223 if (op == 9) { /* neg */
7d1b0095 9224 tmp = tcg_temp_new_i32();
396e467c
FN
9225 tcg_gen_movi_i32(tmp, 0);
9226 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9227 tmp = load_reg(s, rd);
9228 } else {
9229 TCGV_UNUSED(tmp);
9230 }
99c475ab 9231
396e467c 9232 tmp2 = load_reg(s, rm);
5899f386 9233 switch (op) {
99c475ab 9234 case 0x0: /* and */
396e467c 9235 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9236 if (!s->condexec_mask)
396e467c 9237 gen_logic_CC(tmp);
99c475ab
FB
9238 break;
9239 case 0x1: /* eor */
396e467c 9240 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9241 if (!s->condexec_mask)
396e467c 9242 gen_logic_CC(tmp);
99c475ab
FB
9243 break;
9244 case 0x2: /* lsl */
9ee6e8bb 9245 if (s->condexec_mask) {
396e467c 9246 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9247 } else {
396e467c
FN
9248 gen_helper_shl_cc(tmp2, tmp2, tmp);
9249 gen_logic_CC(tmp2);
9ee6e8bb 9250 }
99c475ab
FB
9251 break;
9252 case 0x3: /* lsr */
9ee6e8bb 9253 if (s->condexec_mask) {
396e467c 9254 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9255 } else {
396e467c
FN
9256 gen_helper_shr_cc(tmp2, tmp2, tmp);
9257 gen_logic_CC(tmp2);
9ee6e8bb 9258 }
99c475ab
FB
9259 break;
9260 case 0x4: /* asr */
9ee6e8bb 9261 if (s->condexec_mask) {
396e467c 9262 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9263 } else {
396e467c
FN
9264 gen_helper_sar_cc(tmp2, tmp2, tmp);
9265 gen_logic_CC(tmp2);
9ee6e8bb 9266 }
99c475ab
FB
9267 break;
9268 case 0x5: /* adc */
9ee6e8bb 9269 if (s->condexec_mask)
396e467c 9270 gen_adc(tmp, tmp2);
9ee6e8bb 9271 else
396e467c 9272 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9273 break;
9274 case 0x6: /* sbc */
9ee6e8bb 9275 if (s->condexec_mask)
396e467c 9276 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9277 else
396e467c 9278 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9279 break;
9280 case 0x7: /* ror */
9ee6e8bb 9281 if (s->condexec_mask) {
f669df27
AJ
9282 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9283 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9284 } else {
396e467c
FN
9285 gen_helper_ror_cc(tmp2, tmp2, tmp);
9286 gen_logic_CC(tmp2);
9ee6e8bb 9287 }
99c475ab
FB
9288 break;
9289 case 0x8: /* tst */
396e467c
FN
9290 tcg_gen_and_i32(tmp, tmp, tmp2);
9291 gen_logic_CC(tmp);
99c475ab 9292 rd = 16;
5899f386 9293 break;
99c475ab 9294 case 0x9: /* neg */
9ee6e8bb 9295 if (s->condexec_mask)
396e467c 9296 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9297 else
396e467c 9298 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9299 break;
9300 case 0xa: /* cmp */
396e467c 9301 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9302 rd = 16;
9303 break;
9304 case 0xb: /* cmn */
396e467c 9305 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9306 rd = 16;
9307 break;
9308 case 0xc: /* orr */
396e467c 9309 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9310 if (!s->condexec_mask)
396e467c 9311 gen_logic_CC(tmp);
99c475ab
FB
9312 break;
9313 case 0xd: /* mul */
7b2919a0 9314 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9315 if (!s->condexec_mask)
396e467c 9316 gen_logic_CC(tmp);
99c475ab
FB
9317 break;
9318 case 0xe: /* bic */
f669df27 9319 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9320 if (!s->condexec_mask)
396e467c 9321 gen_logic_CC(tmp);
99c475ab
FB
9322 break;
9323 case 0xf: /* mvn */
396e467c 9324 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9325 if (!s->condexec_mask)
396e467c 9326 gen_logic_CC(tmp2);
99c475ab 9327 val = 1;
5899f386 9328 rm = rd;
99c475ab
FB
9329 break;
9330 }
9331 if (rd != 16) {
396e467c
FN
9332 if (val) {
9333 store_reg(s, rm, tmp2);
9334 if (op != 0xf)
7d1b0095 9335 tcg_temp_free_i32(tmp);
396e467c
FN
9336 } else {
9337 store_reg(s, rd, tmp);
7d1b0095 9338 tcg_temp_free_i32(tmp2);
396e467c
FN
9339 }
9340 } else {
7d1b0095
PM
9341 tcg_temp_free_i32(tmp);
9342 tcg_temp_free_i32(tmp2);
99c475ab
FB
9343 }
9344 break;
9345
9346 case 5:
9347 /* load/store register offset. */
9348 rd = insn & 7;
9349 rn = (insn >> 3) & 7;
9350 rm = (insn >> 6) & 7;
9351 op = (insn >> 9) & 7;
b0109805 9352 addr = load_reg(s, rn);
b26eefb6 9353 tmp = load_reg(s, rm);
b0109805 9354 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9355 tcg_temp_free_i32(tmp);
99c475ab
FB
9356
9357 if (op < 3) /* store */
b0109805 9358 tmp = load_reg(s, rd);
99c475ab
FB
9359
9360 switch (op) {
9361 case 0: /* str */
b0109805 9362 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9363 break;
9364 case 1: /* strh */
b0109805 9365 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9366 break;
9367 case 2: /* strb */
b0109805 9368 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9369 break;
9370 case 3: /* ldrsb */
b0109805 9371 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9372 break;
9373 case 4: /* ldr */
b0109805 9374 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9375 break;
9376 case 5: /* ldrh */
b0109805 9377 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9378 break;
9379 case 6: /* ldrb */
b0109805 9380 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9381 break;
9382 case 7: /* ldrsh */
b0109805 9383 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9384 break;
9385 }
9386 if (op >= 3) /* load */
b0109805 9387 store_reg(s, rd, tmp);
7d1b0095 9388 tcg_temp_free_i32(addr);
99c475ab
FB
9389 break;
9390
9391 case 6:
9392 /* load/store word immediate offset */
9393 rd = insn & 7;
9394 rn = (insn >> 3) & 7;
b0109805 9395 addr = load_reg(s, rn);
99c475ab 9396 val = (insn >> 4) & 0x7c;
b0109805 9397 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9398
9399 if (insn & (1 << 11)) {
9400 /* load */
b0109805
PB
9401 tmp = gen_ld32(addr, IS_USER(s));
9402 store_reg(s, rd, tmp);
99c475ab
FB
9403 } else {
9404 /* store */
b0109805
PB
9405 tmp = load_reg(s, rd);
9406 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9407 }
7d1b0095 9408 tcg_temp_free_i32(addr);
99c475ab
FB
9409 break;
9410
9411 case 7:
9412 /* load/store byte immediate offset */
9413 rd = insn & 7;
9414 rn = (insn >> 3) & 7;
b0109805 9415 addr = load_reg(s, rn);
99c475ab 9416 val = (insn >> 6) & 0x1f;
b0109805 9417 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9418
9419 if (insn & (1 << 11)) {
9420 /* load */
b0109805
PB
9421 tmp = gen_ld8u(addr, IS_USER(s));
9422 store_reg(s, rd, tmp);
99c475ab
FB
9423 } else {
9424 /* store */
b0109805
PB
9425 tmp = load_reg(s, rd);
9426 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9427 }
7d1b0095 9428 tcg_temp_free_i32(addr);
99c475ab
FB
9429 break;
9430
9431 case 8:
9432 /* load/store halfword immediate offset */
9433 rd = insn & 7;
9434 rn = (insn >> 3) & 7;
b0109805 9435 addr = load_reg(s, rn);
99c475ab 9436 val = (insn >> 5) & 0x3e;
b0109805 9437 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9438
9439 if (insn & (1 << 11)) {
9440 /* load */
b0109805
PB
9441 tmp = gen_ld16u(addr, IS_USER(s));
9442 store_reg(s, rd, tmp);
99c475ab
FB
9443 } else {
9444 /* store */
b0109805
PB
9445 tmp = load_reg(s, rd);
9446 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9447 }
7d1b0095 9448 tcg_temp_free_i32(addr);
99c475ab
FB
9449 break;
9450
9451 case 9:
9452 /* load/store from stack */
9453 rd = (insn >> 8) & 7;
b0109805 9454 addr = load_reg(s, 13);
99c475ab 9455 val = (insn & 0xff) * 4;
b0109805 9456 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9457
9458 if (insn & (1 << 11)) {
9459 /* load */
b0109805
PB
9460 tmp = gen_ld32(addr, IS_USER(s));
9461 store_reg(s, rd, tmp);
99c475ab
FB
9462 } else {
9463 /* store */
b0109805
PB
9464 tmp = load_reg(s, rd);
9465 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9466 }
7d1b0095 9467 tcg_temp_free_i32(addr);
99c475ab
FB
9468 break;
9469
9470 case 10:
9471 /* add to high reg */
9472 rd = (insn >> 8) & 7;
5899f386
FB
9473 if (insn & (1 << 11)) {
9474 /* SP */
5e3f878a 9475 tmp = load_reg(s, 13);
5899f386
FB
9476 } else {
9477 /* PC. bit 1 is ignored. */
7d1b0095 9478 tmp = tcg_temp_new_i32();
5e3f878a 9479 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9480 }
99c475ab 9481 val = (insn & 0xff) * 4;
5e3f878a
PB
9482 tcg_gen_addi_i32(tmp, tmp, val);
9483 store_reg(s, rd, tmp);
99c475ab
FB
9484 break;
9485
9486 case 11:
9487 /* misc */
9488 op = (insn >> 8) & 0xf;
9489 switch (op) {
9490 case 0:
9491 /* adjust stack pointer */
b26eefb6 9492 tmp = load_reg(s, 13);
99c475ab
FB
9493 val = (insn & 0x7f) * 4;
9494 if (insn & (1 << 7))
6a0d8a1d 9495 val = -(int32_t)val;
b26eefb6
PB
9496 tcg_gen_addi_i32(tmp, tmp, val);
9497 store_reg(s, 13, tmp);
99c475ab
FB
9498 break;
9499
9ee6e8bb
PB
9500 case 2: /* sign/zero extend. */
9501 ARCH(6);
9502 rd = insn & 7;
9503 rm = (insn >> 3) & 7;
b0109805 9504 tmp = load_reg(s, rm);
9ee6e8bb 9505 switch ((insn >> 6) & 3) {
b0109805
PB
9506 case 0: gen_sxth(tmp); break;
9507 case 1: gen_sxtb(tmp); break;
9508 case 2: gen_uxth(tmp); break;
9509 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9510 }
b0109805 9511 store_reg(s, rd, tmp);
9ee6e8bb 9512 break;
99c475ab
FB
9513 case 4: case 5: case 0xc: case 0xd:
9514 /* push/pop */
b0109805 9515 addr = load_reg(s, 13);
5899f386
FB
9516 if (insn & (1 << 8))
9517 offset = 4;
99c475ab 9518 else
5899f386
FB
9519 offset = 0;
9520 for (i = 0; i < 8; i++) {
9521 if (insn & (1 << i))
9522 offset += 4;
9523 }
9524 if ((insn & (1 << 11)) == 0) {
b0109805 9525 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9526 }
99c475ab
FB
9527 for (i = 0; i < 8; i++) {
9528 if (insn & (1 << i)) {
9529 if (insn & (1 << 11)) {
9530 /* pop */
b0109805
PB
9531 tmp = gen_ld32(addr, IS_USER(s));
9532 store_reg(s, i, tmp);
99c475ab
FB
9533 } else {
9534 /* push */
b0109805
PB
9535 tmp = load_reg(s, i);
9536 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9537 }
5899f386 9538 /* advance to the next address. */
b0109805 9539 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9540 }
9541 }
a50f5b91 9542 TCGV_UNUSED(tmp);
99c475ab
FB
9543 if (insn & (1 << 8)) {
9544 if (insn & (1 << 11)) {
9545 /* pop pc */
b0109805 9546 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9547 /* don't set the pc until the rest of the instruction
9548 has completed */
9549 } else {
9550 /* push lr */
b0109805
PB
9551 tmp = load_reg(s, 14);
9552 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9553 }
b0109805 9554 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9555 }
5899f386 9556 if ((insn & (1 << 11)) == 0) {
b0109805 9557 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9558 }
99c475ab 9559 /* write back the new stack pointer */
b0109805 9560 store_reg(s, 13, addr);
99c475ab 9561 /* set the new PC value */
be5e7a76
DES
9562 if ((insn & 0x0900) == 0x0900) {
9563 store_reg_from_load(env, s, 15, tmp);
9564 }
99c475ab
FB
9565 break;
9566
9ee6e8bb
PB
9567 case 1: case 3: case 9: case 11: /* czb */
9568 rm = insn & 7;
d9ba4830 9569 tmp = load_reg(s, rm);
9ee6e8bb
PB
9570 s->condlabel = gen_new_label();
9571 s->condjmp = 1;
9572 if (insn & (1 << 11))
cb63669a 9573 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9574 else
cb63669a 9575 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9576 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9577 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9578 val = (uint32_t)s->pc + 2;
9579 val += offset;
9580 gen_jmp(s, val);
9581 break;
9582
9583 case 15: /* IT, nop-hint. */
9584 if ((insn & 0xf) == 0) {
9585 gen_nop_hint(s, (insn >> 4) & 0xf);
9586 break;
9587 }
9588 /* If Then. */
9589 s->condexec_cond = (insn >> 4) & 0xe;
9590 s->condexec_mask = insn & 0x1f;
9591 /* No actual code generated for this insn, just setup state. */
9592 break;
9593
06c949e6 9594 case 0xe: /* bkpt */
be5e7a76 9595 ARCH(5);
bc4a0de0 9596 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9597 break;
9598
9ee6e8bb
PB
9599 case 0xa: /* rev */
9600 ARCH(6);
9601 rn = (insn >> 3) & 0x7;
9602 rd = insn & 0x7;
b0109805 9603 tmp = load_reg(s, rn);
9ee6e8bb 9604 switch ((insn >> 6) & 3) {
66896cb8 9605 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9606 case 1: gen_rev16(tmp); break;
9607 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9608 default: goto illegal_op;
9609 }
b0109805 9610 store_reg(s, rd, tmp);
9ee6e8bb
PB
9611 break;
9612
9613 case 6: /* cps */
9614 ARCH(6);
9615 if (IS_USER(s))
9616 break;
9617 if (IS_M(env)) {
8984bd2e 9618 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9619 /* PRIMASK */
8984bd2e
PB
9620 if (insn & 1) {
9621 addr = tcg_const_i32(16);
9622 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9623 tcg_temp_free_i32(addr);
8984bd2e 9624 }
9ee6e8bb 9625 /* FAULTMASK */
8984bd2e
PB
9626 if (insn & 2) {
9627 addr = tcg_const_i32(17);
9628 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9629 tcg_temp_free_i32(addr);
8984bd2e 9630 }
b75263d6 9631 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9632 gen_lookup_tb(s);
9633 } else {
9634 if (insn & (1 << 4))
9635 shift = CPSR_A | CPSR_I | CPSR_F;
9636 else
9637 shift = 0;
fa26df03 9638 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9639 }
9640 break;
9641
99c475ab
FB
9642 default:
9643 goto undef;
9644 }
9645 break;
9646
9647 case 12:
a7d3970d 9648 {
99c475ab 9649 /* load/store multiple */
a7d3970d
PM
9650 TCGv loaded_var;
9651 TCGV_UNUSED(loaded_var);
99c475ab 9652 rn = (insn >> 8) & 0x7;
b0109805 9653 addr = load_reg(s, rn);
99c475ab
FB
9654 for (i = 0; i < 8; i++) {
9655 if (insn & (1 << i)) {
99c475ab
FB
9656 if (insn & (1 << 11)) {
9657 /* load */
b0109805 9658 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9659 if (i == rn) {
9660 loaded_var = tmp;
9661 } else {
9662 store_reg(s, i, tmp);
9663 }
99c475ab
FB
9664 } else {
9665 /* store */
b0109805
PB
9666 tmp = load_reg(s, i);
9667 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9668 }
5899f386 9669 /* advance to the next address */
b0109805 9670 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9671 }
9672 }
b0109805 9673 if ((insn & (1 << rn)) == 0) {
a7d3970d 9674 /* base reg not in list: base register writeback */
b0109805
PB
9675 store_reg(s, rn, addr);
9676 } else {
a7d3970d
PM
9677 /* base reg in list: if load, complete it now */
9678 if (insn & (1 << 11)) {
9679 store_reg(s, rn, loaded_var);
9680 }
7d1b0095 9681 tcg_temp_free_i32(addr);
b0109805 9682 }
99c475ab 9683 break;
a7d3970d 9684 }
99c475ab
FB
9685 case 13:
9686 /* conditional branch or swi */
9687 cond = (insn >> 8) & 0xf;
9688 if (cond == 0xe)
9689 goto undef;
9690
9691 if (cond == 0xf) {
9692 /* swi */
422ebf69 9693 gen_set_pc_im(s->pc);
9ee6e8bb 9694 s->is_jmp = DISAS_SWI;
99c475ab
FB
9695 break;
9696 }
9697 /* generate a conditional jump to next instruction */
e50e6a20 9698 s->condlabel = gen_new_label();
d9ba4830 9699 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9700 s->condjmp = 1;
99c475ab
FB
9701
9702 /* jump to the offset */
5899f386 9703 val = (uint32_t)s->pc + 2;
99c475ab 9704 offset = ((int32_t)insn << 24) >> 24;
5899f386 9705 val += offset << 1;
8aaca4c0 9706 gen_jmp(s, val);
99c475ab
FB
9707 break;
9708
9709 case 14:
358bf29e 9710 if (insn & (1 << 11)) {
9ee6e8bb
PB
9711 if (disas_thumb2_insn(env, s, insn))
9712 goto undef32;
358bf29e
PB
9713 break;
9714 }
9ee6e8bb 9715 /* unconditional branch */
99c475ab
FB
9716 val = (uint32_t)s->pc;
9717 offset = ((int32_t)insn << 21) >> 21;
9718 val += (offset << 1) + 2;
8aaca4c0 9719 gen_jmp(s, val);
99c475ab
FB
9720 break;
9721
9722 case 15:
9ee6e8bb 9723 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9724 goto undef32;
9ee6e8bb 9725 break;
99c475ab
FB
9726 }
9727 return;
9ee6e8bb 9728undef32:
bc4a0de0 9729 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9730 return;
9731illegal_op:
99c475ab 9732undef:
bc4a0de0 9733 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9734}
9735
2c0262af
FB
9736/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9737 basic block 'tb'. If search_pc is TRUE, also generate PC
9738 information for each intermediate instruction. */
2cfc5f17
TS
9739static inline void gen_intermediate_code_internal(CPUState *env,
9740 TranslationBlock *tb,
9741 int search_pc)
2c0262af
FB
9742{
9743 DisasContext dc1, *dc = &dc1;
a1d1bb31 9744 CPUBreakpoint *bp;
2c0262af
FB
9745 uint16_t *gen_opc_end;
9746 int j, lj;
0fa85d43 9747 target_ulong pc_start;
b5ff1b31 9748 uint32_t next_page_start;
2e70f6ef
PB
9749 int num_insns;
9750 int max_insns;
3b46e624 9751
2c0262af 9752 /* generate intermediate code */
0fa85d43 9753 pc_start = tb->pc;
3b46e624 9754
2c0262af
FB
9755 dc->tb = tb;
9756
2c0262af 9757 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9758
9759 dc->is_jmp = DISAS_NEXT;
9760 dc->pc = pc_start;
8aaca4c0 9761 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9762 dc->condjmp = 0;
7204ab88 9763 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9764 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9765 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9766#if !defined(CONFIG_USER_ONLY)
61f74d6a 9767 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9768#endif
5df8bac1 9769 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9770 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9771 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9772 cpu_F0s = tcg_temp_new_i32();
9773 cpu_F1s = tcg_temp_new_i32();
9774 cpu_F0d = tcg_temp_new_i64();
9775 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9776 cpu_V0 = cpu_F0d;
9777 cpu_V1 = cpu_F1d;
e677137d 9778 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9779 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9780 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9781 lj = -1;
2e70f6ef
PB
9782 num_insns = 0;
9783 max_insns = tb->cflags & CF_COUNT_MASK;
9784 if (max_insns == 0)
9785 max_insns = CF_COUNT_MASK;
9786
9787 gen_icount_start();
e12ce78d 9788
3849902c
PM
9789 tcg_clear_temp_count();
9790
e12ce78d
PM
9791 /* A note on handling of the condexec (IT) bits:
9792 *
9793 * We want to avoid the overhead of having to write the updated condexec
9794 * bits back to the CPUState for every instruction in an IT block. So:
9795 * (1) if the condexec bits are not already zero then we write
9796 * zero back into the CPUState now. This avoids complications trying
9797 * to do it at the end of the block. (For example if we don't do this
9798 * it's hard to identify whether we can safely skip writing condexec
9799 * at the end of the TB, which we definitely want to do for the case
9800 * where a TB doesn't do anything with the IT state at all.)
9801 * (2) if we are going to leave the TB then we call gen_set_condexec()
9802 * which will write the correct value into CPUState if zero is wrong.
9803 * This is done both for leaving the TB at the end, and for leaving
9804 * it because of an exception we know will happen, which is done in
9805 * gen_exception_insn(). The latter is necessary because we need to
9806 * leave the TB with the PC/IT state just prior to execution of the
9807 * instruction which caused the exception.
9808 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9809 * then the CPUState will be wrong and we need to reset it.
9810 * This is handled in the same way as restoration of the
9811 * PC in these situations: we will be called again with search_pc=1
9812 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9813 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9814 * this to restore the condexec bits.
e12ce78d
PM
9815 *
9816 * Note that there are no instructions which can read the condexec
9817 * bits, and none which can write non-static values to them, so
9818 * we don't need to care about whether CPUState is correct in the
9819 * middle of a TB.
9820 */
9821
9ee6e8bb
PB
9822 /* Reset the conditional execution bits immediately. This avoids
9823 complications trying to do it at the end of the block. */
98eac7ca 9824 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9825 {
7d1b0095 9826 TCGv tmp = tcg_temp_new_i32();
8f01245e 9827 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9828 store_cpu_field(tmp, condexec_bits);
8f01245e 9829 }
2c0262af 9830 do {
fbb4a2e3
PB
9831#ifdef CONFIG_USER_ONLY
9832 /* Intercept jump to the magic kernel page. */
9833 if (dc->pc >= 0xffff0000) {
9834 /* We always get here via a jump, so know we are not in a
9835 conditional execution block. */
9836 gen_exception(EXCP_KERNEL_TRAP);
9837 dc->is_jmp = DISAS_UPDATE;
9838 break;
9839 }
9840#else
9ee6e8bb
PB
9841 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9842 /* We always get here via a jump, so know we are not in a
9843 conditional execution block. */
d9ba4830 9844 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9845 dc->is_jmp = DISAS_UPDATE;
9846 break;
9ee6e8bb
PB
9847 }
9848#endif
9849
72cf2d4f
BS
9850 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9851 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9852 if (bp->pc == dc->pc) {
bc4a0de0 9853 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9854 /* Advance PC so that clearing the breakpoint will
9855 invalidate this TB. */
9856 dc->pc += 2;
9857 goto done_generating;
1fddef4b
FB
9858 break;
9859 }
9860 }
9861 }
2c0262af
FB
9862 if (search_pc) {
9863 j = gen_opc_ptr - gen_opc_buf;
9864 if (lj < j) {
9865 lj++;
9866 while (lj < j)
9867 gen_opc_instr_start[lj++] = 0;
9868 }
0fa85d43 9869 gen_opc_pc[lj] = dc->pc;
e12ce78d 9870 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9871 gen_opc_instr_start[lj] = 1;
2e70f6ef 9872 gen_opc_icount[lj] = num_insns;
2c0262af 9873 }
e50e6a20 9874
2e70f6ef
PB
9875 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9876 gen_io_start();
9877
5642463a
PM
9878 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9879 tcg_gen_debug_insn_start(dc->pc);
9880 }
9881
7204ab88 9882 if (dc->thumb) {
9ee6e8bb
PB
9883 disas_thumb_insn(env, dc);
9884 if (dc->condexec_mask) {
9885 dc->condexec_cond = (dc->condexec_cond & 0xe)
9886 | ((dc->condexec_mask >> 4) & 1);
9887 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9888 if (dc->condexec_mask == 0) {
9889 dc->condexec_cond = 0;
9890 }
9891 }
9892 } else {
9893 disas_arm_insn(env, dc);
9894 }
e50e6a20
FB
9895
9896 if (dc->condjmp && !dc->is_jmp) {
9897 gen_set_label(dc->condlabel);
9898 dc->condjmp = 0;
9899 }
3849902c
PM
9900
9901 if (tcg_check_temp_count()) {
9902 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9903 }
9904
aaf2d97d 9905 /* Translation stops when a conditional branch is encountered.
e50e6a20 9906 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9907 * Also stop translation when a page boundary is reached. This
bf20dc07 9908 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9909 num_insns ++;
1fddef4b
FB
9910 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9911 !env->singlestep_enabled &&
1b530a6d 9912 !singlestep &&
2e70f6ef
PB
9913 dc->pc < next_page_start &&
9914 num_insns < max_insns);
9915
9916 if (tb->cflags & CF_LAST_IO) {
9917 if (dc->condjmp) {
9918 /* FIXME: This can theoretically happen with self-modifying
9919 code. */
9920 cpu_abort(env, "IO on conditional branch instruction");
9921 }
9922 gen_io_end();
9923 }
9ee6e8bb 9924
b5ff1b31 9925 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9926 instruction was a conditional branch or trap, and the PC has
9927 already been written. */
551bd27f 9928 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9929 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9930 if (dc->condjmp) {
9ee6e8bb
PB
9931 gen_set_condexec(dc);
9932 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9933 gen_exception(EXCP_SWI);
9ee6e8bb 9934 } else {
d9ba4830 9935 gen_exception(EXCP_DEBUG);
9ee6e8bb 9936 }
e50e6a20
FB
9937 gen_set_label(dc->condlabel);
9938 }
9939 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9940 gen_set_pc_im(dc->pc);
e50e6a20 9941 dc->condjmp = 0;
8aaca4c0 9942 }
9ee6e8bb
PB
9943 gen_set_condexec(dc);
9944 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9945 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9946 } else {
9947 /* FIXME: Single stepping a WFI insn will not halt
9948 the CPU. */
d9ba4830 9949 gen_exception(EXCP_DEBUG);
9ee6e8bb 9950 }
8aaca4c0 9951 } else {
9ee6e8bb
PB
9952 /* While branches must always occur at the end of an IT block,
9953 there are a few other things that can cause us to terminate
9954 the TB in the middel of an IT block:
9955 - Exception generating instructions (bkpt, swi, undefined).
9956 - Page boundaries.
9957 - Hardware watchpoints.
9958 Hardware breakpoints have already been handled and skip this code.
9959 */
9960 gen_set_condexec(dc);
8aaca4c0 9961 switch(dc->is_jmp) {
8aaca4c0 9962 case DISAS_NEXT:
6e256c93 9963 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9964 break;
9965 default:
9966 case DISAS_JUMP:
9967 case DISAS_UPDATE:
9968 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9969 tcg_gen_exit_tb(0);
8aaca4c0
FB
9970 break;
9971 case DISAS_TB_JUMP:
9972 /* nothing more to generate */
9973 break;
9ee6e8bb 9974 case DISAS_WFI:
d9ba4830 9975 gen_helper_wfi();
9ee6e8bb
PB
9976 break;
9977 case DISAS_SWI:
d9ba4830 9978 gen_exception(EXCP_SWI);
9ee6e8bb 9979 break;
8aaca4c0 9980 }
e50e6a20
FB
9981 if (dc->condjmp) {
9982 gen_set_label(dc->condlabel);
9ee6e8bb 9983 gen_set_condexec(dc);
6e256c93 9984 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9985 dc->condjmp = 0;
9986 }
2c0262af 9987 }
2e70f6ef 9988
9ee6e8bb 9989done_generating:
2e70f6ef 9990 gen_icount_end(tb, num_insns);
2c0262af
FB
9991 *gen_opc_ptr = INDEX_op_end;
9992
9993#ifdef DEBUG_DISAS
8fec2b8c 9994 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9995 qemu_log("----------------\n");
9996 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9997 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9998 qemu_log("\n");
2c0262af
FB
9999 }
10000#endif
b5ff1b31
FB
10001 if (search_pc) {
10002 j = gen_opc_ptr - gen_opc_buf;
10003 lj++;
10004 while (lj <= j)
10005 gen_opc_instr_start[lj++] = 0;
b5ff1b31 10006 } else {
2c0262af 10007 tb->size = dc->pc - pc_start;
2e70f6ef 10008 tb->icount = num_insns;
b5ff1b31 10009 }
2c0262af
FB
10010}
10011
/* Translate a basic block for execution; no per-op PC side tables are
   generated (search_pc = 0). */
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
10016
/* Re-translate a basic block with PC tracking (search_pc = 1) so the
   gen_opc_* side tables can be used to restore guest state after an
   exception mid-TB. */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
10021
b5ff1b31
FB
/* Human-readable names for the ARM CPU modes, indexed by the low four
   bits of the PSR mode field; "???" marks unassigned encodings.
   Used by cpu_dump_state() below. */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 10026
9a78eead 10027void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10028 int flags)
2c0262af
FB
10029{
10030 int i;
06e80fc9 10031#if 0
bc380d17 10032 union {
b7bcbe95
FB
10033 uint32_t i;
10034 float s;
10035 } s0, s1;
10036 CPU_DoubleU d;
a94a6abf
PB
10037 /* ??? This assumes float64 and double have the same layout.
10038 Oh well, it's only debug dumps. */
10039 union {
10040 float64 f64;
10041 double d;
10042 } d0;
06e80fc9 10043#endif
b5ff1b31 10044 uint32_t psr;
2c0262af
FB
10045
10046 for(i=0;i<16;i++) {
7fe48483 10047 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10048 if ((i % 4) == 3)
7fe48483 10049 cpu_fprintf(f, "\n");
2c0262af 10050 else
7fe48483 10051 cpu_fprintf(f, " ");
2c0262af 10052 }
b5ff1b31 10053 psr = cpsr_read(env);
687fa640
TS
10054 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10055 psr,
b5ff1b31
FB
10056 psr & (1 << 31) ? 'N' : '-',
10057 psr & (1 << 30) ? 'Z' : '-',
10058 psr & (1 << 29) ? 'C' : '-',
10059 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10060 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10061 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10062
5e3f878a 10063#if 0
b7bcbe95 10064 for (i = 0; i < 16; i++) {
8e96005d
FB
10065 d.d = env->vfp.regs[i];
10066 s0.i = d.l.lower;
10067 s1.i = d.l.upper;
a94a6abf
PB
10068 d0.f64 = d.d;
10069 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 10070 i * 2, (int)s0.i, s0.s,
a94a6abf 10071 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 10072 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 10073 d0.d);
b7bcbe95 10074 }
40f137e1 10075 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 10076#endif
2c0262af 10077}
a6b025d3 10078
/* Restore guest CPU state for the intermediate op at index 'pc_pos',
   using the side tables filled in by gen_intermediate_code_pc():
   the PC (r15) and the condexec (IT) bits. */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}