]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
linux-user/signal.c: Rename s390 target_ucontext fields to fix ia64
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
2c0262af 28#include "disas.h"
57fec1fe 29#include "tcg-op.h"
79383c9c 30#include "qemu-log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
9ee6e8bb
PB
56 /* Thumb-2 condtional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
b5ff1b31
FB
62#if !defined(CONFIG_USER_ONLY)
63 int user;
64#endif
5df8bac1 65 int vfp_enabled;
69d1fc22
PM
66 int vec_len;
67 int vec_stride;
2c0262af
FB
68} DisasContext;
69
e12ce78d
PM
70static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
71
b5ff1b31
FB
72#if defined(CONFIG_USER_ONLY)
73#define IS_USER(s) 1
74#else
75#define IS_USER(s) (s->user)
76#endif
77
9ee6e8bb
PB
78/* These instructions trap after executing, so defer them until after the
79 conditional executions state has been updated. */
80#define DISAS_WFI 4
81#define DISAS_SWI 5
2c0262af 82
a7812ae4 83static TCGv_ptr cpu_env;
ad69471c 84/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 85static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 86static TCGv_i32 cpu_R[16];
426f5abc
PB
87static TCGv_i32 cpu_exclusive_addr;
88static TCGv_i32 cpu_exclusive_val;
89static TCGv_i32 cpu_exclusive_high;
90#ifdef CONFIG_USER_ONLY
91static TCGv_i32 cpu_exclusive_test;
92static TCGv_i32 cpu_exclusive_info;
93#endif
ad69471c 94
b26eefb6 95/* FIXME: These should be removed. */
a7812ae4
PB
96static TCGv cpu_F0s, cpu_F1s;
97static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 98
2e70f6ef
PB
99#include "gen-icount.h"
100
155c3eac
FN
/* Names under which the 16 core registers are registered with TCG.  */
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
104
b26eefb6
PB
105/* initialize TCG globals. */
106void arm_translate_init(void)
107{
155c3eac
FN
108 int i;
109
a7812ae4
PB
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111
155c3eac
FN
112 for (i = 0; i < 16; i++) {
113 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, regs[i]),
115 regnames[i]);
116 }
426f5abc
PB
117 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, exclusive_addr), "exclusive_addr");
119 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, exclusive_val), "exclusive_val");
121 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_high), "exclusive_high");
123#ifdef CONFIG_USER_ONLY
124 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, exclusive_test), "exclusive_test");
126 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, exclusive_info), "exclusive_info");
128#endif
155c3eac 129
a7812ae4 130#define GEN_HELPER 2
7b59220e 131#include "helper.h"
b26eefb6
PB
132}
133
d9ba4830
PB
134static inline TCGv load_cpu_offset(int offset)
135{
7d1b0095 136 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
142
143static inline void store_cpu_offset(TCGv var, int offset)
144{
145 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 146 tcg_temp_free_i32(var);
d9ba4830
PB
147}
148
149#define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
151
b26eefb6
PB
152/* Set a variable to the value of a CPU register. */
153static void load_reg_var(DisasContext *s, TCGv var, int reg)
154{
155 if (reg == 15) {
156 uint32_t addr;
157 /* normaly, since we updated PC, we need only to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
155c3eac 164 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
169static inline TCGv load_reg(DisasContext *s, int reg)
170{
7d1b0095 171 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
176/* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178static void store_reg(DisasContext *s, int reg, TCGv var)
179{
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
155c3eac 184 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 185 tcg_temp_free_i32(var);
b26eefb6
PB
186}
187
b26eefb6 188/* Value extensions. */
86831435
PB
189#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
191#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
1497c961
PB
194#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 196
b26eefb6 197
b75263d6
JR
198static inline void gen_set_cpsr(TCGv var, uint32_t mask)
199{
200 TCGv tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
203}
d9ba4830
PB
204/* Set NZCV flags from the high 4 bits of var. */
205#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
207static void gen_exception(int excp)
208{
7d1b0095 209 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
210 tcg_gen_movi_i32(tmp, excp);
211 gen_helper_exception(tmp);
7d1b0095 212 tcg_temp_free_i32(tmp);
d9ba4830
PB
213}
214
3670669c
PB
215static void gen_smul_dual(TCGv a, TCGv b)
216{
7d1b0095
PM
217 TCGv tmp1 = tcg_temp_new_i32();
218 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
219 tcg_gen_ext16s_i32(tmp1, a);
220 tcg_gen_ext16s_i32(tmp2, b);
3670669c 221 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 222 tcg_temp_free_i32(tmp2);
3670669c
PB
223 tcg_gen_sari_i32(a, a, 16);
224 tcg_gen_sari_i32(b, b, 16);
225 tcg_gen_mul_i32(b, b, a);
226 tcg_gen_mov_i32(a, tmp1);
7d1b0095 227 tcg_temp_free_i32(tmp1);
3670669c
PB
228}
229
230/* Byteswap each halfword. */
231static void gen_rev16(TCGv var)
232{
7d1b0095 233 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
234 tcg_gen_shri_i32(tmp, var, 8);
235 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
236 tcg_gen_shli_i32(var, var, 8);
237 tcg_gen_andi_i32(var, var, 0xff00ff00);
238 tcg_gen_or_i32(var, var, tmp);
7d1b0095 239 tcg_temp_free_i32(tmp);
3670669c
PB
240}
241
242/* Byteswap low halfword and sign extend. */
243static void gen_revsh(TCGv var)
244{
1a855029
AJ
245 tcg_gen_ext16u_i32(var, var);
246 tcg_gen_bswap16_i32(var, var);
247 tcg_gen_ext16s_i32(var, var);
3670669c
PB
248}
249
250/* Unsigned bitfield extract. */
251static void gen_ubfx(TCGv var, int shift, uint32_t mask)
252{
253 if (shift)
254 tcg_gen_shri_i32(var, var, shift);
255 tcg_gen_andi_i32(var, var, mask);
256}
257
258/* Signed bitfield extract. */
259static void gen_sbfx(TCGv var, int shift, int width)
260{
261 uint32_t signbit;
262
263 if (shift)
264 tcg_gen_sari_i32(var, var, shift);
265 if (shift + width < 32) {
266 signbit = 1u << (width - 1);
267 tcg_gen_andi_i32(var, var, (1u << width) - 1);
268 tcg_gen_xori_i32(var, var, signbit);
269 tcg_gen_subi_i32(var, var, signbit);
270 }
271}
272
273/* Bitfield insertion. Insert val into base. Clobbers base and val. */
274static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
275{
3670669c 276 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
277 tcg_gen_shli_i32(val, val, shift);
278 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
279 tcg_gen_or_i32(dest, base, val);
280}
281
838fa72d
AJ
282/* Return (b << 32) + a. Mark inputs as dead */
283static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 284{
838fa72d
AJ
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
286
287 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 288 tcg_temp_free_i32(b);
838fa72d
AJ
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
291
292 tcg_temp_free_i64(tmp64);
293 return a;
294}
295
296/* Return (b << 32) - a. Mark inputs as dead. */
297static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
298{
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
300
301 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 302 tcg_temp_free_i32(b);
838fa72d
AJ
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
305
306 tcg_temp_free_i64(tmp64);
307 return a;
3670669c
PB
308}
309
8f01245e
PB
310/* FIXME: Most targets have native widening multiplication.
311 It would be good to use that instead of a full wide multiply. */
5e3f878a 312/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 313static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 314{
a7812ae4
PB
315 TCGv_i64 tmp1 = tcg_temp_new_i64();
316 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
317
318 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 319 tcg_temp_free_i32(a);
5e3f878a 320 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 321 tcg_temp_free_i32(b);
5e3f878a 322 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 323 tcg_temp_free_i64(tmp2);
5e3f878a
PB
324 return tmp1;
325}
326
a7812ae4 327static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
a7812ae4
PB
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
331
332 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 333 tcg_temp_free_i32(a);
5e3f878a 334 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 335 tcg_temp_free_i32(b);
5e3f878a 336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 337 tcg_temp_free_i64(tmp2);
5e3f878a
PB
338 return tmp1;
339}
340
8f01245e
PB
341/* Swap low and high halfwords. */
342static void gen_swap_half(TCGv var)
343{
7d1b0095 344 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
345 tcg_gen_shri_i32(tmp, var, 16);
346 tcg_gen_shli_i32(var, var, 16);
347 tcg_gen_or_i32(var, var, tmp);
7d1b0095 348 tcg_temp_free_i32(tmp);
8f01245e
PB
349}
350
b26eefb6
PB
351/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
352 tmp = (t0 ^ t1) & 0x8000;
353 t0 &= ~0x8000;
354 t1 &= ~0x8000;
355 t0 = (t0 + t1) ^ tmp;
356 */
357
358static void gen_add16(TCGv t0, TCGv t1)
359{
7d1b0095 360 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
361 tcg_gen_xor_i32(tmp, t0, t1);
362 tcg_gen_andi_i32(tmp, tmp, 0x8000);
363 tcg_gen_andi_i32(t0, t0, ~0x8000);
364 tcg_gen_andi_i32(t1, t1, ~0x8000);
365 tcg_gen_add_i32(t0, t0, t1);
366 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
367 tcg_temp_free_i32(tmp);
368 tcg_temp_free_i32(t1);
b26eefb6
PB
369}
370
9a119ff6
PB
371#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
372
b26eefb6
PB
373/* Set CF to the top bit of var. */
374static void gen_set_CF_bit31(TCGv var)
375{
7d1b0095 376 TCGv tmp = tcg_temp_new_i32();
b26eefb6 377 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 378 gen_set_CF(tmp);
7d1b0095 379 tcg_temp_free_i32(tmp);
b26eefb6
PB
380}
381
382/* Set N and Z flags from var. */
383static inline void gen_logic_CC(TCGv var)
384{
6fbe23d5
PB
385 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
387}
388
389/* T0 += T1 + CF. */
396e467c 390static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 391{
d9ba4830 392 TCGv tmp;
396e467c 393 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 394 tmp = load_cpu_field(CF);
396e467c 395 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 396 tcg_temp_free_i32(tmp);
b26eefb6
PB
397}
398
e9bb4aa9
JR
399/* dest = T0 + T1 + CF. */
400static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
401{
402 TCGv tmp;
403 tcg_gen_add_i32(dest, t0, t1);
404 tmp = load_cpu_field(CF);
405 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 406 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
407}
408
3670669c
PB
409/* dest = T0 - T1 + CF - 1. */
410static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
411{
d9ba4830 412 TCGv tmp;
3670669c 413 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 414 tmp = load_cpu_field(CF);
3670669c
PB
415 tcg_gen_add_i32(dest, dest, tmp);
416 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 417 tcg_temp_free_i32(tmp);
3670669c
PB
418}
419
ad69471c
PB
420/* FIXME: Implement this natively. */
421#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
422
9a119ff6 423static void shifter_out_im(TCGv var, int shift)
b26eefb6 424{
7d1b0095 425 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
426 if (shift == 0) {
427 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 428 } else {
9a119ff6 429 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 430 if (shift != 31)
9a119ff6
PB
431 tcg_gen_andi_i32(tmp, tmp, 1);
432 }
433 gen_set_CF(tmp);
7d1b0095 434 tcg_temp_free_i32(tmp);
9a119ff6 435}
b26eefb6 436
9a119ff6
PB
437/* Shift by immediate. Includes special handling for shift == 0. */
438static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
439{
440 switch (shiftop) {
441 case 0: /* LSL */
442 if (shift != 0) {
443 if (flags)
444 shifter_out_im(var, 32 - shift);
445 tcg_gen_shli_i32(var, var, shift);
446 }
447 break;
448 case 1: /* LSR */
449 if (shift == 0) {
450 if (flags) {
451 tcg_gen_shri_i32(var, var, 31);
452 gen_set_CF(var);
453 }
454 tcg_gen_movi_i32(var, 0);
455 } else {
456 if (flags)
457 shifter_out_im(var, shift - 1);
458 tcg_gen_shri_i32(var, var, shift);
459 }
460 break;
461 case 2: /* ASR */
462 if (shift == 0)
463 shift = 32;
464 if (flags)
465 shifter_out_im(var, shift - 1);
466 if (shift == 32)
467 shift = 31;
468 tcg_gen_sari_i32(var, var, shift);
469 break;
470 case 3: /* ROR/RRX */
471 if (shift != 0) {
472 if (flags)
473 shifter_out_im(var, shift - 1);
f669df27 474 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 475 } else {
d9ba4830 476 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
477 if (flags)
478 shifter_out_im(var, 0);
479 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
480 tcg_gen_shli_i32(tmp, tmp, 31);
481 tcg_gen_or_i32(var, var, tmp);
7d1b0095 482 tcg_temp_free_i32(tmp);
b26eefb6
PB
483 }
484 }
485};
486
8984bd2e
PB
487static inline void gen_arm_shift_reg(TCGv var, int shiftop,
488 TCGv shift, int flags)
489{
490 if (flags) {
491 switch (shiftop) {
492 case 0: gen_helper_shl_cc(var, var, shift); break;
493 case 1: gen_helper_shr_cc(var, var, shift); break;
494 case 2: gen_helper_sar_cc(var, var, shift); break;
495 case 3: gen_helper_ror_cc(var, var, shift); break;
496 }
497 } else {
498 switch (shiftop) {
499 case 0: gen_helper_shl(var, var, shift); break;
500 case 1: gen_helper_shr(var, var, shift); break;
501 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
502 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
503 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
504 }
505 }
7d1b0095 506 tcg_temp_free_i32(shift);
8984bd2e
PB
507}
508
6ddbc6e4
PB
509#define PAS_OP(pfx) \
510 switch (op2) { \
511 case 0: gen_pas_helper(glue(pfx,add16)); break; \
512 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
513 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
514 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
515 case 4: gen_pas_helper(glue(pfx,add8)); break; \
516 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
517 }
d9ba4830 518static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 519{
a7812ae4 520 TCGv_ptr tmp;
6ddbc6e4
PB
521
522 switch (op1) {
523#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
524 case 1:
a7812ae4 525 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
526 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
527 PAS_OP(s)
b75263d6 528 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
529 break;
530 case 5:
a7812ae4 531 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
532 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
533 PAS_OP(u)
b75263d6 534 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
535 break;
536#undef gen_pas_helper
537#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
538 case 2:
539 PAS_OP(q);
540 break;
541 case 3:
542 PAS_OP(sh);
543 break;
544 case 6:
545 PAS_OP(uq);
546 break;
547 case 7:
548 PAS_OP(uh);
549 break;
550#undef gen_pas_helper
551 }
552}
9ee6e8bb
PB
553#undef PAS_OP
554
6ddbc6e4
PB
555/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
556#define PAS_OP(pfx) \
ed89a2f1 557 switch (op1) { \
6ddbc6e4
PB
558 case 0: gen_pas_helper(glue(pfx,add8)); break; \
559 case 1: gen_pas_helper(glue(pfx,add16)); break; \
560 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
562 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
564 }
d9ba4830 565static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 566{
a7812ae4 567 TCGv_ptr tmp;
6ddbc6e4 568
ed89a2f1 569 switch (op2) {
6ddbc6e4
PB
570#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
571 case 0:
a7812ae4 572 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
573 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
574 PAS_OP(s)
b75263d6 575 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
576 break;
577 case 4:
a7812ae4 578 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
b75263d6 581 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
582 break;
583#undef gen_pas_helper
584#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
585 case 1:
586 PAS_OP(q);
587 break;
588 case 2:
589 PAS_OP(sh);
590 break;
591 case 5:
592 PAS_OP(uq);
593 break;
594 case 6:
595 PAS_OP(uh);
596 break;
597#undef gen_pas_helper
598 }
599}
9ee6e8bb
PB
600#undef PAS_OP
601
d9ba4830
PB
602static void gen_test_cc(int cc, int label)
603{
604 TCGv tmp;
605 TCGv tmp2;
d9ba4830
PB
606 int inv;
607
d9ba4830
PB
608 switch (cc) {
609 case 0: /* eq: Z */
6fbe23d5 610 tmp = load_cpu_field(ZF);
cb63669a 611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
612 break;
613 case 1: /* ne: !Z */
6fbe23d5 614 tmp = load_cpu_field(ZF);
cb63669a 615 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
616 break;
617 case 2: /* cs: C */
618 tmp = load_cpu_field(CF);
cb63669a 619 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
620 break;
621 case 3: /* cc: !C */
622 tmp = load_cpu_field(CF);
cb63669a 623 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
624 break;
625 case 4: /* mi: N */
6fbe23d5 626 tmp = load_cpu_field(NF);
cb63669a 627 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
628 break;
629 case 5: /* pl: !N */
6fbe23d5 630 tmp = load_cpu_field(NF);
cb63669a 631 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
632 break;
633 case 6: /* vs: V */
634 tmp = load_cpu_field(VF);
cb63669a 635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
636 break;
637 case 7: /* vc: !V */
638 tmp = load_cpu_field(VF);
cb63669a 639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
640 break;
641 case 8: /* hi: C && !Z */
642 inv = gen_new_label();
643 tmp = load_cpu_field(CF);
cb63669a 644 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 645 tcg_temp_free_i32(tmp);
6fbe23d5 646 tmp = load_cpu_field(ZF);
cb63669a 647 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
648 gen_set_label(inv);
649 break;
650 case 9: /* ls: !C || Z */
651 tmp = load_cpu_field(CF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 653 tcg_temp_free_i32(tmp);
6fbe23d5 654 tmp = load_cpu_field(ZF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
656 break;
657 case 10: /* ge: N == V -> N ^ V == 0 */
658 tmp = load_cpu_field(VF);
6fbe23d5 659 tmp2 = load_cpu_field(NF);
d9ba4830 660 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 661 tcg_temp_free_i32(tmp2);
cb63669a 662 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
663 break;
664 case 11: /* lt: N != V -> N ^ V != 0 */
665 tmp = load_cpu_field(VF);
6fbe23d5 666 tmp2 = load_cpu_field(NF);
d9ba4830 667 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 668 tcg_temp_free_i32(tmp2);
cb63669a 669 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
670 break;
671 case 12: /* gt: !Z && N == V */
672 inv = gen_new_label();
6fbe23d5 673 tmp = load_cpu_field(ZF);
cb63669a 674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 675 tcg_temp_free_i32(tmp);
d9ba4830 676 tmp = load_cpu_field(VF);
6fbe23d5 677 tmp2 = load_cpu_field(NF);
d9ba4830 678 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 679 tcg_temp_free_i32(tmp2);
cb63669a 680 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
681 gen_set_label(inv);
682 break;
683 case 13: /* le: Z || N != V */
6fbe23d5 684 tmp = load_cpu_field(ZF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 686 tcg_temp_free_i32(tmp);
d9ba4830 687 tmp = load_cpu_field(VF);
6fbe23d5 688 tmp2 = load_cpu_field(NF);
d9ba4830 689 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 690 tcg_temp_free_i32(tmp2);
cb63669a 691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
692 break;
693 default:
694 fprintf(stderr, "Bad condition code 0x%x\n", cc);
695 abort();
696 }
7d1b0095 697 tcg_temp_free_i32(tmp);
d9ba4830 698}
2c0262af 699
/* Indexed by the 4-bit data-processing opcode: nonzero for the logical
   operations (whose S variant sets only N/Z via gen_logic_CC), zero for
   the arithmetic ones.  */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 718
d9ba4830
PB
719/* Set PC and Thumb state from an immediate address. */
720static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 721{
b26eefb6 722 TCGv tmp;
99c475ab 723
b26eefb6 724 s->is_jmp = DISAS_UPDATE;
d9ba4830 725 if (s->thumb != (addr & 1)) {
7d1b0095 726 tmp = tcg_temp_new_i32();
d9ba4830
PB
727 tcg_gen_movi_i32(tmp, addr & 1);
728 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 729 tcg_temp_free_i32(tmp);
d9ba4830 730 }
155c3eac 731 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
732}
733
734/* Set PC and Thumb state from var. var is marked as dead. */
735static inline void gen_bx(DisasContext *s, TCGv var)
736{
d9ba4830 737 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
738 tcg_gen_andi_i32(cpu_R[15], var, ~1);
739 tcg_gen_andi_i32(var, var, 1);
740 store_cpu_field(var, thumb);
d9ba4830
PB
741}
742
21aeb343
JR
743/* Variant of store_reg which uses branch&exchange logic when storing
744 to r15 in ARM architecture v7 and above. The source must be a temporary
745 and will be marked as dead. */
746static inline void store_reg_bx(CPUState *env, DisasContext *s,
747 int reg, TCGv var)
748{
749 if (reg == 15 && ENABLE_ARCH_7) {
750 gen_bx(s, var);
751 } else {
752 store_reg(s, reg, var);
753 }
754}
755
be5e7a76
DES
756/* Variant of store_reg which uses branch&exchange logic when storing
757 * to r15 in ARM architecture v5T and above. This is used for storing
758 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
759 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
760static inline void store_reg_from_load(CPUState *env, DisasContext *s,
761 int reg, TCGv var)
762{
763 if (reg == 15 && ENABLE_ARCH_5) {
764 gen_bx(s, var);
765 } else {
766 store_reg(s, reg, var);
767 }
768}
769
b0109805
PB
770static inline TCGv gen_ld8s(TCGv addr, int index)
771{
7d1b0095 772 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
773 tcg_gen_qemu_ld8s(tmp, addr, index);
774 return tmp;
775}
776static inline TCGv gen_ld8u(TCGv addr, int index)
777{
7d1b0095 778 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
779 tcg_gen_qemu_ld8u(tmp, addr, index);
780 return tmp;
781}
782static inline TCGv gen_ld16s(TCGv addr, int index)
783{
7d1b0095 784 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
785 tcg_gen_qemu_ld16s(tmp, addr, index);
786 return tmp;
787}
788static inline TCGv gen_ld16u(TCGv addr, int index)
789{
7d1b0095 790 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
791 tcg_gen_qemu_ld16u(tmp, addr, index);
792 return tmp;
793}
794static inline TCGv gen_ld32(TCGv addr, int index)
795{
7d1b0095 796 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
797 tcg_gen_qemu_ld32u(tmp, addr, index);
798 return tmp;
799}
84496233
JR
800static inline TCGv_i64 gen_ld64(TCGv addr, int index)
801{
802 TCGv_i64 tmp = tcg_temp_new_i64();
803 tcg_gen_qemu_ld64(tmp, addr, index);
804 return tmp;
805}
b0109805
PB
806static inline void gen_st8(TCGv val, TCGv addr, int index)
807{
808 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 809 tcg_temp_free_i32(val);
b0109805
PB
810}
811static inline void gen_st16(TCGv val, TCGv addr, int index)
812{
813 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 814 tcg_temp_free_i32(val);
b0109805
PB
815}
816static inline void gen_st32(TCGv val, TCGv addr, int index)
817{
818 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 819 tcg_temp_free_i32(val);
b0109805 820}
84496233
JR
821static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
822{
823 tcg_gen_qemu_st64(val, addr, index);
824 tcg_temp_free_i64(val);
825}
b5ff1b31 826
5e3f878a
PB
827static inline void gen_set_pc_im(uint32_t val)
828{
155c3eac 829 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
830}
831
b5ff1b31
FB
832/* Force a TB lookup after an instruction that changes the CPU state. */
833static inline void gen_lookup_tb(DisasContext *s)
834{
a6445c52 835 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
836 s->is_jmp = DISAS_UPDATE;
837}
838
b0109805
PB
839static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
840 TCGv var)
2c0262af 841{
1e8d4eec 842 int val, rm, shift, shiftop;
b26eefb6 843 TCGv offset;
2c0262af
FB
844
845 if (!(insn & (1 << 25))) {
846 /* immediate */
847 val = insn & 0xfff;
848 if (!(insn & (1 << 23)))
849 val = -val;
537730b9 850 if (val != 0)
b0109805 851 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
852 } else {
853 /* shift/register */
854 rm = (insn) & 0xf;
855 shift = (insn >> 7) & 0x1f;
1e8d4eec 856 shiftop = (insn >> 5) & 3;
b26eefb6 857 offset = load_reg(s, rm);
9a119ff6 858 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 859 if (!(insn & (1 << 23)))
b0109805 860 tcg_gen_sub_i32(var, var, offset);
2c0262af 861 else
b0109805 862 tcg_gen_add_i32(var, var, offset);
7d1b0095 863 tcg_temp_free_i32(offset);
2c0262af
FB
864 }
865}
866
191f9a93 867static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 868 int extra, TCGv var)
2c0262af
FB
869{
870 int val, rm;
b26eefb6 871 TCGv offset;
3b46e624 872
2c0262af
FB
873 if (insn & (1 << 22)) {
874 /* immediate */
875 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
876 if (!(insn & (1 << 23)))
877 val = -val;
18acad92 878 val += extra;
537730b9 879 if (val != 0)
b0109805 880 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
881 } else {
882 /* register */
191f9a93 883 if (extra)
b0109805 884 tcg_gen_addi_i32(var, var, extra);
2c0262af 885 rm = (insn) & 0xf;
b26eefb6 886 offset = load_reg(s, rm);
2c0262af 887 if (!(insn & (1 << 23)))
b0109805 888 tcg_gen_sub_i32(var, var, offset);
2c0262af 889 else
b0109805 890 tcg_gen_add_i32(var, var, offset);
7d1b0095 891 tcg_temp_free_i32(offset);
2c0262af
FB
892 }
893}
894
4373f3ce
PB
895#define VFP_OP2(name) \
896static inline void gen_vfp_##name(int dp) \
897{ \
898 if (dp) \
899 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
900 else \
901 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
902}
903
4373f3ce
PB
904VFP_OP2(add)
905VFP_OP2(sub)
906VFP_OP2(mul)
907VFP_OP2(div)
908
909#undef VFP_OP2
910
605a6aed
PM
911static inline void gen_vfp_F1_mul(int dp)
912{
913 /* Like gen_vfp_mul() but put result in F1 */
914 if (dp) {
915 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, cpu_env);
916 } else {
917 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, cpu_env);
918 }
919}
920
921static inline void gen_vfp_F1_neg(int dp)
922{
923 /* Like gen_vfp_neg() but put result in F1 */
924 if (dp) {
925 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
926 } else {
927 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
928 }
929}
930
4373f3ce
PB
931static inline void gen_vfp_abs(int dp)
932{
933 if (dp)
934 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
935 else
936 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
937}
938
939static inline void gen_vfp_neg(int dp)
940{
941 if (dp)
942 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
943 else
944 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
945}
946
947static inline void gen_vfp_sqrt(int dp)
948{
949 if (dp)
950 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
951 else
952 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
953}
954
955static inline void gen_vfp_cmp(int dp)
956{
957 if (dp)
958 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
959 else
960 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
961}
962
963static inline void gen_vfp_cmpe(int dp)
964{
965 if (dp)
966 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
967 else
968 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
969}
970
971static inline void gen_vfp_F1_ld0(int dp)
972{
973 if (dp)
5b340b51 974 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 975 else
5b340b51 976 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
977}
978
5500b06c
PM
979#define VFP_GEN_ITOF(name) \
980static inline void gen_vfp_##name(int dp, int neon) \
981{ \
b7fa9214 982 TCGv_ptr statusptr = tcg_temp_new_ptr(); \
5500b06c
PM
983 int offset; \
984 if (neon) { \
985 offset = offsetof(CPUState, vfp.standard_fp_status); \
986 } else { \
987 offset = offsetof(CPUState, vfp.fp_status); \
988 } \
b7fa9214 989 tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
5500b06c
PM
990 if (dp) { \
991 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
992 } else { \
993 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
994 } \
b7fa9214 995 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
996}
997
5500b06c
PM
998VFP_GEN_ITOF(uito)
999VFP_GEN_ITOF(sito)
1000#undef VFP_GEN_ITOF
4373f3ce 1001
/* Generator for the float-to-integer conversions (toui/tosi and their
 * round-to-zero "z" variants).  dp selects a double-precision source;
 * the integer result always lands in the 32-bit reg cpu_F0s.  neon
 * selects the Neon "standard FP" status word instead of the FPSCR one.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = tcg_temp_new_ptr(); \
    int offset; \
    if (neon) { \
        offset = offsetof(CPUState, vfp.standard_fp_status); \
    } else { \
        offset = offsetof(CPUState, vfp.fp_status); \
    } \
    tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce
PB
1026
/* Generator for the fixed-point <-> float conversions (VCVT with a
 * fractional-bits immediate).  'shift' is the number of fraction bits,
 * passed to the helper as an i32 constant.  dp selects double precision
 * and neon selects the Neon "standard FP" status word.
 */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = tcg_temp_new_ptr(); \
    int offset; \
    if (neon) { \
        offset = offsetof(CPUState, vfp.standard_fp_status); \
    } else { \
        offset = offsetof(CPUState, vfp.fp_status); \
    } \
    tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1056
312eea9f 1057static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1058{
1059 if (dp)
312eea9f 1060 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1061 else
312eea9f 1062 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1063}
1064
312eea9f 1065static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1066{
1067 if (dp)
312eea9f 1068 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1069 else
312eea9f 1070 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1071}
1072
8e96005d
FB
1073static inline long
1074vfp_reg_offset (int dp, int reg)
1075{
1076 if (dp)
1077 return offsetof(CPUARMState, vfp.regs[reg]);
1078 else if (reg & 1) {
1079 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1080 + offsetof(CPU_DoubleU, l.upper);
1081 } else {
1082 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1083 + offsetof(CPU_DoubleU, l.lower);
1084 }
1085}
9ee6e8bb
PB
1086
/* Return the offset of a 32-bit piece of a NEON register.
 * zero is the least significant end of the register.
 * (A Neon D register is two consecutive S-register slots.)
 */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1096
8f8e3aa4
PB
1097static TCGv neon_load_reg(int reg, int pass)
1098{
7d1b0095 1099 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1100 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1101 return tmp;
1102}
1103
/* Write var into one 32-bit pass of a Neon register.
 * Consumes var: the temp is freed here, so the caller must not reuse it.
 */
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1109
/* Load a whole 64-bit Neon D register into the caller-provided temp. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1114
/* Store a 64-bit temp into a whole Neon D register (var is not freed). */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1119
4373f3ce
PB
/* FP register loads/stores are plain integer accesses of the same width;
 * alias them so the VFP move helpers below read naturally.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1124
b7bcbe95
FB
1125static inline void gen_mov_F0_vreg(int dp, int reg)
1126{
1127 if (dp)
4373f3ce 1128 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1129 else
4373f3ce 1130 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1131}
1132
1133static inline void gen_mov_F1_vreg(int dp, int reg)
1134{
1135 if (dp)
4373f3ce 1136 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1137 else
4373f3ce 1138 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1139}
1140
1141static inline void gen_mov_vreg_F0(int dp, int reg)
1142{
1143 if (dp)
4373f3ce 1144 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1145 else
4373f3ce 1146 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1147}
1148
18c9b560
AZ
1149#define ARM_CP_RW_BIT (1 << 20)
1150
/* Load iwMMXt data register wRn into the caller-provided 64-bit temp. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1155
/* Store the 64-bit temp into iwMMXt data register wRn (var not freed). */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1160
da6b5335 1161static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1162{
7d1b0095 1163 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1164 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1165 return var;
e677137d
PB
1166}
1167
/* Write var to iwMMXt control register wCx.
 * Consumes var: the temp is freed here.
 */
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1173
/* Write back the M0 accumulator into iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1178
/* Load iwMMXt register wRn into the M0 accumulator. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1183
/* M0 |= wRn (64-bit bitwise OR into the accumulator). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1189
/* M0 &= wRn (64-bit bitwise AND into the accumulator). */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1195
/* M0 ^= wRn (64-bit bitwise XOR into the accumulator). */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1201
/* Generator for two-operand iwMMXt ops: M0 = helper_iwmmxt_<name>(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of a sized op in one shot. */
#define IWMMXT_OP_SIZE(name) \
IWMMXT_OP(name##b) \
IWMMXT_OP(name##w) \
IWMMXT_OP(name##l)

/* Generator for one-operand iwMMXt ops: M0 = helper_iwmmxt_<name>(M0). */
#define IWMMXT_OP_1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_SIZE(unpackl)
IWMMXT_OP_SIZE(unpackh)

IWMMXT_OP_1(unpacklub)
IWMMXT_OP_1(unpackluw)
IWMMXT_OP_1(unpacklul)
IWMMXT_OP_1(unpackhub)
IWMMXT_OP_1(unpackhuw)
IWMMXT_OP_1(unpackhul)
IWMMXT_OP_1(unpacklsb)
IWMMXT_OP_1(unpacklsw)
IWMMXT_OP_1(unpacklsl)
IWMMXT_OP_1(unpackhsb)
IWMMXT_OP_1(unpackhsw)
IWMMXT_OP_1(unpackhsl)

IWMMXT_OP_SIZE(cmpeq)
IWMMXT_OP_SIZE(cmpgtu)
IWMMXT_OP_SIZE(cmpgts)

IWMMXT_OP_SIZE(mins)
IWMMXT_OP_SIZE(minu)
IWMMXT_OP_SIZE(maxs)
IWMMXT_OP_SIZE(maxu)

IWMMXT_OP_SIZE(subn)
IWMMXT_OP_SIZE(addn)
IWMMXT_OP_SIZE(subu)
IWMMXT_OP_SIZE(addu)
IWMMXT_OP_SIZE(subs)
IWMMXT_OP_SIZE(adds)

IWMMXT_OP(avgb0)
IWMMXT_OP(avgb1)
IWMMXT_OP(avgw0)
IWMMXT_OP(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP(packuw)
IWMMXT_OP(packul)
IWMMXT_OP(packuq)
IWMMXT_OP(packsw)
IWMMXT_OP(packsl)
IWMMXT_OP(packsq)
e677137d 1276
e677137d
PB
1277static void gen_op_iwmmxt_set_mup(void)
1278{
1279 TCGv tmp;
1280 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1281 tcg_gen_ori_i32(tmp, tmp, 2);
1282 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1283}
1284
1285static void gen_op_iwmmxt_set_cup(void)
1286{
1287 TCGv tmp;
1288 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1289 tcg_gen_ori_i32(tmp, tmp, 1);
1290 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1291}
1292
1293static void gen_op_iwmmxt_setpsr_nz(void)
1294{
7d1b0095 1295 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1296 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1297 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1298}
1299
/* M0 += zero-extended low 32 bits of wRn (accumulate step, e.g. WSAD). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1306
da6b5335 1307static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1308{
1309 int rd;
1310 uint32_t offset;
da6b5335 1311 TCGv tmp;
18c9b560
AZ
1312
1313 rd = (insn >> 16) & 0xf;
da6b5335 1314 tmp = load_reg(s, rd);
18c9b560
AZ
1315
1316 offset = (insn & 0xff) << ((insn >> 7) & 2);
1317 if (insn & (1 << 24)) {
1318 /* Pre indexed */
1319 if (insn & (1 << 23))
da6b5335 1320 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1321 else
da6b5335
FN
1322 tcg_gen_addi_i32(tmp, tmp, -offset);
1323 tcg_gen_mov_i32(dest, tmp);
18c9b560 1324 if (insn & (1 << 21))
da6b5335
FN
1325 store_reg(s, rd, tmp);
1326 else
7d1b0095 1327 tcg_temp_free_i32(tmp);
18c9b560
AZ
1328 } else if (insn & (1 << 21)) {
1329 /* Post indexed */
da6b5335 1330 tcg_gen_mov_i32(dest, tmp);
18c9b560 1331 if (insn & (1 << 23))
da6b5335 1332 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1333 else
da6b5335
FN
1334 tcg_gen_addi_i32(tmp, tmp, -offset);
1335 store_reg(s, rd, tmp);
18c9b560
AZ
1336 } else if (!(insn & (1 << 23)))
1337 return 1;
1338 return 0;
1339}
1340
da6b5335 1341static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1342{
1343 int rd = (insn >> 0) & 0xf;
da6b5335 1344 TCGv tmp;
18c9b560 1345
da6b5335
FN
1346 if (insn & (1 << 8)) {
1347 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1348 return 1;
da6b5335
FN
1349 } else {
1350 tmp = iwmmxt_load_creg(rd);
1351 }
1352 } else {
7d1b0095 1353 tmp = tcg_temp_new_i32();
da6b5335
FN
1354 iwmmxt_load_reg(cpu_V0, rd);
1355 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1356 }
1357 tcg_gen_andi_i32(tmp, tmp, mask);
1358 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1359 tcg_temp_free_i32(tmp);
18c9b560
AZ
1360 return 0;
1361}
1362
a1c7273b 1363/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560
AZ
1364 (ie. an undefined instruction). */
1365static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1366{
1367 int rd, wrd;
1368 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1369 TCGv addr;
1370 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1371
1372 if ((insn & 0x0e000e00) == 0x0c000000) {
1373 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1374 wrd = insn & 0xf;
1375 rdlo = (insn >> 12) & 0xf;
1376 rdhi = (insn >> 16) & 0xf;
1377 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1378 iwmmxt_load_reg(cpu_V0, wrd);
1379 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1380 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1381 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1382 } else { /* TMCRR */
da6b5335
FN
1383 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1384 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1385 gen_op_iwmmxt_set_mup();
1386 }
1387 return 0;
1388 }
1389
1390 wrd = (insn >> 12) & 0xf;
7d1b0095 1391 addr = tcg_temp_new_i32();
da6b5335 1392 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1393 tcg_temp_free_i32(addr);
18c9b560 1394 return 1;
da6b5335 1395 }
18c9b560
AZ
1396 if (insn & ARM_CP_RW_BIT) {
1397 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1398 tmp = tcg_temp_new_i32();
da6b5335
FN
1399 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1400 iwmmxt_store_creg(wrd, tmp);
18c9b560 1401 } else {
e677137d
PB
1402 i = 1;
1403 if (insn & (1 << 8)) {
1404 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1405 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1406 i = 0;
1407 } else { /* WLDRW wRd */
da6b5335 1408 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1409 }
1410 } else {
1411 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1412 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1413 } else { /* WLDRB */
da6b5335 1414 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1415 }
1416 }
1417 if (i) {
1418 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1419 tcg_temp_free_i32(tmp);
e677137d 1420 }
18c9b560
AZ
1421 gen_op_iwmmxt_movq_wRn_M0(wrd);
1422 }
1423 } else {
1424 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1425 tmp = iwmmxt_load_creg(wrd);
1426 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1427 } else {
1428 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1429 tmp = tcg_temp_new_i32();
e677137d
PB
1430 if (insn & (1 << 8)) {
1431 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1432 tcg_temp_free_i32(tmp);
da6b5335 1433 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1434 } else { /* WSTRW wRd */
1435 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1436 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1437 }
1438 } else {
1439 if (insn & (1 << 22)) { /* WSTRH */
1440 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1441 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1442 } else { /* WSTRB */
1443 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1444 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1445 }
1446 }
18c9b560
AZ
1447 }
1448 }
7d1b0095 1449 tcg_temp_free_i32(addr);
18c9b560
AZ
1450 return 0;
1451 }
1452
1453 if ((insn & 0x0f000000) != 0x0e000000)
1454 return 1;
1455
1456 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1457 case 0x000: /* WOR */
1458 wrd = (insn >> 12) & 0xf;
1459 rd0 = (insn >> 0) & 0xf;
1460 rd1 = (insn >> 16) & 0xf;
1461 gen_op_iwmmxt_movq_M0_wRn(rd0);
1462 gen_op_iwmmxt_orq_M0_wRn(rd1);
1463 gen_op_iwmmxt_setpsr_nz();
1464 gen_op_iwmmxt_movq_wRn_M0(wrd);
1465 gen_op_iwmmxt_set_mup();
1466 gen_op_iwmmxt_set_cup();
1467 break;
1468 case 0x011: /* TMCR */
1469 if (insn & 0xf)
1470 return 1;
1471 rd = (insn >> 12) & 0xf;
1472 wrd = (insn >> 16) & 0xf;
1473 switch (wrd) {
1474 case ARM_IWMMXT_wCID:
1475 case ARM_IWMMXT_wCASF:
1476 break;
1477 case ARM_IWMMXT_wCon:
1478 gen_op_iwmmxt_set_cup();
1479 /* Fall through. */
1480 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1481 tmp = iwmmxt_load_creg(wrd);
1482 tmp2 = load_reg(s, rd);
f669df27 1483 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1484 tcg_temp_free_i32(tmp2);
da6b5335 1485 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1486 break;
1487 case ARM_IWMMXT_wCGR0:
1488 case ARM_IWMMXT_wCGR1:
1489 case ARM_IWMMXT_wCGR2:
1490 case ARM_IWMMXT_wCGR3:
1491 gen_op_iwmmxt_set_cup();
da6b5335
FN
1492 tmp = load_reg(s, rd);
1493 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1494 break;
1495 default:
1496 return 1;
1497 }
1498 break;
1499 case 0x100: /* WXOR */
1500 wrd = (insn >> 12) & 0xf;
1501 rd0 = (insn >> 0) & 0xf;
1502 rd1 = (insn >> 16) & 0xf;
1503 gen_op_iwmmxt_movq_M0_wRn(rd0);
1504 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1505 gen_op_iwmmxt_setpsr_nz();
1506 gen_op_iwmmxt_movq_wRn_M0(wrd);
1507 gen_op_iwmmxt_set_mup();
1508 gen_op_iwmmxt_set_cup();
1509 break;
1510 case 0x111: /* TMRC */
1511 if (insn & 0xf)
1512 return 1;
1513 rd = (insn >> 12) & 0xf;
1514 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1515 tmp = iwmmxt_load_creg(wrd);
1516 store_reg(s, rd, tmp);
18c9b560
AZ
1517 break;
1518 case 0x300: /* WANDN */
1519 wrd = (insn >> 12) & 0xf;
1520 rd0 = (insn >> 0) & 0xf;
1521 rd1 = (insn >> 16) & 0xf;
1522 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1523 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1524 gen_op_iwmmxt_andq_M0_wRn(rd1);
1525 gen_op_iwmmxt_setpsr_nz();
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 gen_op_iwmmxt_set_cup();
1529 break;
1530 case 0x200: /* WAND */
1531 wrd = (insn >> 12) & 0xf;
1532 rd0 = (insn >> 0) & 0xf;
1533 rd1 = (insn >> 16) & 0xf;
1534 gen_op_iwmmxt_movq_M0_wRn(rd0);
1535 gen_op_iwmmxt_andq_M0_wRn(rd1);
1536 gen_op_iwmmxt_setpsr_nz();
1537 gen_op_iwmmxt_movq_wRn_M0(wrd);
1538 gen_op_iwmmxt_set_mup();
1539 gen_op_iwmmxt_set_cup();
1540 break;
1541 case 0x810: case 0xa10: /* WMADD */
1542 wrd = (insn >> 12) & 0xf;
1543 rd0 = (insn >> 0) & 0xf;
1544 rd1 = (insn >> 16) & 0xf;
1545 gen_op_iwmmxt_movq_M0_wRn(rd0);
1546 if (insn & (1 << 21))
1547 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1548 else
1549 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1550 gen_op_iwmmxt_movq_wRn_M0(wrd);
1551 gen_op_iwmmxt_set_mup();
1552 break;
1553 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1554 wrd = (insn >> 12) & 0xf;
1555 rd0 = (insn >> 16) & 0xf;
1556 rd1 = (insn >> 0) & 0xf;
1557 gen_op_iwmmxt_movq_M0_wRn(rd0);
1558 switch ((insn >> 22) & 3) {
1559 case 0:
1560 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1561 break;
1562 case 1:
1563 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1564 break;
1565 case 2:
1566 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1567 break;
1568 case 3:
1569 return 1;
1570 }
1571 gen_op_iwmmxt_movq_wRn_M0(wrd);
1572 gen_op_iwmmxt_set_mup();
1573 gen_op_iwmmxt_set_cup();
1574 break;
1575 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1576 wrd = (insn >> 12) & 0xf;
1577 rd0 = (insn >> 16) & 0xf;
1578 rd1 = (insn >> 0) & 0xf;
1579 gen_op_iwmmxt_movq_M0_wRn(rd0);
1580 switch ((insn >> 22) & 3) {
1581 case 0:
1582 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1583 break;
1584 case 1:
1585 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1586 break;
1587 case 2:
1588 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1589 break;
1590 case 3:
1591 return 1;
1592 }
1593 gen_op_iwmmxt_movq_wRn_M0(wrd);
1594 gen_op_iwmmxt_set_mup();
1595 gen_op_iwmmxt_set_cup();
1596 break;
1597 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1598 wrd = (insn >> 12) & 0xf;
1599 rd0 = (insn >> 16) & 0xf;
1600 rd1 = (insn >> 0) & 0xf;
1601 gen_op_iwmmxt_movq_M0_wRn(rd0);
1602 if (insn & (1 << 22))
1603 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1604 else
1605 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1606 if (!(insn & (1 << 20)))
1607 gen_op_iwmmxt_addl_M0_wRn(wrd);
1608 gen_op_iwmmxt_movq_wRn_M0(wrd);
1609 gen_op_iwmmxt_set_mup();
1610 break;
1611 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1612 wrd = (insn >> 12) & 0xf;
1613 rd0 = (insn >> 16) & 0xf;
1614 rd1 = (insn >> 0) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1616 if (insn & (1 << 21)) {
1617 if (insn & (1 << 20))
1618 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1619 else
1620 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1621 } else {
1622 if (insn & (1 << 20))
1623 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1624 else
1625 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1626 }
18c9b560
AZ
1627 gen_op_iwmmxt_movq_wRn_M0(wrd);
1628 gen_op_iwmmxt_set_mup();
1629 break;
1630 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1631 wrd = (insn >> 12) & 0xf;
1632 rd0 = (insn >> 16) & 0xf;
1633 rd1 = (insn >> 0) & 0xf;
1634 gen_op_iwmmxt_movq_M0_wRn(rd0);
1635 if (insn & (1 << 21))
1636 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1637 else
1638 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1639 if (!(insn & (1 << 20))) {
e677137d
PB
1640 iwmmxt_load_reg(cpu_V1, wrd);
1641 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1642 }
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 break;
1646 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 16) & 0xf;
1649 rd1 = (insn >> 0) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
1651 switch ((insn >> 22) & 3) {
1652 case 0:
1653 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1654 break;
1655 case 1:
1656 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1657 break;
1658 case 2:
1659 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1660 break;
1661 case 3:
1662 return 1;
1663 }
1664 gen_op_iwmmxt_movq_wRn_M0(wrd);
1665 gen_op_iwmmxt_set_mup();
1666 gen_op_iwmmxt_set_cup();
1667 break;
1668 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1673 if (insn & (1 << 22)) {
1674 if (insn & (1 << 20))
1675 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1676 else
1677 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1678 } else {
1679 if (insn & (1 << 20))
1680 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1681 else
1682 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1683 }
18c9b560
AZ
1684 gen_op_iwmmxt_movq_wRn_M0(wrd);
1685 gen_op_iwmmxt_set_mup();
1686 gen_op_iwmmxt_set_cup();
1687 break;
1688 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1689 wrd = (insn >> 12) & 0xf;
1690 rd0 = (insn >> 16) & 0xf;
1691 rd1 = (insn >> 0) & 0xf;
1692 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1693 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1694 tcg_gen_andi_i32(tmp, tmp, 7);
1695 iwmmxt_load_reg(cpu_V1, rd1);
1696 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1697 tcg_temp_free_i32(tmp);
18c9b560
AZ
1698 gen_op_iwmmxt_movq_wRn_M0(wrd);
1699 gen_op_iwmmxt_set_mup();
1700 break;
1701 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1702 if (((insn >> 6) & 3) == 3)
1703 return 1;
18c9b560
AZ
1704 rd = (insn >> 12) & 0xf;
1705 wrd = (insn >> 16) & 0xf;
da6b5335 1706 tmp = load_reg(s, rd);
18c9b560
AZ
1707 gen_op_iwmmxt_movq_M0_wRn(wrd);
1708 switch ((insn >> 6) & 3) {
1709 case 0:
da6b5335
FN
1710 tmp2 = tcg_const_i32(0xff);
1711 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1712 break;
1713 case 1:
da6b5335
FN
1714 tmp2 = tcg_const_i32(0xffff);
1715 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1716 break;
1717 case 2:
da6b5335
FN
1718 tmp2 = tcg_const_i32(0xffffffff);
1719 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1720 break;
da6b5335
FN
1721 default:
1722 TCGV_UNUSED(tmp2);
1723 TCGV_UNUSED(tmp3);
18c9b560 1724 }
da6b5335
FN
1725 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1726 tcg_temp_free(tmp3);
1727 tcg_temp_free(tmp2);
7d1b0095 1728 tcg_temp_free_i32(tmp);
18c9b560
AZ
1729 gen_op_iwmmxt_movq_wRn_M0(wrd);
1730 gen_op_iwmmxt_set_mup();
1731 break;
1732 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1733 rd = (insn >> 12) & 0xf;
1734 wrd = (insn >> 16) & 0xf;
da6b5335 1735 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1736 return 1;
1737 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1738 tmp = tcg_temp_new_i32();
18c9b560
AZ
1739 switch ((insn >> 22) & 3) {
1740 case 0:
da6b5335
FN
1741 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1742 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1743 if (insn & 8) {
1744 tcg_gen_ext8s_i32(tmp, tmp);
1745 } else {
1746 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1747 }
1748 break;
1749 case 1:
da6b5335
FN
1750 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1751 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1752 if (insn & 8) {
1753 tcg_gen_ext16s_i32(tmp, tmp);
1754 } else {
1755 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1756 }
1757 break;
1758 case 2:
da6b5335
FN
1759 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1760 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1761 break;
18c9b560 1762 }
da6b5335 1763 store_reg(s, rd, tmp);
18c9b560
AZ
1764 break;
1765 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1766 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1767 return 1;
da6b5335 1768 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1769 switch ((insn >> 22) & 3) {
1770 case 0:
da6b5335 1771 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1772 break;
1773 case 1:
da6b5335 1774 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1775 break;
1776 case 2:
da6b5335 1777 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1778 break;
18c9b560 1779 }
da6b5335
FN
1780 tcg_gen_shli_i32(tmp, tmp, 28);
1781 gen_set_nzcv(tmp);
7d1b0095 1782 tcg_temp_free_i32(tmp);
18c9b560
AZ
1783 break;
1784 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1785 if (((insn >> 6) & 3) == 3)
1786 return 1;
18c9b560
AZ
1787 rd = (insn >> 12) & 0xf;
1788 wrd = (insn >> 16) & 0xf;
da6b5335 1789 tmp = load_reg(s, rd);
18c9b560
AZ
1790 switch ((insn >> 6) & 3) {
1791 case 0:
da6b5335 1792 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1793 break;
1794 case 1:
da6b5335 1795 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1796 break;
1797 case 2:
da6b5335 1798 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1799 break;
18c9b560 1800 }
7d1b0095 1801 tcg_temp_free_i32(tmp);
18c9b560
AZ
1802 gen_op_iwmmxt_movq_wRn_M0(wrd);
1803 gen_op_iwmmxt_set_mup();
1804 break;
1805 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1806 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1807 return 1;
da6b5335 1808 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1809 tmp2 = tcg_temp_new_i32();
da6b5335 1810 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1811 switch ((insn >> 22) & 3) {
1812 case 0:
1813 for (i = 0; i < 7; i ++) {
da6b5335
FN
1814 tcg_gen_shli_i32(tmp2, tmp2, 4);
1815 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1816 }
1817 break;
1818 case 1:
1819 for (i = 0; i < 3; i ++) {
da6b5335
FN
1820 tcg_gen_shli_i32(tmp2, tmp2, 8);
1821 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1822 }
1823 break;
1824 case 2:
da6b5335
FN
1825 tcg_gen_shli_i32(tmp2, tmp2, 16);
1826 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1827 break;
18c9b560 1828 }
da6b5335 1829 gen_set_nzcv(tmp);
7d1b0095
PM
1830 tcg_temp_free_i32(tmp2);
1831 tcg_temp_free_i32(tmp);
18c9b560
AZ
1832 break;
1833 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1834 wrd = (insn >> 12) & 0xf;
1835 rd0 = (insn >> 16) & 0xf;
1836 gen_op_iwmmxt_movq_M0_wRn(rd0);
1837 switch ((insn >> 22) & 3) {
1838 case 0:
e677137d 1839 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1840 break;
1841 case 1:
e677137d 1842 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1843 break;
1844 case 2:
e677137d 1845 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1846 break;
1847 case 3:
1848 return 1;
1849 }
1850 gen_op_iwmmxt_movq_wRn_M0(wrd);
1851 gen_op_iwmmxt_set_mup();
1852 break;
1853 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1854 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1855 return 1;
da6b5335 1856 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1857 tmp2 = tcg_temp_new_i32();
da6b5335 1858 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1859 switch ((insn >> 22) & 3) {
1860 case 0:
1861 for (i = 0; i < 7; i ++) {
da6b5335
FN
1862 tcg_gen_shli_i32(tmp2, tmp2, 4);
1863 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1864 }
1865 break;
1866 case 1:
1867 for (i = 0; i < 3; i ++) {
da6b5335
FN
1868 tcg_gen_shli_i32(tmp2, tmp2, 8);
1869 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1870 }
1871 break;
1872 case 2:
da6b5335
FN
1873 tcg_gen_shli_i32(tmp2, tmp2, 16);
1874 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1875 break;
18c9b560 1876 }
da6b5335 1877 gen_set_nzcv(tmp);
7d1b0095
PM
1878 tcg_temp_free_i32(tmp2);
1879 tcg_temp_free_i32(tmp);
18c9b560
AZ
1880 break;
1881 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1882 rd = (insn >> 12) & 0xf;
1883 rd0 = (insn >> 16) & 0xf;
da6b5335 1884 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1885 return 1;
1886 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1887 tmp = tcg_temp_new_i32();
18c9b560
AZ
1888 switch ((insn >> 22) & 3) {
1889 case 0:
da6b5335 1890 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1891 break;
1892 case 1:
da6b5335 1893 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1894 break;
1895 case 2:
da6b5335 1896 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1897 break;
18c9b560 1898 }
da6b5335 1899 store_reg(s, rd, tmp);
18c9b560
AZ
1900 break;
1901 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1902 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1903 wrd = (insn >> 12) & 0xf;
1904 rd0 = (insn >> 16) & 0xf;
1905 rd1 = (insn >> 0) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0);
1907 switch ((insn >> 22) & 3) {
1908 case 0:
1909 if (insn & (1 << 21))
1910 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1911 else
1912 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1913 break;
1914 case 1:
1915 if (insn & (1 << 21))
1916 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1917 else
1918 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1919 break;
1920 case 2:
1921 if (insn & (1 << 21))
1922 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1923 else
1924 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1925 break;
1926 case 3:
1927 return 1;
1928 }
1929 gen_op_iwmmxt_movq_wRn_M0(wrd);
1930 gen_op_iwmmxt_set_mup();
1931 gen_op_iwmmxt_set_cup();
1932 break;
1933 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1934 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1935 wrd = (insn >> 12) & 0xf;
1936 rd0 = (insn >> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0);
1938 switch ((insn >> 22) & 3) {
1939 case 0:
1940 if (insn & (1 << 21))
1941 gen_op_iwmmxt_unpacklsb_M0();
1942 else
1943 gen_op_iwmmxt_unpacklub_M0();
1944 break;
1945 case 1:
1946 if (insn & (1 << 21))
1947 gen_op_iwmmxt_unpacklsw_M0();
1948 else
1949 gen_op_iwmmxt_unpackluw_M0();
1950 break;
1951 case 2:
1952 if (insn & (1 << 21))
1953 gen_op_iwmmxt_unpacklsl_M0();
1954 else
1955 gen_op_iwmmxt_unpacklul_M0();
1956 break;
1957 case 3:
1958 return 1;
1959 }
1960 gen_op_iwmmxt_movq_wRn_M0(wrd);
1961 gen_op_iwmmxt_set_mup();
1962 gen_op_iwmmxt_set_cup();
1963 break;
1964 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1965 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1966 wrd = (insn >> 12) & 0xf;
1967 rd0 = (insn >> 16) & 0xf;
1968 gen_op_iwmmxt_movq_M0_wRn(rd0);
1969 switch ((insn >> 22) & 3) {
1970 case 0:
1971 if (insn & (1 << 21))
1972 gen_op_iwmmxt_unpackhsb_M0();
1973 else
1974 gen_op_iwmmxt_unpackhub_M0();
1975 break;
1976 case 1:
1977 if (insn & (1 << 21))
1978 gen_op_iwmmxt_unpackhsw_M0();
1979 else
1980 gen_op_iwmmxt_unpackhuw_M0();
1981 break;
1982 case 2:
1983 if (insn & (1 << 21))
1984 gen_op_iwmmxt_unpackhsl_M0();
1985 else
1986 gen_op_iwmmxt_unpackhul_M0();
1987 break;
1988 case 3:
1989 return 1;
1990 }
1991 gen_op_iwmmxt_movq_wRn_M0(wrd);
1992 gen_op_iwmmxt_set_mup();
1993 gen_op_iwmmxt_set_cup();
1994 break;
1995 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1996 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1997 if (((insn >> 22) & 3) == 0)
1998 return 1;
18c9b560
AZ
1999 wrd = (insn >> 12) & 0xf;
2000 rd0 = (insn >> 16) & 0xf;
2001 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2002 tmp = tcg_temp_new_i32();
da6b5335 2003 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2004 tcg_temp_free_i32(tmp);
18c9b560 2005 return 1;
da6b5335 2006 }
18c9b560 2007 switch ((insn >> 22) & 3) {
18c9b560 2008 case 1:
947a2fa2 2009 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2010 break;
2011 case 2:
947a2fa2 2012 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2013 break;
2014 case 3:
947a2fa2 2015 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2016 break;
2017 }
7d1b0095 2018 tcg_temp_free_i32(tmp);
18c9b560
AZ
2019 gen_op_iwmmxt_movq_wRn_M0(wrd);
2020 gen_op_iwmmxt_set_mup();
2021 gen_op_iwmmxt_set_cup();
2022 break;
2023 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2024 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2025 if (((insn >> 22) & 3) == 0)
2026 return 1;
18c9b560
AZ
2027 wrd = (insn >> 12) & 0xf;
2028 rd0 = (insn >> 16) & 0xf;
2029 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2030 tmp = tcg_temp_new_i32();
da6b5335 2031 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2032 tcg_temp_free_i32(tmp);
18c9b560 2033 return 1;
da6b5335 2034 }
18c9b560 2035 switch ((insn >> 22) & 3) {
18c9b560 2036 case 1:
947a2fa2 2037 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2038 break;
2039 case 2:
947a2fa2 2040 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2041 break;
2042 case 3:
947a2fa2 2043 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2044 break;
2045 }
7d1b0095 2046 tcg_temp_free_i32(tmp);
18c9b560
AZ
2047 gen_op_iwmmxt_movq_wRn_M0(wrd);
2048 gen_op_iwmmxt_set_mup();
2049 gen_op_iwmmxt_set_cup();
2050 break;
2051 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2052 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2053 if (((insn >> 22) & 3) == 0)
2054 return 1;
18c9b560
AZ
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 16) & 0xf;
2057 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2058 tmp = tcg_temp_new_i32();
da6b5335 2059 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2060 tcg_temp_free_i32(tmp);
18c9b560 2061 return 1;
da6b5335 2062 }
18c9b560 2063 switch ((insn >> 22) & 3) {
18c9b560 2064 case 1:
947a2fa2 2065 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2066 break;
2067 case 2:
947a2fa2 2068 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2069 break;
2070 case 3:
947a2fa2 2071 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2072 break;
2073 }
7d1b0095 2074 tcg_temp_free_i32(tmp);
18c9b560
AZ
2075 gen_op_iwmmxt_movq_wRn_M0(wrd);
2076 gen_op_iwmmxt_set_mup();
2077 gen_op_iwmmxt_set_cup();
2078 break;
2079 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2080 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2081 if (((insn >> 22) & 3) == 0)
2082 return 1;
18c9b560
AZ
2083 wrd = (insn >> 12) & 0xf;
2084 rd0 = (insn >> 16) & 0xf;
2085 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2086 tmp = tcg_temp_new_i32();
18c9b560 2087 switch ((insn >> 22) & 3) {
18c9b560 2088 case 1:
da6b5335 2089 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2090 tcg_temp_free_i32(tmp);
18c9b560 2091 return 1;
da6b5335 2092 }
947a2fa2 2093 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2094 break;
2095 case 2:
da6b5335 2096 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2097 tcg_temp_free_i32(tmp);
18c9b560 2098 return 1;
da6b5335 2099 }
947a2fa2 2100 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2101 break;
2102 case 3:
da6b5335 2103 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2104 tcg_temp_free_i32(tmp);
18c9b560 2105 return 1;
da6b5335 2106 }
947a2fa2 2107 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2108 break;
2109 }
7d1b0095 2110 tcg_temp_free_i32(tmp);
18c9b560
AZ
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 gen_op_iwmmxt_set_cup();
2114 break;
2115 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2116 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2117 wrd = (insn >> 12) & 0xf;
2118 rd0 = (insn >> 16) & 0xf;
2119 rd1 = (insn >> 0) & 0xf;
2120 gen_op_iwmmxt_movq_M0_wRn(rd0);
2121 switch ((insn >> 22) & 3) {
2122 case 0:
2123 if (insn & (1 << 21))
2124 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2125 else
2126 gen_op_iwmmxt_minub_M0_wRn(rd1);
2127 break;
2128 case 1:
2129 if (insn & (1 << 21))
2130 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2131 else
2132 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2133 break;
2134 case 2:
2135 if (insn & (1 << 21))
2136 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2137 else
2138 gen_op_iwmmxt_minul_M0_wRn(rd1);
2139 break;
2140 case 3:
2141 return 1;
2142 }
2143 gen_op_iwmmxt_movq_wRn_M0(wrd);
2144 gen_op_iwmmxt_set_mup();
2145 break;
2146 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2147 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2148 wrd = (insn >> 12) & 0xf;
2149 rd0 = (insn >> 16) & 0xf;
2150 rd1 = (insn >> 0) & 0xf;
2151 gen_op_iwmmxt_movq_M0_wRn(rd0);
2152 switch ((insn >> 22) & 3) {
2153 case 0:
2154 if (insn & (1 << 21))
2155 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2156 else
2157 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2158 break;
2159 case 1:
2160 if (insn & (1 << 21))
2161 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2162 else
2163 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2164 break;
2165 case 2:
2166 if (insn & (1 << 21))
2167 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2168 else
2169 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2170 break;
2171 case 3:
2172 return 1;
2173 }
2174 gen_op_iwmmxt_movq_wRn_M0(wrd);
2175 gen_op_iwmmxt_set_mup();
2176 break;
2177 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2178 case 0x402: case 0x502: case 0x602: case 0x702:
2179 wrd = (insn >> 12) & 0xf;
2180 rd0 = (insn >> 16) & 0xf;
2181 rd1 = (insn >> 0) & 0xf;
2182 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2183 tmp = tcg_const_i32((insn >> 20) & 3);
2184 iwmmxt_load_reg(cpu_V1, rd1);
2185 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2186 tcg_temp_free(tmp);
18c9b560
AZ
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2189 break;
2190 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2191 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2192 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2193 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2194 wrd = (insn >> 12) & 0xf;
2195 rd0 = (insn >> 16) & 0xf;
2196 rd1 = (insn >> 0) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0);
2198 switch ((insn >> 20) & 0xf) {
2199 case 0x0:
2200 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2201 break;
2202 case 0x1:
2203 gen_op_iwmmxt_subub_M0_wRn(rd1);
2204 break;
2205 case 0x3:
2206 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2207 break;
2208 case 0x4:
2209 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2210 break;
2211 case 0x5:
2212 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2213 break;
2214 case 0x7:
2215 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2216 break;
2217 case 0x8:
2218 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2219 break;
2220 case 0x9:
2221 gen_op_iwmmxt_subul_M0_wRn(rd1);
2222 break;
2223 case 0xb:
2224 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2225 break;
2226 default:
2227 return 1;
2228 }
2229 gen_op_iwmmxt_movq_wRn_M0(wrd);
2230 gen_op_iwmmxt_set_mup();
2231 gen_op_iwmmxt_set_cup();
2232 break;
2233 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2234 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2235 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2236 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2240 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
947a2fa2 2241 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
da6b5335 2242 tcg_temp_free(tmp);
18c9b560
AZ
2243 gen_op_iwmmxt_movq_wRn_M0(wrd);
2244 gen_op_iwmmxt_set_mup();
2245 gen_op_iwmmxt_set_cup();
2246 break;
2247 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2248 case 0x418: case 0x518: case 0x618: case 0x718:
2249 case 0x818: case 0x918: case 0xa18: case 0xb18:
2250 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2251 wrd = (insn >> 12) & 0xf;
2252 rd0 = (insn >> 16) & 0xf;
2253 rd1 = (insn >> 0) & 0xf;
2254 gen_op_iwmmxt_movq_M0_wRn(rd0);
2255 switch ((insn >> 20) & 0xf) {
2256 case 0x0:
2257 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2258 break;
2259 case 0x1:
2260 gen_op_iwmmxt_addub_M0_wRn(rd1);
2261 break;
2262 case 0x3:
2263 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2264 break;
2265 case 0x4:
2266 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2267 break;
2268 case 0x5:
2269 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2270 break;
2271 case 0x7:
2272 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2273 break;
2274 case 0x8:
2275 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2276 break;
2277 case 0x9:
2278 gen_op_iwmmxt_addul_M0_wRn(rd1);
2279 break;
2280 case 0xb:
2281 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2282 break;
2283 default:
2284 return 1;
2285 }
2286 gen_op_iwmmxt_movq_wRn_M0(wrd);
2287 gen_op_iwmmxt_set_mup();
2288 gen_op_iwmmxt_set_cup();
2289 break;
2290 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2291 case 0x408: case 0x508: case 0x608: case 0x708:
2292 case 0x808: case 0x908: case 0xa08: case 0xb08:
2293 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2294 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2295 return 1;
18c9b560
AZ
2296 wrd = (insn >> 12) & 0xf;
2297 rd0 = (insn >> 16) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2300 switch ((insn >> 22) & 3) {
18c9b560
AZ
2301 case 1:
2302 if (insn & (1 << 21))
2303 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2304 else
2305 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2306 break;
2307 case 2:
2308 if (insn & (1 << 21))
2309 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2310 else
2311 gen_op_iwmmxt_packul_M0_wRn(rd1);
2312 break;
2313 case 3:
2314 if (insn & (1 << 21))
2315 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2316 else
2317 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2318 break;
2319 }
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x201: case 0x203: case 0x205: case 0x207:
2325 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2326 case 0x211: case 0x213: case 0x215: case 0x217:
2327 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2328 wrd = (insn >> 5) & 0xf;
2329 rd0 = (insn >> 12) & 0xf;
2330 rd1 = (insn >> 0) & 0xf;
2331 if (rd0 == 0xf || rd1 == 0xf)
2332 return 1;
2333 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2334 tmp = load_reg(s, rd0);
2335 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2336 switch ((insn >> 16) & 0xf) {
2337 case 0x0: /* TMIA */
da6b5335 2338 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2339 break;
2340 case 0x8: /* TMIAPH */
da6b5335 2341 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2342 break;
2343 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2344 if (insn & (1 << 16))
da6b5335 2345 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2346 if (insn & (1 << 17))
da6b5335
FN
2347 tcg_gen_shri_i32(tmp2, tmp2, 16);
2348 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2349 break;
2350 default:
7d1b0095
PM
2351 tcg_temp_free_i32(tmp2);
2352 tcg_temp_free_i32(tmp);
18c9b560
AZ
2353 return 1;
2354 }
7d1b0095
PM
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
18c9b560
AZ
2357 gen_op_iwmmxt_movq_wRn_M0(wrd);
2358 gen_op_iwmmxt_set_mup();
2359 break;
2360 default:
2361 return 1;
2362 }
2363
2364 return 0;
2365}
2366
a1c7273b 2367/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560
AZ
2368 (ie. an undefined instruction). */
2369static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2370{
2371 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2372 TCGv tmp, tmp2;
18c9b560
AZ
2373
2374 if ((insn & 0x0ff00f10) == 0x0e200010) {
2375 /* Multiply with Internal Accumulate Format */
2376 rd0 = (insn >> 12) & 0xf;
2377 rd1 = insn & 0xf;
2378 acc = (insn >> 5) & 7;
2379
2380 if (acc != 0)
2381 return 1;
2382
3a554c0f
FN
2383 tmp = load_reg(s, rd0);
2384 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2385 switch ((insn >> 16) & 0xf) {
2386 case 0x0: /* MIA */
3a554c0f 2387 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2388 break;
2389 case 0x8: /* MIAPH */
3a554c0f 2390 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2391 break;
2392 case 0xc: /* MIABB */
2393 case 0xd: /* MIABT */
2394 case 0xe: /* MIATB */
2395 case 0xf: /* MIATT */
18c9b560 2396 if (insn & (1 << 16))
3a554c0f 2397 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2398 if (insn & (1 << 17))
3a554c0f
FN
2399 tcg_gen_shri_i32(tmp2, tmp2, 16);
2400 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2401 break;
2402 default:
2403 return 1;
2404 }
7d1b0095
PM
2405 tcg_temp_free_i32(tmp2);
2406 tcg_temp_free_i32(tmp);
18c9b560
AZ
2407
2408 gen_op_iwmmxt_movq_wRn_M0(acc);
2409 return 0;
2410 }
2411
2412 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2413 /* Internal Accumulator Access Format */
2414 rdhi = (insn >> 16) & 0xf;
2415 rdlo = (insn >> 12) & 0xf;
2416 acc = insn & 7;
2417
2418 if (acc != 0)
2419 return 1;
2420
2421 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2422 iwmmxt_load_reg(cpu_V0, acc);
2423 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2424 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2425 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2426 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2427 } else { /* MAR */
3a554c0f
FN
2428 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2429 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2430 }
2431 return 0;
2432 }
2433
2434 return 1;
2435}
2436
c1713132
AZ
2437/* Disassemble system coprocessor instruction. Return nonzero if
2438 instruction is not defined. */
2439static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2440{
b75263d6 2441 TCGv tmp, tmp2;
c1713132
AZ
2442 uint32_t rd = (insn >> 12) & 0xf;
2443 uint32_t cp = (insn >> 8) & 0xf;
2444 if (IS_USER(s)) {
2445 return 1;
2446 }
2447
18c9b560 2448 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2449 if (!env->cp[cp].cp_read)
2450 return 1;
8984bd2e 2451 gen_set_pc_im(s->pc);
7d1b0095 2452 tmp = tcg_temp_new_i32();
b75263d6
JR
2453 tmp2 = tcg_const_i32(insn);
2454 gen_helper_get_cp(tmp, cpu_env, tmp2);
2455 tcg_temp_free(tmp2);
8984bd2e 2456 store_reg(s, rd, tmp);
c1713132
AZ
2457 } else {
2458 if (!env->cp[cp].cp_write)
2459 return 1;
8984bd2e
PB
2460 gen_set_pc_im(s->pc);
2461 tmp = load_reg(s, rd);
b75263d6
JR
2462 tmp2 = tcg_const_i32(insn);
2463 gen_helper_set_cp(cpu_env, tmp2, tmp);
2464 tcg_temp_free(tmp2);
7d1b0095 2465 tcg_temp_free_i32(tmp);
c1713132
AZ
2466 }
2467 return 0;
2468}
2469
9ee6e8bb
PB
2470static int cp15_user_ok(uint32_t insn)
2471{
2472 int cpn = (insn >> 16) & 0xf;
2473 int cpm = insn & 0xf;
2474 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2475
2476 if (cpn == 13 && cpm == 0) {
2477 /* TLS register. */
2478 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2479 return 1;
2480 }
2481 if (cpn == 7) {
2482 /* ISB, DSB, DMB. */
2483 if ((cpm == 5 && op == 4)
2484 || (cpm == 10 && (op == 4 || op == 5)))
2485 return 1;
2486 }
2487 return 0;
2488}
2489
3f26c122
RV
2490static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2491{
2492 TCGv tmp;
2493 int cpn = (insn >> 16) & 0xf;
2494 int cpm = insn & 0xf;
2495 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2496
2497 if (!arm_feature(env, ARM_FEATURE_V6K))
2498 return 0;
2499
2500 if (!(cpn == 13 && cpm == 0))
2501 return 0;
2502
2503 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2504 switch (op) {
2505 case 2:
c5883be2 2506 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2507 break;
2508 case 3:
c5883be2 2509 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2510 break;
2511 case 4:
c5883be2 2512 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2513 break;
2514 default:
3f26c122
RV
2515 return 0;
2516 }
2517 store_reg(s, rd, tmp);
2518
2519 } else {
2520 tmp = load_reg(s, rd);
2521 switch (op) {
2522 case 2:
c5883be2 2523 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2524 break;
2525 case 3:
c5883be2 2526 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2527 break;
2528 case 4:
c5883be2 2529 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2530 break;
2531 default:
7d1b0095 2532 tcg_temp_free_i32(tmp);
3f26c122
RV
2533 return 0;
2534 }
3f26c122
RV
2535 }
2536 return 1;
2537}
2538
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined. */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op. */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* User-mode access is only allowed for a small whitelist of
     * registers (TLS reads, barriers); everything else is UNDEF.  */
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    /* TLS register accesses are emulated inline rather than via helper.  */
    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    /* Generic path: pass the raw insn to the get/set_cp15 helpers.  */
    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes. */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour. */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
2625
9ee6e8bb
PB
2626#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2627#define VFP_SREG(insn, bigbit, smallbit) \
2628 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2629#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2630 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2631 reg = (((insn) >> (bigbit)) & 0x0f) \
2632 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2633 } else { \
2634 if (insn & (1 << (smallbit))) \
2635 return 1; \
2636 reg = ((insn) >> (bigbit)) & 0x0f; \
2637 }} while (0)
2638
2639#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2640#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2641#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2642#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2643#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2644#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2645
4373f3ce
PB
2646/* Move between integer and VFP cores. */
2647static TCGv gen_vfp_mrs(void)
2648{
7d1b0095 2649 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2650 tcg_gen_mov_i32(tmp, cpu_F0s);
2651 return tmp;
2652}
2653
2654static void gen_vfp_msr(TCGv tmp)
2655{
2656 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2657 tcg_temp_free_i32(tmp);
4373f3ce
PB
2658}
2659
ad69471c
PB
2660static void gen_neon_dup_u8(TCGv var, int shift)
2661{
7d1b0095 2662 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2663 if (shift)
2664 tcg_gen_shri_i32(var, var, shift);
86831435 2665 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2666 tcg_gen_shli_i32(tmp, var, 8);
2667 tcg_gen_or_i32(var, var, tmp);
2668 tcg_gen_shli_i32(tmp, var, 16);
2669 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2670 tcg_temp_free_i32(tmp);
ad69471c
PB
2671}
2672
2673static void gen_neon_dup_low16(TCGv var)
2674{
7d1b0095 2675 TCGv tmp = tcg_temp_new_i32();
86831435 2676 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2677 tcg_gen_shli_i32(tmp, var, 16);
2678 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2679 tcg_temp_free_i32(tmp);
ad69471c
PB
2680}
2681
2682static void gen_neon_dup_high16(TCGv var)
2683{
7d1b0095 2684 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2685 tcg_gen_andi_i32(var, var, 0xffff0000);
2686 tcg_gen_shri_i32(tmp, var, 16);
2687 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2688 tcg_temp_free_i32(tmp);
ad69471c
PB
2689}
2690
8e18cde3
PM
2691static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2692{
2693 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2694 TCGv tmp;
2695 switch (size) {
2696 case 0:
2697 tmp = gen_ld8u(addr, IS_USER(s));
2698 gen_neon_dup_u8(tmp, 0);
2699 break;
2700 case 1:
2701 tmp = gen_ld16u(addr, IS_USER(s));
2702 gen_neon_dup_low16(tmp);
2703 break;
2704 case 2:
2705 tmp = gen_ld32(addr, IS_USER(s));
2706 break;
2707 default: /* Avoid compiler warnings. */
2708 abort();
2709 }
2710 return tmp;
2711}
2712
a1c7273b 2713/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95
FB
2714 (ie. an undefined instruction). */
2715static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2716{
2717 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2718 int dp, veclen;
312eea9f 2719 TCGv addr;
4373f3ce 2720 TCGv tmp;
ad69471c 2721 TCGv tmp2;
b7bcbe95 2722
40f137e1
PB
2723 if (!arm_feature(env, ARM_FEATURE_VFP))
2724 return 1;
2725
5df8bac1 2726 if (!s->vfp_enabled) {
9ee6e8bb 2727 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2728 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2729 return 1;
2730 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2731 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2732 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2733 return 1;
2734 }
b7bcbe95
FB
2735 dp = ((insn & 0xf00) == 0xb00);
2736 switch ((insn >> 24) & 0xf) {
2737 case 0xe:
2738 if (insn & (1 << 4)) {
2739 /* single register transfer */
b7bcbe95
FB
2740 rd = (insn >> 12) & 0xf;
2741 if (dp) {
9ee6e8bb
PB
2742 int size;
2743 int pass;
2744
2745 VFP_DREG_N(rn, insn);
2746 if (insn & 0xf)
b7bcbe95 2747 return 1;
9ee6e8bb
PB
2748 if (insn & 0x00c00060
2749 && !arm_feature(env, ARM_FEATURE_NEON))
2750 return 1;
2751
2752 pass = (insn >> 21) & 1;
2753 if (insn & (1 << 22)) {
2754 size = 0;
2755 offset = ((insn >> 5) & 3) * 8;
2756 } else if (insn & (1 << 5)) {
2757 size = 1;
2758 offset = (insn & (1 << 6)) ? 16 : 0;
2759 } else {
2760 size = 2;
2761 offset = 0;
2762 }
18c9b560 2763 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2764 /* vfp->arm */
ad69471c 2765 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2766 switch (size) {
2767 case 0:
9ee6e8bb 2768 if (offset)
ad69471c 2769 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2770 if (insn & (1 << 23))
ad69471c 2771 gen_uxtb(tmp);
9ee6e8bb 2772 else
ad69471c 2773 gen_sxtb(tmp);
9ee6e8bb
PB
2774 break;
2775 case 1:
9ee6e8bb
PB
2776 if (insn & (1 << 23)) {
2777 if (offset) {
ad69471c 2778 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2779 } else {
ad69471c 2780 gen_uxth(tmp);
9ee6e8bb
PB
2781 }
2782 } else {
2783 if (offset) {
ad69471c 2784 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2785 } else {
ad69471c 2786 gen_sxth(tmp);
9ee6e8bb
PB
2787 }
2788 }
2789 break;
2790 case 2:
9ee6e8bb
PB
2791 break;
2792 }
ad69471c 2793 store_reg(s, rd, tmp);
b7bcbe95
FB
2794 } else {
2795 /* arm->vfp */
ad69471c 2796 tmp = load_reg(s, rd);
9ee6e8bb
PB
2797 if (insn & (1 << 23)) {
2798 /* VDUP */
2799 if (size == 0) {
ad69471c 2800 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2801 } else if (size == 1) {
ad69471c 2802 gen_neon_dup_low16(tmp);
9ee6e8bb 2803 }
cbbccffc 2804 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2805 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2806 tcg_gen_mov_i32(tmp2, tmp);
2807 neon_store_reg(rn, n, tmp2);
2808 }
2809 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2810 } else {
2811 /* VMOV */
2812 switch (size) {
2813 case 0:
ad69471c
PB
2814 tmp2 = neon_load_reg(rn, pass);
2815 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2816 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2817 break;
2818 case 1:
ad69471c
PB
2819 tmp2 = neon_load_reg(rn, pass);
2820 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2821 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2822 break;
2823 case 2:
9ee6e8bb
PB
2824 break;
2825 }
ad69471c 2826 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2827 }
b7bcbe95 2828 }
9ee6e8bb
PB
2829 } else { /* !dp */
2830 if ((insn & 0x6f) != 0x00)
2831 return 1;
2832 rn = VFP_SREG_N(insn);
18c9b560 2833 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2834 /* vfp->arm */
2835 if (insn & (1 << 21)) {
2836 /* system register */
40f137e1 2837 rn >>= 1;
9ee6e8bb 2838
b7bcbe95 2839 switch (rn) {
40f137e1 2840 case ARM_VFP_FPSID:
4373f3ce 2841 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2842 VFP3 restricts all id registers to privileged
2843 accesses. */
2844 if (IS_USER(s)
2845 && arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
4373f3ce 2847 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2848 break;
40f137e1 2849 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2850 if (IS_USER(s))
2851 return 1;
4373f3ce 2852 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2853 break;
40f137e1
PB
2854 case ARM_VFP_FPINST:
2855 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2856 /* Not present in VFP3. */
2857 if (IS_USER(s)
2858 || arm_feature(env, ARM_FEATURE_VFP3))
2859 return 1;
4373f3ce 2860 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2861 break;
40f137e1 2862 case ARM_VFP_FPSCR:
601d70b9 2863 if (rd == 15) {
4373f3ce
PB
2864 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2865 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2866 } else {
7d1b0095 2867 tmp = tcg_temp_new_i32();
4373f3ce
PB
2868 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2869 }
b7bcbe95 2870 break;
9ee6e8bb
PB
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
2873 if (IS_USER(s)
2874 || !arm_feature(env, ARM_FEATURE_VFP3))
2875 return 1;
4373f3ce 2876 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2877 break;
b7bcbe95
FB
2878 default:
2879 return 1;
2880 }
2881 } else {
2882 gen_mov_F0_vreg(0, rn);
4373f3ce 2883 tmp = gen_vfp_mrs();
b7bcbe95
FB
2884 }
2885 if (rd == 15) {
b5ff1b31 2886 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2887 gen_set_nzcv(tmp);
7d1b0095 2888 tcg_temp_free_i32(tmp);
4373f3ce
PB
2889 } else {
2890 store_reg(s, rd, tmp);
2891 }
b7bcbe95
FB
2892 } else {
2893 /* arm->vfp */
4373f3ce 2894 tmp = load_reg(s, rd);
b7bcbe95 2895 if (insn & (1 << 21)) {
40f137e1 2896 rn >>= 1;
b7bcbe95
FB
2897 /* system register */
2898 switch (rn) {
40f137e1 2899 case ARM_VFP_FPSID:
9ee6e8bb
PB
2900 case ARM_VFP_MVFR0:
2901 case ARM_VFP_MVFR1:
b7bcbe95
FB
2902 /* Writes are ignored. */
2903 break;
40f137e1 2904 case ARM_VFP_FPSCR:
4373f3ce 2905 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2906 tcg_temp_free_i32(tmp);
b5ff1b31 2907 gen_lookup_tb(s);
b7bcbe95 2908 break;
40f137e1 2909 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2910 if (IS_USER(s))
2911 return 1;
71b3c3de
JR
2912 /* TODO: VFP subarchitecture support.
2913 * For now, keep the EN bit only */
2914 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2915 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2916 gen_lookup_tb(s);
2917 break;
2918 case ARM_VFP_FPINST:
2919 case ARM_VFP_FPINST2:
4373f3ce 2920 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2921 break;
b7bcbe95
FB
2922 default:
2923 return 1;
2924 }
2925 } else {
4373f3ce 2926 gen_vfp_msr(tmp);
b7bcbe95
FB
2927 gen_mov_vreg_F0(0, rn);
2928 }
2929 }
2930 }
2931 } else {
2932 /* data processing */
2933 /* The opcode is in bits 23, 21, 20 and 6. */
2934 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2935 if (dp) {
2936 if (op == 15) {
2937 /* rn is opcode */
2938 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2939 } else {
2940 /* rn is register number */
9ee6e8bb 2941 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2942 }
2943
04595bf6 2944 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2945 /* Integer or single precision destination. */
9ee6e8bb 2946 rd = VFP_SREG_D(insn);
b7bcbe95 2947 } else {
9ee6e8bb 2948 VFP_DREG_D(rd, insn);
b7bcbe95 2949 }
04595bf6
PM
2950 if (op == 15 &&
2951 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2952 /* VCVT from int is always from S reg regardless of dp bit.
2953 * VCVT with immediate frac_bits has same format as SREG_M
2954 */
2955 rm = VFP_SREG_M(insn);
b7bcbe95 2956 } else {
9ee6e8bb 2957 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2958 }
2959 } else {
9ee6e8bb 2960 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2961 if (op == 15 && rn == 15) {
2962 /* Double precision destination. */
9ee6e8bb
PB
2963 VFP_DREG_D(rd, insn);
2964 } else {
2965 rd = VFP_SREG_D(insn);
2966 }
04595bf6
PM
2967 /* NB that we implicitly rely on the encoding for the frac_bits
2968 * in VCVT of fixed to float being the same as that of an SREG_M
2969 */
9ee6e8bb 2970 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2971 }
2972
69d1fc22 2973 veclen = s->vec_len;
b7bcbe95
FB
2974 if (op == 15 && rn > 3)
2975 veclen = 0;
2976
2977 /* Shut up compiler warnings. */
2978 delta_m = 0;
2979 delta_d = 0;
2980 bank_mask = 0;
3b46e624 2981
b7bcbe95
FB
2982 if (veclen > 0) {
2983 if (dp)
2984 bank_mask = 0xc;
2985 else
2986 bank_mask = 0x18;
2987
2988 /* Figure out what type of vector operation this is. */
2989 if ((rd & bank_mask) == 0) {
2990 /* scalar */
2991 veclen = 0;
2992 } else {
2993 if (dp)
69d1fc22 2994 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2995 else
69d1fc22 2996 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2997
2998 if ((rm & bank_mask) == 0) {
2999 /* mixed scalar/vector */
3000 delta_m = 0;
3001 } else {
3002 /* vector */
3003 delta_m = delta_d;
3004 }
3005 }
3006 }
3007
3008 /* Load the initial operands. */
3009 if (op == 15) {
3010 switch (rn) {
3011 case 16:
3012 case 17:
3013 /* Integer source */
3014 gen_mov_F0_vreg(0, rm);
3015 break;
3016 case 8:
3017 case 9:
3018 /* Compare */
3019 gen_mov_F0_vreg(dp, rd);
3020 gen_mov_F1_vreg(dp, rm);
3021 break;
3022 case 10:
3023 case 11:
3024 /* Compare with zero */
3025 gen_mov_F0_vreg(dp, rd);
3026 gen_vfp_F1_ld0(dp);
3027 break;
9ee6e8bb
PB
3028 case 20:
3029 case 21:
3030 case 22:
3031 case 23:
644ad806
PB
3032 case 28:
3033 case 29:
3034 case 30:
3035 case 31:
9ee6e8bb
PB
3036 /* Source and destination the same. */
3037 gen_mov_F0_vreg(dp, rd);
3038 break;
b7bcbe95
FB
3039 default:
3040 /* One source operand. */
3041 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3042 break;
b7bcbe95
FB
3043 }
3044 } else {
3045 /* Two source operands. */
3046 gen_mov_F0_vreg(dp, rn);
3047 gen_mov_F1_vreg(dp, rm);
3048 }
3049
3050 for (;;) {
3051 /* Perform the calculation. */
3052 switch (op) {
605a6aed
PM
3053 case 0: /* VMLA: fd + (fn * fm) */
3054 /* Note that order of inputs to the add matters for NaNs */
3055 gen_vfp_F1_mul(dp);
3056 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3057 gen_vfp_add(dp);
3058 break;
605a6aed 3059 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3060 gen_vfp_mul(dp);
605a6aed
PM
3061 gen_vfp_F1_neg(dp);
3062 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3063 gen_vfp_add(dp);
3064 break;
605a6aed
PM
3065 case 2: /* VNMLS: -fd + (fn * fm) */
3066 /* Note that it isn't valid to replace (-A + B) with (B - A)
3067 * or similar plausible looking simplifications
3068 * because this will give wrong results for NaNs.
3069 */
3070 gen_vfp_F1_mul(dp);
3071 gen_mov_F0_vreg(dp, rd);
3072 gen_vfp_neg(dp);
3073 gen_vfp_add(dp);
b7bcbe95 3074 break;
605a6aed 3075 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3076 gen_vfp_mul(dp);
605a6aed
PM
3077 gen_vfp_F1_neg(dp);
3078 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3079 gen_vfp_neg(dp);
605a6aed 3080 gen_vfp_add(dp);
b7bcbe95
FB
3081 break;
3082 case 4: /* mul: fn * fm */
3083 gen_vfp_mul(dp);
3084 break;
3085 case 5: /* nmul: -(fn * fm) */
3086 gen_vfp_mul(dp);
3087 gen_vfp_neg(dp);
3088 break;
3089 case 6: /* add: fn + fm */
3090 gen_vfp_add(dp);
3091 break;
3092 case 7: /* sub: fn - fm */
3093 gen_vfp_sub(dp);
3094 break;
3095 case 8: /* div: fn / fm */
3096 gen_vfp_div(dp);
3097 break;
9ee6e8bb
PB
3098 case 14: /* fconst */
3099 if (!arm_feature(env, ARM_FEATURE_VFP3))
3100 return 1;
3101
3102 n = (insn << 12) & 0x80000000;
3103 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3104 if (dp) {
3105 if (i & 0x40)
3106 i |= 0x3f80;
3107 else
3108 i |= 0x4000;
3109 n |= i << 16;
4373f3ce 3110 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3111 } else {
3112 if (i & 0x40)
3113 i |= 0x780;
3114 else
3115 i |= 0x800;
3116 n |= i << 19;
5b340b51 3117 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3118 }
9ee6e8bb 3119 break;
b7bcbe95
FB
3120 case 15: /* extension space */
3121 switch (rn) {
3122 case 0: /* cpy */
3123 /* no-op */
3124 break;
3125 case 1: /* abs */
3126 gen_vfp_abs(dp);
3127 break;
3128 case 2: /* neg */
3129 gen_vfp_neg(dp);
3130 break;
3131 case 3: /* sqrt */
3132 gen_vfp_sqrt(dp);
3133 break;
60011498
PB
3134 case 4: /* vcvtb.f32.f16 */
3135 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3136 return 1;
3137 tmp = gen_vfp_mrs();
3138 tcg_gen_ext16u_i32(tmp, tmp);
3139 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3140 tcg_temp_free_i32(tmp);
60011498
PB
3141 break;
3142 case 5: /* vcvtt.f32.f16 */
3143 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3144 return 1;
3145 tmp = gen_vfp_mrs();
3146 tcg_gen_shri_i32(tmp, tmp, 16);
3147 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3148 tcg_temp_free_i32(tmp);
60011498
PB
3149 break;
3150 case 6: /* vcvtb.f16.f32 */
3151 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3152 return 1;
7d1b0095 3153 tmp = tcg_temp_new_i32();
60011498
PB
3154 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3155 gen_mov_F0_vreg(0, rd);
3156 tmp2 = gen_vfp_mrs();
3157 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3158 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3159 tcg_temp_free_i32(tmp2);
60011498
PB
3160 gen_vfp_msr(tmp);
3161 break;
3162 case 7: /* vcvtt.f16.f32 */
3163 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3164 return 1;
7d1b0095 3165 tmp = tcg_temp_new_i32();
60011498
PB
3166 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3167 tcg_gen_shli_i32(tmp, tmp, 16);
3168 gen_mov_F0_vreg(0, rd);
3169 tmp2 = gen_vfp_mrs();
3170 tcg_gen_ext16u_i32(tmp2, tmp2);
3171 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3172 tcg_temp_free_i32(tmp2);
60011498
PB
3173 gen_vfp_msr(tmp);
3174 break;
b7bcbe95
FB
3175 case 8: /* cmp */
3176 gen_vfp_cmp(dp);
3177 break;
3178 case 9: /* cmpe */
3179 gen_vfp_cmpe(dp);
3180 break;
3181 case 10: /* cmpz */
3182 gen_vfp_cmp(dp);
3183 break;
3184 case 11: /* cmpez */
3185 gen_vfp_F1_ld0(dp);
3186 gen_vfp_cmpe(dp);
3187 break;
3188 case 15: /* single<->double conversion */
3189 if (dp)
4373f3ce 3190 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3191 else
4373f3ce 3192 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3193 break;
3194 case 16: /* fuito */
5500b06c 3195 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3196 break;
3197 case 17: /* fsito */
5500b06c 3198 gen_vfp_sito(dp, 0);
b7bcbe95 3199 break;
9ee6e8bb
PB
3200 case 20: /* fshto */
3201 if (!arm_feature(env, ARM_FEATURE_VFP3))
3202 return 1;
5500b06c 3203 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3204 break;
3205 case 21: /* fslto */
3206 if (!arm_feature(env, ARM_FEATURE_VFP3))
3207 return 1;
5500b06c 3208 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3209 break;
3210 case 22: /* fuhto */
3211 if (!arm_feature(env, ARM_FEATURE_VFP3))
3212 return 1;
5500b06c 3213 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3214 break;
3215 case 23: /* fulto */
3216 if (!arm_feature(env, ARM_FEATURE_VFP3))
3217 return 1;
5500b06c 3218 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3219 break;
b7bcbe95 3220 case 24: /* ftoui */
5500b06c 3221 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3222 break;
3223 case 25: /* ftouiz */
5500b06c 3224 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3225 break;
3226 case 26: /* ftosi */
5500b06c 3227 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3228 break;
3229 case 27: /* ftosiz */
5500b06c 3230 gen_vfp_tosiz(dp, 0);
b7bcbe95 3231 break;
9ee6e8bb
PB
3232 case 28: /* ftosh */
3233 if (!arm_feature(env, ARM_FEATURE_VFP3))
3234 return 1;
5500b06c 3235 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3236 break;
3237 case 29: /* ftosl */
3238 if (!arm_feature(env, ARM_FEATURE_VFP3))
3239 return 1;
5500b06c 3240 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3241 break;
3242 case 30: /* ftouh */
3243 if (!arm_feature(env, ARM_FEATURE_VFP3))
3244 return 1;
5500b06c 3245 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3246 break;
3247 case 31: /* ftoul */
3248 if (!arm_feature(env, ARM_FEATURE_VFP3))
3249 return 1;
5500b06c 3250 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3251 break;
b7bcbe95
FB
3252 default: /* undefined */
3253 printf ("rn:%d\n", rn);
3254 return 1;
3255 }
3256 break;
3257 default: /* undefined */
3258 printf ("op:%d\n", op);
3259 return 1;
3260 }
3261
3262 /* Write back the result. */
3263 if (op == 15 && (rn >= 8 && rn <= 11))
3264 ; /* Comparison, do nothing. */
04595bf6
PM
3265 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3266 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3267 gen_mov_vreg_F0(0, rd);
3268 else if (op == 15 && rn == 15)
3269 /* conversion */
3270 gen_mov_vreg_F0(!dp, rd);
3271 else
3272 gen_mov_vreg_F0(dp, rd);
3273
3274 /* break out of the loop if we have finished */
3275 if (veclen == 0)
3276 break;
3277
3278 if (op == 15 && delta_m == 0) {
3279 /* single source one-many */
3280 while (veclen--) {
3281 rd = ((rd + delta_d) & (bank_mask - 1))
3282 | (rd & bank_mask);
3283 gen_mov_vreg_F0(dp, rd);
3284 }
3285 break;
3286 }
3287 /* Setup the next operands. */
3288 veclen--;
3289 rd = ((rd + delta_d) & (bank_mask - 1))
3290 | (rd & bank_mask);
3291
3292 if (op == 15) {
3293 /* One source operand. */
3294 rm = ((rm + delta_m) & (bank_mask - 1))
3295 | (rm & bank_mask);
3296 gen_mov_F0_vreg(dp, rm);
3297 } else {
3298 /* Two source operands. */
3299 rn = ((rn + delta_d) & (bank_mask - 1))
3300 | (rn & bank_mask);
3301 gen_mov_F0_vreg(dp, rn);
3302 if (delta_m) {
3303 rm = ((rm + delta_m) & (bank_mask - 1))
3304 | (rm & bank_mask);
3305 gen_mov_F1_vreg(dp, rm);
3306 }
3307 }
3308 }
3309 }
3310 break;
3311 case 0xc:
3312 case 0xd:
8387da81 3313 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3314 /* two-register transfer */
3315 rn = (insn >> 16) & 0xf;
3316 rd = (insn >> 12) & 0xf;
3317 if (dp) {
9ee6e8bb
PB
3318 VFP_DREG_M(rm, insn);
3319 } else {
3320 rm = VFP_SREG_M(insn);
3321 }
b7bcbe95 3322
18c9b560 3323 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3324 /* vfp->arm */
3325 if (dp) {
4373f3ce
PB
3326 gen_mov_F0_vreg(0, rm * 2);
3327 tmp = gen_vfp_mrs();
3328 store_reg(s, rd, tmp);
3329 gen_mov_F0_vreg(0, rm * 2 + 1);
3330 tmp = gen_vfp_mrs();
3331 store_reg(s, rn, tmp);
b7bcbe95
FB
3332 } else {
3333 gen_mov_F0_vreg(0, rm);
4373f3ce 3334 tmp = gen_vfp_mrs();
8387da81 3335 store_reg(s, rd, tmp);
b7bcbe95 3336 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3337 tmp = gen_vfp_mrs();
8387da81 3338 store_reg(s, rn, tmp);
b7bcbe95
FB
3339 }
3340 } else {
3341 /* arm->vfp */
3342 if (dp) {
4373f3ce
PB
3343 tmp = load_reg(s, rd);
3344 gen_vfp_msr(tmp);
3345 gen_mov_vreg_F0(0, rm * 2);
3346 tmp = load_reg(s, rn);
3347 gen_vfp_msr(tmp);
3348 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3349 } else {
8387da81 3350 tmp = load_reg(s, rd);
4373f3ce 3351 gen_vfp_msr(tmp);
b7bcbe95 3352 gen_mov_vreg_F0(0, rm);
8387da81 3353 tmp = load_reg(s, rn);
4373f3ce 3354 gen_vfp_msr(tmp);
b7bcbe95
FB
3355 gen_mov_vreg_F0(0, rm + 1);
3356 }
3357 }
3358 } else {
3359 /* Load/store */
3360 rn = (insn >> 16) & 0xf;
3361 if (dp)
9ee6e8bb 3362 VFP_DREG_D(rd, insn);
b7bcbe95 3363 else
9ee6e8bb
PB
3364 rd = VFP_SREG_D(insn);
3365 if (s->thumb && rn == 15) {
7d1b0095 3366 addr = tcg_temp_new_i32();
312eea9f 3367 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3368 } else {
312eea9f 3369 addr = load_reg(s, rn);
9ee6e8bb 3370 }
b7bcbe95
FB
3371 if ((insn & 0x01200000) == 0x01000000) {
3372 /* Single load/store */
3373 offset = (insn & 0xff) << 2;
3374 if ((insn & (1 << 23)) == 0)
3375 offset = -offset;
312eea9f 3376 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3377 if (insn & (1 << 20)) {
312eea9f 3378 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3379 gen_mov_vreg_F0(dp, rd);
3380 } else {
3381 gen_mov_F0_vreg(dp, rd);
312eea9f 3382 gen_vfp_st(s, dp, addr);
b7bcbe95 3383 }
7d1b0095 3384 tcg_temp_free_i32(addr);
b7bcbe95
FB
3385 } else {
3386 /* load/store multiple */
3387 if (dp)
3388 n = (insn >> 1) & 0x7f;
3389 else
3390 n = insn & 0xff;
3391
3392 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3393 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3394
3395 if (dp)
3396 offset = 8;
3397 else
3398 offset = 4;
3399 for (i = 0; i < n; i++) {
18c9b560 3400 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3401 /* load */
312eea9f 3402 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3403 gen_mov_vreg_F0(dp, rd + i);
3404 } else {
3405 /* store */
3406 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3407 gen_vfp_st(s, dp, addr);
b7bcbe95 3408 }
312eea9f 3409 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3410 }
3411 if (insn & (1 << 21)) {
3412 /* writeback */
3413 if (insn & (1 << 24))
3414 offset = -offset * n;
3415 else if (dp && (insn & 1))
3416 offset = 4;
3417 else
3418 offset = 0;
3419
3420 if (offset != 0)
312eea9f
FN
3421 tcg_gen_addi_i32(addr, addr, offset);
3422 store_reg(s, rn, addr);
3423 } else {
7d1b0095 3424 tcg_temp_free_i32(addr);
b7bcbe95
FB
3425 }
3426 }
3427 }
3428 break;
3429 default:
3430 /* Should never happen. */
3431 return 1;
3432 }
3433 return 0;
3434}
3435
/* Emit a jump to DEST.  When the destination lies in the same guest page
 * as this TB we use direct TB chaining through slot N; otherwise we fall
 * back to setting the PC and exiting to the main loop. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* Same page: goto_tb placeholder, then encode (tb, slot) in the
         * exit value so the chaining code can patch this jump later. */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        /* Cross-page jump: no chaining, exit value 0. */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
3450
/* Emit an unconditional jump to DEST, ending the TB.  Under single-step
 * an indirect branch is used so the debug exception still triggers. */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;  /* keep the Thumb bit set in the branch target */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
3463
/* Signed 16x16->32 multiply (SMULxy family): X selects the top (1) or
 * bottom (0, sign-extended) halfword of t0, Y likewise for t1.
 * Result is left in t0; both inputs are clobbered. */
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
3476
3477/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3478static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3479 uint32_t mask;
3480
3481 mask = 0;
3482 if (flags & (1 << 0))
3483 mask |= 0xff;
3484 if (flags & (1 << 1))
3485 mask |= 0xff00;
3486 if (flags & (1 << 2))
3487 mask |= 0xff0000;
3488 if (flags & (1 << 3))
3489 mask |= 0xff000000;
9ee6e8bb 3490
2ae23e75 3491 /* Mask out undefined bits. */
9ee6e8bb 3492 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3493 if (!arm_feature(env, ARM_FEATURE_V4T))
3494 mask &= ~CPSR_T;
3495 if (!arm_feature(env, ARM_FEATURE_V5))
3496 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3497 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3498 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3499 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3500 mask &= ~CPSR_IT;
9ee6e8bb 3501 /* Mask out execution state bits. */
2ae23e75 3502 if (!spsr)
e160c51c 3503 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3504 /* Mask out privileged bits. */
3505 if (IS_USER(s))
9ee6e8bb 3506 mask &= CPSR_USER;
b5ff1b31
FB
3507 return mask;
3508}
3509
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep the SPSR bits outside MASK, merge in
         * the new bits from t0. */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* A PSR write may change state that affects translation, so force a
     * TB lookup after this instruction. */
    gen_lookup_tb(s);
    return 0;
}
3531
2fbac54b
FN
3532/* Returns nonzero if access to the PSR is not permitted. */
3533static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3534{
3535 TCGv tmp;
7d1b0095 3536 tmp = tcg_temp_new_i32();
2fbac54b
FN
3537 tcg_gen_movi_i32(tmp, val);
3538 return gen_set_psr(s, mask, spsr, tmp);
3539}
3540
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    /* Restore the full CPSR from the banked SPSR. */
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
3551
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    /* Write CPSR first: store_reg to r15 depends on the new state. */
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3560
/* Sync the Thumb IT-state bits tracked in the DisasContext back into the
 * CPU state (condexec_bits).  Only emitted when some bits are set. */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack (cond, mask) into the architectural encoding. */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
3b46e624 3571
/* Raise exception EXCP for the current instruction.  OFFSET is the number
 * of bytes to back up from s->pc so the PC points at the faulting insn. */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);            /* keep IT state consistent */
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3579
9ee6e8bb
PB
3580static void gen_nop_hint(DisasContext *s, int val)
3581{
3582 switch (val) {
3583 case 3: /* wfi */
8984bd2e 3584 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3585 s->is_jmp = DISAS_WFI;
3586 break;
3587 case 2: /* wfe */
3588 case 4: /* sev */
3589 /* TODO: Implement SEV and WFE. May help SMP performance. */
3590 default: /* nop */
3591 break;
3592 }
3593}
99c475ab 3594
/* Shorthand operand triple: dest = cpu_V0, sources = cpu_V0, cpu_V1. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3596
62698be3 3597static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3598{
3599 switch (size) {
dd8fbd78
FN
3600 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3601 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3602 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3603 default: abort();
9ee6e8bb 3604 }
9ee6e8bb
PB
3605}
3606
dd8fbd78 3607static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3608{
3609 switch (size) {
dd8fbd78
FN
3610 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3611 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3612 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3613 default: return;
3614 }
3615}
3616
/* 32-bit pairwise ops end up the same as the elementwise versions, since
 * each 32-bit TCG value holds exactly one lane.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
3622
/* Dispatch helper "name" on (size, u) from the enclosing scope, passing
 * cpu_env; operands are tmp/tmp2, result in tmp.  Expands to `return 1`
 * (UNDEF) for the unsupported 64-bit case.  Only usable inside functions
 * with those locals in scope and an int return. */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
/* Same dispatch as GEN_NEON_INTEGER_OP_ENV but for helpers that do not
 * take cpu_env.  Relies on `size`, `u`, `tmp`, `tmp2` in the enclosing
 * scope; expands to `return 1` (UNDEF) for the 64-bit case. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3668
dd8fbd78 3669static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3670{
7d1b0095 3671 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3672 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3673 return tmp;
9ee6e8bb
PB
3674}
3675
dd8fbd78 3676static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3677{
dd8fbd78 3678 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3679 tcg_temp_free_i32(var);
9ee6e8bb
PB
3680}
3681
/* Load the scalar operand selected by REG into a fresh 32-bit temp.
 * For SIZE==1 (16-bit scalars) the selected halfword is duplicated into
 * both halves of the result; for SIZE==2 the whole word is returned. */
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3697
/* Emit a Neon VUZP (unzip) of registers RD/RM with element size SIZE;
 * Q selects the 128-bit form.  Returns 1 for the UNDEF combination
 * (64-bit form with 32-bit elements), 0 otherwise. */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* The helpers take the register numbers as i32 constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
3736
/* Emit a Neon VZIP (zip/interleave) of registers RD/RM with element size
 * SIZE; Q selects the 128-bit form.  Returns 1 for the UNDEF combination
 * (64-bit form with 32-bit elements), 0 otherwise. */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* The helpers take the register numbers as i32 constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
3775
/* VTRN.8 on a register pair: swap the odd bytes of t0 with the even
 * bytes of t1, in place.  Both inputs are updated. */
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 << 8) even-byte lanes | t1 even bytes. */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 >> 8) odd-byte lanes | t0 odd bytes. */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3797
/* VTRN.16 on a register pair: swap the high halfword of t0 with the low
 * halfword of t1, in place.  Both inputs are updated. */
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = t0.low : t1.low (t1's low half moves into rd's low half). */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = t1.high : t0.high. */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3816
3817
/* Per-opcode layout of the VLDn/VSTn "load/store all elements" forms,
 * indexed by bits [11:8] of the instruction: how many D registers are
 * accessed, the element interleave factor, and the D-register spacing.
 * Read-only lookup data, so declare it const. */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3835
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid. */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base address with a
             * register-dependent offset. */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one full D-register per access. */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: process the D-register as two 32-bit
                 * passes, assembling/splitting lanes as needed. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into its lane, keeping
                         * the other lanes of the 32-bit pass intact. */
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means add the
     * transfer size, otherwise add the index register. */
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4143
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);    /* bits of t where c is set */
    tcg_gen_andc_i32(f, f, c);   /* bits of f where c is clear */
    tcg_gen_or_i32(dest, t, f);
}
4151
a7812ae4 4152static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4153{
4154 switch (size) {
4155 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4156 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4157 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4158 default: abort();
4159 }
4160}
4161
a7812ae4 4162static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4163{
4164 switch (size) {
2a3f75b4
PM
4165 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4166 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4167 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
ad69471c
PB
4168 default: abort();
4169 }
4170}
4171
a7812ae4 4172static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4173{
4174 switch (size) {
2a3f75b4
PM
4175 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4176 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4177 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
ad69471c
PB
4178 default: abort();
4179 }
4180}
4181
af1bbf30
JR
4182static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4183{
4184 switch (size) {
2a3f75b4
PM
4185 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4186 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4187 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
af1bbf30
JR
4188 default: abort();
4189 }
4190}
4191
/* Variable shift of VAR by SHIFT for narrowing shift insns.  Q selects
 * rounding shifts, U unsigned shifts.  Only 16- and 32-bit element
 * sizes are valid here (narrowing from 32/64-bit sources). */
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        /* Rounding shifts. */
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
4225
a7812ae4 4226static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4227{
4228 if (u) {
4229 switch (size) {
4230 case 0: gen_helper_neon_widen_u8(dest, src); break;
4231 case 1: gen_helper_neon_widen_u16(dest, src); break;
4232 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4233 default: abort();
4234 }
4235 } else {
4236 switch (size) {
4237 case 0: gen_helper_neon_widen_s8(dest, src); break;
4238 case 1: gen_helper_neon_widen_s16(dest, src); break;
4239 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4240 default: abort();
4241 }
4242 }
7d1b0095 4243 tcg_temp_free_i32(src);
ad69471c
PB
4244}
4245
4246static inline void gen_neon_addl(int size)
4247{
4248 switch (size) {
4249 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4250 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4251 case 2: tcg_gen_add_i64(CPU_V001); break;
4252 default: abort();
4253 }
4254}
4255
4256static inline void gen_neon_subl(int size)
4257{
4258 switch (size) {
4259 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4260 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4261 case 2: tcg_gen_sub_i64(CPU_V001); break;
4262 default: abort();
4263 }
4264}
4265
a7812ae4 4266static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4267{
4268 switch (size) {
4269 case 0: gen_helper_neon_negl_u16(var, var); break;
4270 case 1: gen_helper_neon_negl_u32(var, var); break;
4271 case 2: gen_helper_neon_negl_u64(var, var); break;
4272 default: abort();
4273 }
4274}
4275
a7812ae4 4276static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4277{
4278 switch (size) {
2a3f75b4
PM
4279 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4280 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
ad69471c
PB
4281 default: abort();
4282 }
4283}
4284
a7812ae4 4285static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4286{
a7812ae4 4287 TCGv_i64 tmp;
ad69471c
PB
4288
4289 switch ((size << 1) | u) {
4290 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4291 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4292 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4293 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4294 case 4:
4295 tmp = gen_muls_i64_i32(a, b);
4296 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4297 tcg_temp_free_i64(tmp);
ad69471c
PB
4298 break;
4299 case 5:
4300 tmp = gen_mulu_i64_i32(a, b);
4301 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4302 tcg_temp_free_i64(tmp);
ad69471c
PB
4303 break;
4304 default: abort();
4305 }
c6067f04
CL
4306
4307 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4308 Don't forget to clean them now. */
4309 if (size < 2) {
7d1b0095
PM
4310 tcg_temp_free_i32(a);
4311 tcg_temp_free_i32(b);
c6067f04 4312 }
ad69471c
PB
4313}
4314
c33171c7
PM
4315static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4316{
4317 if (op) {
4318 if (u) {
4319 gen_neon_unarrow_sats(size, dest, src);
4320 } else {
4321 gen_neon_narrow(size, dest, src);
4322 }
4323 } else {
4324 if (u) {
4325 gen_neon_narrow_satu(size, dest, src);
4326 } else {
4327 gen_neon_narrow_sats(size, dest, src);
4328 }
4329 }
4330}
4331
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
/* NB: op values 24 and 25 have no name here; neon_3r_sizes[] leaves
 * their entries zero, so those encodings always UNDEF.
 */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4366
/* Each entry in this array has bit n set if the 3-reg-same insn allows
 * size value n (otherwise it will UNDEF).  Ops with no entry get zero
 * and therefore always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4399
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering are unallocated encodings; they
 * UNDEF via their zero entries in neon_2rm_sizes[].
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4448
4449static int neon_2rm_is_float_op(int op)
4450{
4451 /* Return true if this neon 2reg-misc op is float-to-float */
4452 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4453 op >= NEON_2RM_VRECPE_F);
4454}
4455
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (E.g. 0x7 permits sizes 0,1,2; 0x4 permits only size 2, i.e.
 * 32-bit elements.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4506
9ee6e8bb
PB
4507/* Translate a NEON data processing instruction. Return nonzero if the
4508 instruction is invalid.
ad69471c
PB
4509 We process data in a mixture of 32-bit and 64-bit chunks.
4510 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4511
9ee6e8bb
PB
4512static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4513{
4514 int op;
4515 int q;
4516 int rd, rn, rm;
4517 int size;
4518 int shift;
4519 int pass;
4520 int count;
4521 int pairwise;
4522 int u;
ca9a32e4 4523 uint32_t imm, mask;
b75263d6 4524 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4525 TCGv_i64 tmp64;
9ee6e8bb 4526
5df8bac1 4527 if (!s->vfp_enabled)
9ee6e8bb
PB
4528 return 1;
4529 q = (insn & (1 << 6)) != 0;
4530 u = (insn >> 24) & 1;
4531 VFP_DREG_D(rd, insn);
4532 VFP_DREG_N(rn, insn);
4533 VFP_DREG_M(rm, insn);
4534 size = (insn >> 20) & 3;
4535 if ((insn & (1 << 23)) == 0) {
4536 /* Three register same length. */
4537 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4538 /* Catch invalid op and bad size combinations: UNDEF */
4539 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4540 return 1;
4541 }
25f84f79
PM
4542 /* All insns of this form UNDEF for either this condition or the
4543 * superset of cases "Q==1"; we catch the latter later.
4544 */
4545 if (q && ((rd | rn | rm) & 1)) {
4546 return 1;
4547 }
62698be3
PM
4548 if (size == 3 && op != NEON_3R_LOGIC) {
4549 /* 64-bit element instructions. */
9ee6e8bb 4550 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4551 neon_load_reg64(cpu_V0, rn + pass);
4552 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4553 switch (op) {
62698be3 4554 case NEON_3R_VQADD:
9ee6e8bb 4555 if (u) {
2a3f75b4 4556 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4557 } else {
2a3f75b4 4558 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4559 }
9ee6e8bb 4560 break;
62698be3 4561 case NEON_3R_VQSUB:
9ee6e8bb 4562 if (u) {
2a3f75b4 4563 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4564 } else {
2a3f75b4 4565 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4566 }
4567 break;
62698be3 4568 case NEON_3R_VSHL:
ad69471c
PB
4569 if (u) {
4570 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4571 } else {
4572 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4573 }
4574 break;
62698be3 4575 case NEON_3R_VQSHL:
ad69471c 4576 if (u) {
2a3f75b4 4577 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4578 } else {
2a3f75b4 4579 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
ad69471c
PB
4580 }
4581 break;
62698be3 4582 case NEON_3R_VRSHL:
ad69471c
PB
4583 if (u) {
4584 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4585 } else {
ad69471c
PB
4586 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4587 }
4588 break;
62698be3 4589 case NEON_3R_VQRSHL:
ad69471c 4590 if (u) {
2a3f75b4 4591 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4592 } else {
2a3f75b4 4593 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4594 }
9ee6e8bb 4595 break;
62698be3 4596 case NEON_3R_VADD_VSUB:
9ee6e8bb 4597 if (u) {
ad69471c 4598 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4599 } else {
ad69471c 4600 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4601 }
4602 break;
4603 default:
4604 abort();
2c0262af 4605 }
ad69471c 4606 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4607 }
9ee6e8bb 4608 return 0;
2c0262af 4609 }
25f84f79 4610 pairwise = 0;
9ee6e8bb 4611 switch (op) {
62698be3
PM
4612 case NEON_3R_VSHL:
4613 case NEON_3R_VQSHL:
4614 case NEON_3R_VRSHL:
4615 case NEON_3R_VQRSHL:
9ee6e8bb 4616 {
ad69471c
PB
4617 int rtmp;
4618 /* Shift instruction operands are reversed. */
4619 rtmp = rn;
9ee6e8bb 4620 rn = rm;
ad69471c 4621 rm = rtmp;
9ee6e8bb 4622 }
2c0262af 4623 break;
25f84f79
PM
4624 case NEON_3R_VPADD:
4625 if (u) {
4626 return 1;
4627 }
4628 /* Fall through */
62698be3
PM
4629 case NEON_3R_VPMAX:
4630 case NEON_3R_VPMIN:
9ee6e8bb 4631 pairwise = 1;
2c0262af 4632 break;
25f84f79
PM
4633 case NEON_3R_FLOAT_ARITH:
4634 pairwise = (u && size < 2); /* if VPADD (float) */
4635 break;
4636 case NEON_3R_FLOAT_MINMAX:
4637 pairwise = u; /* if VPMIN/VPMAX (float) */
4638 break;
4639 case NEON_3R_FLOAT_CMP:
4640 if (!u && size) {
4641 /* no encoding for U=0 C=1x */
4642 return 1;
4643 }
4644 break;
4645 case NEON_3R_FLOAT_ACMP:
4646 if (!u) {
4647 return 1;
4648 }
4649 break;
4650 case NEON_3R_VRECPS_VRSQRTS:
4651 if (u) {
4652 return 1;
4653 }
2c0262af 4654 break;
25f84f79
PM
4655 case NEON_3R_VMUL:
4656 if (u && (size != 0)) {
4657 /* UNDEF on invalid size for polynomial subcase */
4658 return 1;
4659 }
2c0262af 4660 break;
9ee6e8bb 4661 default:
2c0262af 4662 break;
9ee6e8bb 4663 }
dd8fbd78 4664
25f84f79
PM
4665 if (pairwise && q) {
4666 /* All the pairwise insns UNDEF if Q is set */
4667 return 1;
4668 }
4669
9ee6e8bb
PB
4670 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4671
4672 if (pairwise) {
4673 /* Pairwise. */
a5a14945
JR
4674 if (pass < 1) {
4675 tmp = neon_load_reg(rn, 0);
4676 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4677 } else {
a5a14945
JR
4678 tmp = neon_load_reg(rm, 0);
4679 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4680 }
4681 } else {
4682 /* Elementwise. */
dd8fbd78
FN
4683 tmp = neon_load_reg(rn, pass);
4684 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4685 }
4686 switch (op) {
62698be3 4687 case NEON_3R_VHADD:
9ee6e8bb
PB
4688 GEN_NEON_INTEGER_OP(hadd);
4689 break;
62698be3 4690 case NEON_3R_VQADD:
2a3f75b4 4691 GEN_NEON_INTEGER_OP(qadd);
2c0262af 4692 break;
62698be3 4693 case NEON_3R_VRHADD:
9ee6e8bb 4694 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4695 break;
62698be3 4696 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4697 switch ((u << 2) | size) {
4698 case 0: /* VAND */
dd8fbd78 4699 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4700 break;
4701 case 1: /* BIC */
f669df27 4702 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4703 break;
4704 case 2: /* VORR */
dd8fbd78 4705 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4706 break;
4707 case 3: /* VORN */
f669df27 4708 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4709 break;
4710 case 4: /* VEOR */
dd8fbd78 4711 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4712 break;
4713 case 5: /* VBSL */
dd8fbd78
FN
4714 tmp3 = neon_load_reg(rd, pass);
4715 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4716 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4717 break;
4718 case 6: /* VBIT */
dd8fbd78
FN
4719 tmp3 = neon_load_reg(rd, pass);
4720 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4721 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4722 break;
4723 case 7: /* VBIF */
dd8fbd78
FN
4724 tmp3 = neon_load_reg(rd, pass);
4725 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4726 tcg_temp_free_i32(tmp3);
9ee6e8bb 4727 break;
2c0262af
FB
4728 }
4729 break;
62698be3 4730 case NEON_3R_VHSUB:
9ee6e8bb
PB
4731 GEN_NEON_INTEGER_OP(hsub);
4732 break;
62698be3 4733 case NEON_3R_VQSUB:
2a3f75b4 4734 GEN_NEON_INTEGER_OP(qsub);
2c0262af 4735 break;
62698be3 4736 case NEON_3R_VCGT:
9ee6e8bb
PB
4737 GEN_NEON_INTEGER_OP(cgt);
4738 break;
62698be3 4739 case NEON_3R_VCGE:
9ee6e8bb
PB
4740 GEN_NEON_INTEGER_OP(cge);
4741 break;
62698be3 4742 case NEON_3R_VSHL:
ad69471c 4743 GEN_NEON_INTEGER_OP(shl);
2c0262af 4744 break;
62698be3 4745 case NEON_3R_VQSHL:
2a3f75b4 4746 GEN_NEON_INTEGER_OP(qshl);
2c0262af 4747 break;
62698be3 4748 case NEON_3R_VRSHL:
ad69471c 4749 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4750 break;
62698be3 4751 case NEON_3R_VQRSHL:
2a3f75b4 4752 GEN_NEON_INTEGER_OP(qrshl);
9ee6e8bb 4753 break;
62698be3 4754 case NEON_3R_VMAX:
9ee6e8bb
PB
4755 GEN_NEON_INTEGER_OP(max);
4756 break;
62698be3 4757 case NEON_3R_VMIN:
9ee6e8bb
PB
4758 GEN_NEON_INTEGER_OP(min);
4759 break;
62698be3 4760 case NEON_3R_VABD:
9ee6e8bb
PB
4761 GEN_NEON_INTEGER_OP(abd);
4762 break;
62698be3 4763 case NEON_3R_VABA:
9ee6e8bb 4764 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4765 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4766 tmp2 = neon_load_reg(rd, pass);
4767 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4768 break;
62698be3 4769 case NEON_3R_VADD_VSUB:
9ee6e8bb 4770 if (!u) { /* VADD */
62698be3 4771 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4772 } else { /* VSUB */
4773 switch (size) {
dd8fbd78
FN
4774 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4775 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4776 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4777 default: abort();
9ee6e8bb
PB
4778 }
4779 }
4780 break;
62698be3 4781 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4782 if (!u) { /* VTST */
4783 switch (size) {
dd8fbd78
FN
4784 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4785 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4786 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4787 default: abort();
9ee6e8bb
PB
4788 }
4789 } else { /* VCEQ */
4790 switch (size) {
dd8fbd78
FN
4791 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4792 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4793 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4794 default: abort();
9ee6e8bb
PB
4795 }
4796 }
4797 break;
62698be3 4798 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4799 switch (size) {
dd8fbd78
FN
4800 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4801 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4802 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4803 default: abort();
9ee6e8bb 4804 }
7d1b0095 4805 tcg_temp_free_i32(tmp2);
dd8fbd78 4806 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4807 if (u) { /* VMLS */
dd8fbd78 4808 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4809 } else { /* VMLA */
dd8fbd78 4810 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4811 }
4812 break;
62698be3 4813 case NEON_3R_VMUL:
9ee6e8bb 4814 if (u) { /* polynomial */
dd8fbd78 4815 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4816 } else { /* Integer */
4817 switch (size) {
dd8fbd78
FN
4818 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4819 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4820 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4821 default: abort();
9ee6e8bb
PB
4822 }
4823 }
4824 break;
62698be3 4825 case NEON_3R_VPMAX:
9ee6e8bb
PB
4826 GEN_NEON_INTEGER_OP(pmax);
4827 break;
62698be3 4828 case NEON_3R_VPMIN:
9ee6e8bb
PB
4829 GEN_NEON_INTEGER_OP(pmin);
4830 break;
62698be3 4831 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4832 if (!u) { /* VQDMULH */
4833 switch (size) {
2a3f75b4
PM
4834 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4835 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4836 default: abort();
9ee6e8bb 4837 }
62698be3 4838 } else { /* VQRDMULH */
9ee6e8bb 4839 switch (size) {
2a3f75b4
PM
4840 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4841 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4842 default: abort();
9ee6e8bb
PB
4843 }
4844 }
4845 break;
62698be3 4846 case NEON_3R_VPADD:
9ee6e8bb 4847 switch (size) {
dd8fbd78
FN
4848 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4849 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4850 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4851 default: abort();
9ee6e8bb
PB
4852 }
4853 break;
62698be3 4854 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
9ee6e8bb
PB
4855 switch ((u << 2) | size) {
4856 case 0: /* VADD */
dd8fbd78 4857 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4858 break;
4859 case 2: /* VSUB */
dd8fbd78 4860 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4861 break;
4862 case 4: /* VPADD */
dd8fbd78 4863 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4864 break;
4865 case 6: /* VABD */
dd8fbd78 4866 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4867 break;
4868 default:
62698be3 4869 abort();
9ee6e8bb
PB
4870 }
4871 break;
62698be3 4872 case NEON_3R_FLOAT_MULTIPLY:
dd8fbd78 4873 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4874 if (!u) {
7d1b0095 4875 tcg_temp_free_i32(tmp2);
dd8fbd78 4876 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4877 if (size == 0) {
dd8fbd78 4878 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4879 } else {
dd8fbd78 4880 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4881 }
4882 }
4883 break;
62698be3 4884 case NEON_3R_FLOAT_CMP:
9ee6e8bb 4885 if (!u) {
dd8fbd78 4886 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4887 } else {
9ee6e8bb 4888 if (size == 0)
dd8fbd78 4889 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4890 else
dd8fbd78 4891 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4892 }
2c0262af 4893 break;
62698be3 4894 case NEON_3R_FLOAT_ACMP:
9ee6e8bb 4895 if (size == 0)
dd8fbd78 4896 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4897 else
dd8fbd78 4898 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4899 break;
62698be3 4900 case NEON_3R_FLOAT_MINMAX:
9ee6e8bb 4901 if (size == 0)
dd8fbd78 4902 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4903 else
dd8fbd78 4904 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb 4905 break;
62698be3 4906 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4907 if (size == 0)
dd8fbd78 4908 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4909 else
dd8fbd78 4910 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4911 break;
9ee6e8bb
PB
4912 default:
4913 abort();
2c0262af 4914 }
7d1b0095 4915 tcg_temp_free_i32(tmp2);
dd8fbd78 4916
9ee6e8bb
PB
4917 /* Save the result. For elementwise operations we can put it
4918 straight into the destination register. For pairwise operations
4919 we have to be careful to avoid clobbering the source operands. */
4920 if (pairwise && rd == rm) {
dd8fbd78 4921 neon_store_scratch(pass, tmp);
9ee6e8bb 4922 } else {
dd8fbd78 4923 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4924 }
4925
4926 } /* for pass */
4927 if (pairwise && rd == rm) {
4928 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4929 tmp = neon_load_scratch(pass);
4930 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4931 }
4932 }
ad69471c 4933 /* End of 3 register same size operations. */
9ee6e8bb
PB
4934 } else if (insn & (1 << 4)) {
4935 if ((insn & 0x00380080) != 0) {
4936 /* Two registers and shift. */
4937 op = (insn >> 8) & 0xf;
4938 if (insn & (1 << 7)) {
cc13115b
PM
4939 /* 64-bit shift. */
4940 if (op > 7) {
4941 return 1;
4942 }
9ee6e8bb
PB
4943 size = 3;
4944 } else {
4945 size = 2;
4946 while ((insn & (1 << (size + 19))) == 0)
4947 size--;
4948 }
4949 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4950 /* To avoid excessive dumplication of ops we implement shift
4951 by immediate using the variable shift operations. */
4952 if (op < 8) {
4953 /* Shift by immediate:
4954 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4955 if (q && ((rd | rm) & 1)) {
4956 return 1;
4957 }
4958 if (!u && (op == 4 || op == 6)) {
4959 return 1;
4960 }
9ee6e8bb
PB
4961 /* Right shifts are encoded as N - shift, where N is the
4962 element size in bits. */
4963 if (op <= 4)
4964 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4965 if (size == 3) {
4966 count = q + 1;
4967 } else {
4968 count = q ? 4: 2;
4969 }
4970 switch (size) {
4971 case 0:
4972 imm = (uint8_t) shift;
4973 imm |= imm << 8;
4974 imm |= imm << 16;
4975 break;
4976 case 1:
4977 imm = (uint16_t) shift;
4978 imm |= imm << 16;
4979 break;
4980 case 2:
4981 case 3:
4982 imm = shift;
4983 break;
4984 default:
4985 abort();
4986 }
4987
4988 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4989 if (size == 3) {
4990 neon_load_reg64(cpu_V0, rm + pass);
4991 tcg_gen_movi_i64(cpu_V1, imm);
4992 switch (op) {
4993 case 0: /* VSHR */
4994 case 1: /* VSRA */
4995 if (u)
4996 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4997 else
ad69471c 4998 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4999 break;
ad69471c
PB
5000 case 2: /* VRSHR */
5001 case 3: /* VRSRA */
5002 if (u)
5003 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5004 else
ad69471c 5005 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5006 break;
ad69471c 5007 case 4: /* VSRI */
ad69471c
PB
5008 case 5: /* VSHL, VSLI */
5009 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5010 break;
0322b26e 5011 case 6: /* VQSHLU */
cc13115b 5012 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 5013 break;
0322b26e
PM
5014 case 7: /* VQSHL */
5015 if (u) {
2a3f75b4 5016 gen_helper_neon_qshl_u64(cpu_V0,
0322b26e
PM
5017 cpu_V0, cpu_V1);
5018 } else {
2a3f75b4 5019 gen_helper_neon_qshl_s64(cpu_V0,
0322b26e
PM
5020 cpu_V0, cpu_V1);
5021 }
9ee6e8bb 5022 break;
9ee6e8bb 5023 }
ad69471c
PB
5024 if (op == 1 || op == 3) {
5025 /* Accumulate. */
5371cb81 5026 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5027 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5028 } else if (op == 4 || (op == 5 && u)) {
5029 /* Insert */
923e6509
CL
5030 neon_load_reg64(cpu_V1, rd + pass);
5031 uint64_t mask;
5032 if (shift < -63 || shift > 63) {
5033 mask = 0;
5034 } else {
5035 if (op == 4) {
5036 mask = 0xffffffffffffffffull >> -shift;
5037 } else {
5038 mask = 0xffffffffffffffffull << shift;
5039 }
5040 }
5041 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5042 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5043 }
5044 neon_store_reg64(cpu_V0, rd + pass);
5045 } else { /* size < 3 */
5046 /* Operands in T0 and T1. */
dd8fbd78 5047 tmp = neon_load_reg(rm, pass);
7d1b0095 5048 tmp2 = tcg_temp_new_i32();
dd8fbd78 5049 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5050 switch (op) {
5051 case 0: /* VSHR */
5052 case 1: /* VSRA */
5053 GEN_NEON_INTEGER_OP(shl);
5054 break;
5055 case 2: /* VRSHR */
5056 case 3: /* VRSRA */
5057 GEN_NEON_INTEGER_OP(rshl);
5058 break;
5059 case 4: /* VSRI */
ad69471c
PB
5060 case 5: /* VSHL, VSLI */
5061 switch (size) {
dd8fbd78
FN
5062 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5063 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5064 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5065 default: abort();
ad69471c
PB
5066 }
5067 break;
0322b26e 5068 case 6: /* VQSHLU */
ad69471c 5069 switch (size) {
0322b26e 5070 case 0:
2a3f75b4 5071 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
0322b26e
PM
5072 break;
5073 case 1:
2a3f75b4 5074 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
0322b26e
PM
5075 break;
5076 case 2:
2a3f75b4 5077 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
0322b26e
PM
5078 break;
5079 default:
cc13115b 5080 abort();
ad69471c
PB
5081 }
5082 break;
0322b26e 5083 case 7: /* VQSHL */
2a3f75b4 5084 GEN_NEON_INTEGER_OP(qshl);
0322b26e 5085 break;
ad69471c 5086 }
7d1b0095 5087 tcg_temp_free_i32(tmp2);
ad69471c
PB
5088
5089 if (op == 1 || op == 3) {
5090 /* Accumulate. */
dd8fbd78 5091 tmp2 = neon_load_reg(rd, pass);
5371cb81 5092 gen_neon_add(size, tmp, tmp2);
7d1b0095 5093 tcg_temp_free_i32(tmp2);
ad69471c
PB
5094 } else if (op == 4 || (op == 5 && u)) {
5095 /* Insert */
5096 switch (size) {
5097 case 0:
5098 if (op == 4)
ca9a32e4 5099 mask = 0xff >> -shift;
ad69471c 5100 else
ca9a32e4
JR
5101 mask = (uint8_t)(0xff << shift);
5102 mask |= mask << 8;
5103 mask |= mask << 16;
ad69471c
PB
5104 break;
5105 case 1:
5106 if (op == 4)
ca9a32e4 5107 mask = 0xffff >> -shift;
ad69471c 5108 else
ca9a32e4
JR
5109 mask = (uint16_t)(0xffff << shift);
5110 mask |= mask << 16;
ad69471c
PB
5111 break;
5112 case 2:
ca9a32e4
JR
5113 if (shift < -31 || shift > 31) {
5114 mask = 0;
5115 } else {
5116 if (op == 4)
5117 mask = 0xffffffffu >> -shift;
5118 else
5119 mask = 0xffffffffu << shift;
5120 }
ad69471c
PB
5121 break;
5122 default:
5123 abort();
5124 }
dd8fbd78 5125 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5126 tcg_gen_andi_i32(tmp, tmp, mask);
5127 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5128 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5129 tcg_temp_free_i32(tmp2);
ad69471c 5130 }
dd8fbd78 5131 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5132 }
5133 } /* for pass */
5134 } else if (op < 10) {
ad69471c 5135 /* Shift by immediate and narrow:
9ee6e8bb 5136 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5137 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5138 if (rm & 1) {
5139 return 1;
5140 }
9ee6e8bb
PB
5141 shift = shift - (1 << (size + 3));
5142 size++;
92cdfaeb 5143 if (size == 3) {
a7812ae4 5144 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5145 neon_load_reg64(cpu_V0, rm);
5146 neon_load_reg64(cpu_V1, rm + 1);
5147 for (pass = 0; pass < 2; pass++) {
5148 TCGv_i64 in;
5149 if (pass == 0) {
5150 in = cpu_V0;
5151 } else {
5152 in = cpu_V1;
5153 }
ad69471c 5154 if (q) {
0b36f4cd 5155 if (input_unsigned) {
92cdfaeb 5156 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5157 } else {
92cdfaeb 5158 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5159 }
ad69471c 5160 } else {
0b36f4cd 5161 if (input_unsigned) {
92cdfaeb 5162 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5163 } else {
92cdfaeb 5164 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5165 }
ad69471c 5166 }
7d1b0095 5167 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5168 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5169 neon_store_reg(rd, pass, tmp);
5170 } /* for pass */
5171 tcg_temp_free_i64(tmp64);
5172 } else {
5173 if (size == 1) {
5174 imm = (uint16_t)shift;
5175 imm |= imm << 16;
2c0262af 5176 } else {
92cdfaeb
PM
5177 /* size == 2 */
5178 imm = (uint32_t)shift;
5179 }
5180 tmp2 = tcg_const_i32(imm);
5181 tmp4 = neon_load_reg(rm + 1, 0);
5182 tmp5 = neon_load_reg(rm + 1, 1);
5183 for (pass = 0; pass < 2; pass++) {
5184 if (pass == 0) {
5185 tmp = neon_load_reg(rm, 0);
5186 } else {
5187 tmp = tmp4;
5188 }
0b36f4cd
CL
5189 gen_neon_shift_narrow(size, tmp, tmp2, q,
5190 input_unsigned);
92cdfaeb
PM
5191 if (pass == 0) {
5192 tmp3 = neon_load_reg(rm, 1);
5193 } else {
5194 tmp3 = tmp5;
5195 }
0b36f4cd
CL
5196 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5197 input_unsigned);
36aa55dc 5198 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5199 tcg_temp_free_i32(tmp);
5200 tcg_temp_free_i32(tmp3);
5201 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5202 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5203 neon_store_reg(rd, pass, tmp);
5204 } /* for pass */
c6067f04 5205 tcg_temp_free_i32(tmp2);
b75263d6 5206 }
9ee6e8bb 5207 } else if (op == 10) {
cc13115b
PM
5208 /* VSHLL, VMOVL */
5209 if (q || (rd & 1)) {
9ee6e8bb 5210 return 1;
cc13115b 5211 }
ad69471c
PB
5212 tmp = neon_load_reg(rm, 0);
5213 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5214 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5215 if (pass == 1)
5216 tmp = tmp2;
5217
5218 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5219
9ee6e8bb
PB
5220 if (shift != 0) {
5221 /* The shift is less than the width of the source
ad69471c
PB
5222 type, so we can just shift the whole register. */
5223 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5224 /* Widen the result of shift: we need to clear
5225 * the potential overflow bits resulting from
5226 * left bits of the narrow input appearing as
5227 * right bits of left the neighbour narrow
5228 * input. */
ad69471c
PB
5229 if (size < 2 || !u) {
5230 uint64_t imm64;
5231 if (size == 0) {
5232 imm = (0xffu >> (8 - shift));
5233 imm |= imm << 16;
acdf01ef 5234 } else if (size == 1) {
ad69471c 5235 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5236 } else {
5237 /* size == 2 */
5238 imm = 0xffffffff >> (32 - shift);
5239 }
5240 if (size < 2) {
5241 imm64 = imm | (((uint64_t)imm) << 32);
5242 } else {
5243 imm64 = imm;
9ee6e8bb 5244 }
acdf01ef 5245 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5246 }
5247 }
ad69471c 5248 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5249 }
f73534a5 5250 } else if (op >= 14) {
9ee6e8bb 5251 /* VCVT fixed-point. */
cc13115b
PM
5252 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5253 return 1;
5254 }
f73534a5
PM
5255 /* We have already masked out the must-be-1 top bit of imm6,
5256 * hence this 32-shift where the ARM ARM has 64-imm6.
5257 */
5258 shift = 32 - shift;
9ee6e8bb 5259 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5260 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5261 if (!(op & 1)) {
9ee6e8bb 5262 if (u)
5500b06c 5263 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5264 else
5500b06c 5265 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5266 } else {
5267 if (u)
5500b06c 5268 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5269 else
5500b06c 5270 gen_vfp_tosl(0, shift, 1);
2c0262af 5271 }
4373f3ce 5272 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5273 }
5274 } else {
9ee6e8bb
PB
5275 return 1;
5276 }
5277 } else { /* (insn & 0x00380080) == 0 */
5278 int invert;
7d80fee5
PM
5279 if (q && (rd & 1)) {
5280 return 1;
5281 }
9ee6e8bb
PB
5282
5283 op = (insn >> 8) & 0xf;
5284 /* One register and immediate. */
5285 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5286 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5287 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5288 * We choose to not special-case this and will behave as if a
5289 * valid constant encoding of 0 had been given.
5290 */
9ee6e8bb
PB
5291 switch (op) {
5292 case 0: case 1:
5293 /* no-op */
5294 break;
5295 case 2: case 3:
5296 imm <<= 8;
5297 break;
5298 case 4: case 5:
5299 imm <<= 16;
5300 break;
5301 case 6: case 7:
5302 imm <<= 24;
5303 break;
5304 case 8: case 9:
5305 imm |= imm << 16;
5306 break;
5307 case 10: case 11:
5308 imm = (imm << 8) | (imm << 24);
5309 break;
5310 case 12:
8e31209e 5311 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5312 break;
5313 case 13:
5314 imm = (imm << 16) | 0xffff;
5315 break;
5316 case 14:
5317 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5318 if (invert)
5319 imm = ~imm;
5320 break;
5321 case 15:
7d80fee5
PM
5322 if (invert) {
5323 return 1;
5324 }
9ee6e8bb
PB
5325 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5326 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5327 break;
5328 }
5329 if (invert)
5330 imm = ~imm;
5331
9ee6e8bb
PB
5332 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5333 if (op & 1 && op < 12) {
ad69471c 5334 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5335 if (invert) {
5336 /* The immediate value has already been inverted, so
5337 BIC becomes AND. */
ad69471c 5338 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5339 } else {
ad69471c 5340 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5341 }
9ee6e8bb 5342 } else {
ad69471c 5343 /* VMOV, VMVN. */
7d1b0095 5344 tmp = tcg_temp_new_i32();
9ee6e8bb 5345 if (op == 14 && invert) {
a5a14945 5346 int n;
ad69471c
PB
5347 uint32_t val;
5348 val = 0;
9ee6e8bb
PB
5349 for (n = 0; n < 4; n++) {
5350 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5351 val |= 0xff << (n * 8);
9ee6e8bb 5352 }
ad69471c
PB
5353 tcg_gen_movi_i32(tmp, val);
5354 } else {
5355 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5356 }
9ee6e8bb 5357 }
ad69471c 5358 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5359 }
5360 }
e4b3861d 5361 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5362 if (size != 3) {
5363 op = (insn >> 8) & 0xf;
5364 if ((insn & (1 << 6)) == 0) {
5365 /* Three registers of different lengths. */
5366 int src1_wide;
5367 int src2_wide;
5368 int prewiden;
695272dc
PM
5369 /* undefreq: bit 0 : UNDEF if size != 0
5370 * bit 1 : UNDEF if size == 0
5371 * bit 2 : UNDEF if U == 1
5372 * Note that [1:0] set implies 'always UNDEF'
5373 */
5374 int undefreq;
5375 /* prewiden, src1_wide, src2_wide, undefreq */
5376 static const int neon_3reg_wide[16][4] = {
5377 {1, 0, 0, 0}, /* VADDL */
5378 {1, 1, 0, 0}, /* VADDW */
5379 {1, 0, 0, 0}, /* VSUBL */
5380 {1, 1, 0, 0}, /* VSUBW */
5381 {0, 1, 1, 0}, /* VADDHN */
5382 {0, 0, 0, 0}, /* VABAL */
5383 {0, 1, 1, 0}, /* VSUBHN */
5384 {0, 0, 0, 0}, /* VABDL */
5385 {0, 0, 0, 0}, /* VMLAL */
5386 {0, 0, 0, 6}, /* VQDMLAL */
5387 {0, 0, 0, 0}, /* VMLSL */
5388 {0, 0, 0, 6}, /* VQDMLSL */
5389 {0, 0, 0, 0}, /* Integer VMULL */
5390 {0, 0, 0, 2}, /* VQDMULL */
5391 {0, 0, 0, 5}, /* Polynomial VMULL */
5392 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5393 };
5394
5395 prewiden = neon_3reg_wide[op][0];
5396 src1_wide = neon_3reg_wide[op][1];
5397 src2_wide = neon_3reg_wide[op][2];
695272dc 5398 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5399
695272dc
PM
5400 if (((undefreq & 1) && (size != 0)) ||
5401 ((undefreq & 2) && (size == 0)) ||
5402 ((undefreq & 4) && u)) {
5403 return 1;
5404 }
5405 if ((src1_wide && (rn & 1)) ||
5406 (src2_wide && (rm & 1)) ||
5407 (!src2_wide && (rd & 1))) {
ad69471c 5408 return 1;
695272dc 5409 }
ad69471c 5410
9ee6e8bb
PB
5411 /* Avoid overlapping operands. Wide source operands are
5412 always aligned so will never overlap with wide
5413 destinations in problematic ways. */
8f8e3aa4 5414 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5415 tmp = neon_load_reg(rm, 1);
5416 neon_store_scratch(2, tmp);
8f8e3aa4 5417 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5418 tmp = neon_load_reg(rn, 1);
5419 neon_store_scratch(2, tmp);
9ee6e8bb 5420 }
a50f5b91 5421 TCGV_UNUSED(tmp3);
9ee6e8bb 5422 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5423 if (src1_wide) {
5424 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5425 TCGV_UNUSED(tmp);
9ee6e8bb 5426 } else {
ad69471c 5427 if (pass == 1 && rd == rn) {
dd8fbd78 5428 tmp = neon_load_scratch(2);
9ee6e8bb 5429 } else {
ad69471c
PB
5430 tmp = neon_load_reg(rn, pass);
5431 }
5432 if (prewiden) {
5433 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5434 }
5435 }
ad69471c
PB
5436 if (src2_wide) {
5437 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5438 TCGV_UNUSED(tmp2);
9ee6e8bb 5439 } else {
ad69471c 5440 if (pass == 1 && rd == rm) {
dd8fbd78 5441 tmp2 = neon_load_scratch(2);
9ee6e8bb 5442 } else {
ad69471c
PB
5443 tmp2 = neon_load_reg(rm, pass);
5444 }
5445 if (prewiden) {
5446 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5447 }
9ee6e8bb
PB
5448 }
5449 switch (op) {
5450 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5451 gen_neon_addl(size);
9ee6e8bb 5452 break;
79b0e534 5453 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5454 gen_neon_subl(size);
9ee6e8bb
PB
5455 break;
5456 case 5: case 7: /* VABAL, VABDL */
5457 switch ((size << 1) | u) {
ad69471c
PB
5458 case 0:
5459 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5460 break;
5461 case 1:
5462 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5463 break;
5464 case 2:
5465 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5466 break;
5467 case 3:
5468 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5469 break;
5470 case 4:
5471 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5472 break;
5473 case 5:
5474 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5475 break;
9ee6e8bb
PB
5476 default: abort();
5477 }
7d1b0095
PM
5478 tcg_temp_free_i32(tmp2);
5479 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5480 break;
5481 case 8: case 9: case 10: case 11: case 12: case 13:
5482 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5483 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5484 break;
5485 case 14: /* Polynomial VMULL */
e5ca24cb 5486 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5487 tcg_temp_free_i32(tmp2);
5488 tcg_temp_free_i32(tmp);
e5ca24cb 5489 break;
695272dc
PM
5490 default: /* 15 is RESERVED: caught earlier */
5491 abort();
9ee6e8bb 5492 }
ebcd88ce
PM
5493 if (op == 13) {
5494 /* VQDMULL */
5495 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5496 neon_store_reg64(cpu_V0, rd + pass);
5497 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5498 /* Accumulate. */
ebcd88ce 5499 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5500 switch (op) {
4dc064e6
PM
5501 case 10: /* VMLSL */
5502 gen_neon_negl(cpu_V0, size);
5503 /* Fall through */
5504 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5505 gen_neon_addl(size);
9ee6e8bb
PB
5506 break;
5507 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5508 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5509 if (op == 11) {
5510 gen_neon_negl(cpu_V0, size);
5511 }
ad69471c
PB
5512 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5513 break;
9ee6e8bb
PB
5514 default:
5515 abort();
5516 }
ad69471c 5517 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5518 } else if (op == 4 || op == 6) {
5519 /* Narrowing operation. */
7d1b0095 5520 tmp = tcg_temp_new_i32();
79b0e534 5521 if (!u) {
9ee6e8bb 5522 switch (size) {
ad69471c
PB
5523 case 0:
5524 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5525 break;
5526 case 1:
5527 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5528 break;
5529 case 2:
5530 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5531 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5532 break;
9ee6e8bb
PB
5533 default: abort();
5534 }
5535 } else {
5536 switch (size) {
ad69471c
PB
5537 case 0:
5538 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5539 break;
5540 case 1:
5541 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5542 break;
5543 case 2:
5544 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5545 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5546 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5547 break;
9ee6e8bb
PB
5548 default: abort();
5549 }
5550 }
ad69471c
PB
5551 if (pass == 0) {
5552 tmp3 = tmp;
5553 } else {
5554 neon_store_reg(rd, 0, tmp3);
5555 neon_store_reg(rd, 1, tmp);
5556 }
9ee6e8bb
PB
5557 } else {
5558 /* Write back the result. */
ad69471c 5559 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5560 }
5561 }
5562 } else {
3e3326df
PM
5563 /* Two registers and a scalar. NB that for ops of this form
5564 * the ARM ARM labels bit 24 as Q, but it is in our variable
5565 * 'u', not 'q'.
5566 */
5567 if (size == 0) {
5568 return 1;
5569 }
9ee6e8bb 5570 switch (op) {
9ee6e8bb 5571 case 1: /* Float VMLA scalar */
9ee6e8bb 5572 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5573 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5574 if (size == 1) {
5575 return 1;
5576 }
5577 /* fall through */
5578 case 0: /* Integer VMLA scalar */
5579 case 4: /* Integer VMLS scalar */
5580 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5581 case 12: /* VQDMULH scalar */
5582 case 13: /* VQRDMULH scalar */
3e3326df
PM
5583 if (u && ((rd | rn) & 1)) {
5584 return 1;
5585 }
dd8fbd78
FN
5586 tmp = neon_get_scalar(size, rm);
5587 neon_store_scratch(0, tmp);
9ee6e8bb 5588 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5589 tmp = neon_load_scratch(0);
5590 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5591 if (op == 12) {
5592 if (size == 1) {
2a3f75b4 5593 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5594 } else {
2a3f75b4 5595 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5596 }
5597 } else if (op == 13) {
5598 if (size == 1) {
2a3f75b4 5599 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5600 } else {
2a3f75b4 5601 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5602 }
5603 } else if (op & 1) {
dd8fbd78 5604 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5605 } else {
5606 switch (size) {
dd8fbd78
FN
5607 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5608 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5609 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5610 default: abort();
9ee6e8bb
PB
5611 }
5612 }
7d1b0095 5613 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5614 if (op < 8) {
5615 /* Accumulate. */
dd8fbd78 5616 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5617 switch (op) {
5618 case 0:
dd8fbd78 5619 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5620 break;
5621 case 1:
dd8fbd78 5622 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5623 break;
5624 case 4:
dd8fbd78 5625 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5626 break;
5627 case 5:
dd8fbd78 5628 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5629 break;
5630 default:
5631 abort();
5632 }
7d1b0095 5633 tcg_temp_free_i32(tmp2);
9ee6e8bb 5634 }
dd8fbd78 5635 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5636 }
5637 break;
9ee6e8bb 5638 case 3: /* VQDMLAL scalar */
9ee6e8bb 5639 case 7: /* VQDMLSL scalar */
9ee6e8bb 5640 case 11: /* VQDMULL scalar */
3e3326df 5641 if (u == 1) {
ad69471c 5642 return 1;
3e3326df
PM
5643 }
5644 /* fall through */
5645 case 2: /* VMLAL sclar */
5646 case 6: /* VMLSL scalar */
5647 case 10: /* VMULL scalar */
5648 if (rd & 1) {
5649 return 1;
5650 }
dd8fbd78 5651 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5652 /* We need a copy of tmp2 because gen_neon_mull
5653 * deletes it during pass 0. */
7d1b0095 5654 tmp4 = tcg_temp_new_i32();
c6067f04 5655 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5656 tmp3 = neon_load_reg(rn, 1);
ad69471c 5657
9ee6e8bb 5658 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5659 if (pass == 0) {
5660 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5661 } else {
dd8fbd78 5662 tmp = tmp3;
c6067f04 5663 tmp2 = tmp4;
9ee6e8bb 5664 }
ad69471c 5665 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5666 if (op != 11) {
5667 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5668 }
9ee6e8bb 5669 switch (op) {
4dc064e6
PM
5670 case 6:
5671 gen_neon_negl(cpu_V0, size);
5672 /* Fall through */
5673 case 2:
ad69471c 5674 gen_neon_addl(size);
9ee6e8bb
PB
5675 break;
5676 case 3: case 7:
ad69471c 5677 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5678 if (op == 7) {
5679 gen_neon_negl(cpu_V0, size);
5680 }
ad69471c 5681 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5682 break;
5683 case 10:
5684 /* no-op */
5685 break;
5686 case 11:
ad69471c 5687 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5688 break;
5689 default:
5690 abort();
5691 }
ad69471c 5692 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5693 }
dd8fbd78 5694
dd8fbd78 5695
9ee6e8bb
PB
5696 break;
5697 default: /* 14 and 15 are RESERVED */
5698 return 1;
5699 }
5700 }
5701 } else { /* size == 3 */
5702 if (!u) {
5703 /* Extract. */
9ee6e8bb 5704 imm = (insn >> 8) & 0xf;
ad69471c
PB
5705
5706 if (imm > 7 && !q)
5707 return 1;
5708
52579ea1
PM
5709 if (q && ((rd | rn | rm) & 1)) {
5710 return 1;
5711 }
5712
ad69471c
PB
5713 if (imm == 0) {
5714 neon_load_reg64(cpu_V0, rn);
5715 if (q) {
5716 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5717 }
ad69471c
PB
5718 } else if (imm == 8) {
5719 neon_load_reg64(cpu_V0, rn + 1);
5720 if (q) {
5721 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5722 }
ad69471c 5723 } else if (q) {
a7812ae4 5724 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5725 if (imm < 8) {
5726 neon_load_reg64(cpu_V0, rn);
a7812ae4 5727 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5728 } else {
5729 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5730 neon_load_reg64(tmp64, rm);
ad69471c
PB
5731 }
5732 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5733 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5734 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5735 if (imm < 8) {
5736 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5737 } else {
ad69471c
PB
5738 neon_load_reg64(cpu_V1, rm + 1);
5739 imm -= 8;
9ee6e8bb 5740 }
ad69471c 5741 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5742 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5743 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5744 tcg_temp_free_i64(tmp64);
ad69471c 5745 } else {
a7812ae4 5746 /* BUGFIX */
ad69471c 5747 neon_load_reg64(cpu_V0, rn);
a7812ae4 5748 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5749 neon_load_reg64(cpu_V1, rm);
a7812ae4 5750 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5751 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5752 }
5753 neon_store_reg64(cpu_V0, rd);
5754 if (q) {
5755 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5756 }
5757 } else if ((insn & (1 << 11)) == 0) {
5758 /* Two register misc. */
5759 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5760 size = (insn >> 18) & 3;
600b828c
PM
5761 /* UNDEF for unknown op values and bad op-size combinations */
5762 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5763 return 1;
5764 }
fc2a9b37
PM
5765 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5766 q && ((rm | rd) & 1)) {
5767 return 1;
5768 }
9ee6e8bb 5769 switch (op) {
600b828c 5770 case NEON_2RM_VREV64:
9ee6e8bb 5771 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5772 tmp = neon_load_reg(rm, pass * 2);
5773 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5774 switch (size) {
dd8fbd78
FN
5775 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5776 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5777 case 2: /* no-op */ break;
5778 default: abort();
5779 }
dd8fbd78 5780 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5781 if (size == 2) {
dd8fbd78 5782 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5783 } else {
9ee6e8bb 5784 switch (size) {
dd8fbd78
FN
5785 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5786 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5787 default: abort();
5788 }
dd8fbd78 5789 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5790 }
5791 }
5792 break;
600b828c
PM
5793 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5794 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5795 for (pass = 0; pass < q + 1; pass++) {
5796 tmp = neon_load_reg(rm, pass * 2);
5797 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5798 tmp = neon_load_reg(rm, pass * 2 + 1);
5799 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5800 switch (size) {
5801 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5802 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5803 case 2: tcg_gen_add_i64(CPU_V001); break;
5804 default: abort();
5805 }
600b828c 5806 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5807 /* Accumulate. */
ad69471c
PB
5808 neon_load_reg64(cpu_V1, rd + pass);
5809 gen_neon_addl(size);
9ee6e8bb 5810 }
ad69471c 5811 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5812 }
5813 break;
600b828c 5814 case NEON_2RM_VTRN:
9ee6e8bb 5815 if (size == 2) {
a5a14945 5816 int n;
9ee6e8bb 5817 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5818 tmp = neon_load_reg(rm, n);
5819 tmp2 = neon_load_reg(rd, n + 1);
5820 neon_store_reg(rm, n, tmp2);
5821 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5822 }
5823 } else {
5824 goto elementwise;
5825 }
5826 break;
600b828c 5827 case NEON_2RM_VUZP:
02acedf9 5828 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5829 return 1;
9ee6e8bb
PB
5830 }
5831 break;
600b828c 5832 case NEON_2RM_VZIP:
d68a6f3a 5833 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5834 return 1;
9ee6e8bb
PB
5835 }
5836 break;
600b828c
PM
5837 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5838 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5839 if (rm & 1) {
5840 return 1;
5841 }
a50f5b91 5842 TCGV_UNUSED(tmp2);
9ee6e8bb 5843 for (pass = 0; pass < 2; pass++) {
ad69471c 5844 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5845 tmp = tcg_temp_new_i32();
600b828c
PM
5846 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5847 tmp, cpu_V0);
ad69471c
PB
5848 if (pass == 0) {
5849 tmp2 = tmp;
5850 } else {
5851 neon_store_reg(rd, 0, tmp2);
5852 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5853 }
9ee6e8bb
PB
5854 }
5855 break;
600b828c 5856 case NEON_2RM_VSHLL:
fc2a9b37 5857 if (q || (rd & 1)) {
9ee6e8bb 5858 return 1;
600b828c 5859 }
ad69471c
PB
5860 tmp = neon_load_reg(rm, 0);
5861 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5862 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5863 if (pass == 1)
5864 tmp = tmp2;
5865 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5866 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5867 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5868 }
5869 break;
600b828c 5870 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5871 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5872 q || (rm & 1)) {
5873 return 1;
5874 }
7d1b0095
PM
5875 tmp = tcg_temp_new_i32();
5876 tmp2 = tcg_temp_new_i32();
60011498 5877 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5878 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5879 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5880 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5881 tcg_gen_shli_i32(tmp2, tmp2, 16);
5882 tcg_gen_or_i32(tmp2, tmp2, tmp);
5883 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5884 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5885 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5886 neon_store_reg(rd, 0, tmp2);
7d1b0095 5887 tmp2 = tcg_temp_new_i32();
2d981da7 5888 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5889 tcg_gen_shli_i32(tmp2, tmp2, 16);
5890 tcg_gen_or_i32(tmp2, tmp2, tmp);
5891 neon_store_reg(rd, 1, tmp2);
7d1b0095 5892 tcg_temp_free_i32(tmp);
60011498 5893 break;
600b828c 5894 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5895 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5896 q || (rd & 1)) {
5897 return 1;
5898 }
7d1b0095 5899 tmp3 = tcg_temp_new_i32();
60011498
PB
5900 tmp = neon_load_reg(rm, 0);
5901 tmp2 = neon_load_reg(rm, 1);
5902 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5903 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5904 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5905 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5906 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5907 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5908 tcg_temp_free_i32(tmp);
60011498 5909 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5910 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5911 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5912 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5913 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5914 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5915 tcg_temp_free_i32(tmp2);
5916 tcg_temp_free_i32(tmp3);
60011498 5917 break;
9ee6e8bb
PB
5918 default:
5919 elementwise:
5920 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5921 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5922 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5923 neon_reg_offset(rm, pass));
dd8fbd78 5924 TCGV_UNUSED(tmp);
9ee6e8bb 5925 } else {
dd8fbd78 5926 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5927 }
5928 switch (op) {
600b828c 5929 case NEON_2RM_VREV32:
9ee6e8bb 5930 switch (size) {
dd8fbd78
FN
5931 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5932 case 1: gen_swap_half(tmp); break;
600b828c 5933 default: abort();
9ee6e8bb
PB
5934 }
5935 break;
600b828c 5936 case NEON_2RM_VREV16:
dd8fbd78 5937 gen_rev16(tmp);
9ee6e8bb 5938 break;
600b828c 5939 case NEON_2RM_VCLS:
9ee6e8bb 5940 switch (size) {
dd8fbd78
FN
5941 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5942 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5943 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5944 default: abort();
9ee6e8bb
PB
5945 }
5946 break;
600b828c 5947 case NEON_2RM_VCLZ:
9ee6e8bb 5948 switch (size) {
dd8fbd78
FN
5949 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5950 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5951 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5952 default: abort();
9ee6e8bb
PB
5953 }
5954 break;
600b828c 5955 case NEON_2RM_VCNT:
dd8fbd78 5956 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5957 break;
600b828c 5958 case NEON_2RM_VMVN:
dd8fbd78 5959 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5960 break;
600b828c 5961 case NEON_2RM_VQABS:
9ee6e8bb 5962 switch (size) {
2a3f75b4
PM
5963 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5964 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5965 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
600b828c 5966 default: abort();
9ee6e8bb
PB
5967 }
5968 break;
600b828c 5969 case NEON_2RM_VQNEG:
9ee6e8bb 5970 switch (size) {
2a3f75b4
PM
5971 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5972 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5973 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
600b828c 5974 default: abort();
9ee6e8bb
PB
5975 }
5976 break;
600b828c 5977 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5978 tmp2 = tcg_const_i32(0);
9ee6e8bb 5979 switch(size) {
dd8fbd78
FN
5980 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5981 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5982 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5983 default: abort();
9ee6e8bb 5984 }
dd8fbd78 5985 tcg_temp_free(tmp2);
600b828c 5986 if (op == NEON_2RM_VCLE0) {
dd8fbd78 5987 tcg_gen_not_i32(tmp, tmp);
600b828c 5988 }
9ee6e8bb 5989 break;
600b828c 5990 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 5991 tmp2 = tcg_const_i32(0);
9ee6e8bb 5992 switch(size) {
dd8fbd78
FN
5993 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5994 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5995 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 5996 default: abort();
9ee6e8bb 5997 }
dd8fbd78 5998 tcg_temp_free(tmp2);
600b828c 5999 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6000 tcg_gen_not_i32(tmp, tmp);
600b828c 6001 }
9ee6e8bb 6002 break;
600b828c 6003 case NEON_2RM_VCEQ0:
dd8fbd78 6004 tmp2 = tcg_const_i32(0);
9ee6e8bb 6005 switch(size) {
dd8fbd78
FN
6006 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6007 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6008 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6009 default: abort();
9ee6e8bb 6010 }
dd8fbd78 6011 tcg_temp_free(tmp2);
9ee6e8bb 6012 break;
600b828c 6013 case NEON_2RM_VABS:
9ee6e8bb 6014 switch(size) {
dd8fbd78
FN
6015 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6016 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6017 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6018 default: abort();
9ee6e8bb
PB
6019 }
6020 break;
600b828c 6021 case NEON_2RM_VNEG:
dd8fbd78
FN
6022 tmp2 = tcg_const_i32(0);
6023 gen_neon_rsb(size, tmp, tmp2);
6024 tcg_temp_free(tmp2);
9ee6e8bb 6025 break;
600b828c 6026 case NEON_2RM_VCGT0_F:
dd8fbd78
FN
6027 tmp2 = tcg_const_i32(0);
6028 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
6029 tcg_temp_free(tmp2);
9ee6e8bb 6030 break;
600b828c 6031 case NEON_2RM_VCGE0_F:
dd8fbd78
FN
6032 tmp2 = tcg_const_i32(0);
6033 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
6034 tcg_temp_free(tmp2);
9ee6e8bb 6035 break;
600b828c 6036 case NEON_2RM_VCEQ0_F:
dd8fbd78
FN
6037 tmp2 = tcg_const_i32(0);
6038 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
6039 tcg_temp_free(tmp2);
9ee6e8bb 6040 break;
600b828c 6041 case NEON_2RM_VCLE0_F:
0e326109
PM
6042 tmp2 = tcg_const_i32(0);
6043 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
6044 tcg_temp_free(tmp2);
6045 break;
600b828c 6046 case NEON_2RM_VCLT0_F:
0e326109
PM
6047 tmp2 = tcg_const_i32(0);
6048 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
6049 tcg_temp_free(tmp2);
6050 break;
600b828c 6051 case NEON_2RM_VABS_F:
4373f3ce 6052 gen_vfp_abs(0);
9ee6e8bb 6053 break;
600b828c 6054 case NEON_2RM_VNEG_F:
4373f3ce 6055 gen_vfp_neg(0);
9ee6e8bb 6056 break;
600b828c 6057 case NEON_2RM_VSWP:
dd8fbd78
FN
6058 tmp2 = neon_load_reg(rd, pass);
6059 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6060 break;
600b828c 6061 case NEON_2RM_VTRN:
dd8fbd78 6062 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6063 switch (size) {
dd8fbd78
FN
6064 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6065 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6066 default: abort();
9ee6e8bb 6067 }
dd8fbd78 6068 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6069 break;
600b828c 6070 case NEON_2RM_VRECPE:
dd8fbd78 6071 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6072 break;
600b828c 6073 case NEON_2RM_VRSQRTE:
dd8fbd78 6074 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6075 break;
600b828c 6076 case NEON_2RM_VRECPE_F:
4373f3ce 6077 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6078 break;
600b828c 6079 case NEON_2RM_VRSQRTE_F:
4373f3ce 6080 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6081 break;
600b828c 6082 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6083 gen_vfp_sito(0, 1);
9ee6e8bb 6084 break;
600b828c 6085 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6086 gen_vfp_uito(0, 1);
9ee6e8bb 6087 break;
600b828c 6088 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6089 gen_vfp_tosiz(0, 1);
9ee6e8bb 6090 break;
600b828c 6091 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6092 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6093 break;
6094 default:
600b828c
PM
6095 /* Reserved op values were caught by the
6096 * neon_2rm_sizes[] check earlier.
6097 */
6098 abort();
9ee6e8bb 6099 }
600b828c 6100 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6101 tcg_gen_st_f32(cpu_F0s, cpu_env,
6102 neon_reg_offset(rd, pass));
9ee6e8bb 6103 } else {
dd8fbd78 6104 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6105 }
6106 }
6107 break;
6108 }
6109 } else if ((insn & (1 << 10)) == 0) {
6110 /* VTBL, VTBX. */
56907d77
PM
6111 int n = ((insn >> 8) & 3) + 1;
6112 if ((rn + n) > 32) {
6113 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6114 * helper function running off the end of the register file.
6115 */
6116 return 1;
6117 }
6118 n <<= 3;
9ee6e8bb 6119 if (insn & (1 << 6)) {
8f8e3aa4 6120 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6121 } else {
7d1b0095 6122 tmp = tcg_temp_new_i32();
8f8e3aa4 6123 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6124 }
8f8e3aa4 6125 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6126 tmp4 = tcg_const_i32(rn);
6127 tmp5 = tcg_const_i32(n);
6128 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6129 tcg_temp_free_i32(tmp);
9ee6e8bb 6130 if (insn & (1 << 6)) {
8f8e3aa4 6131 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6132 } else {
7d1b0095 6133 tmp = tcg_temp_new_i32();
8f8e3aa4 6134 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6135 }
8f8e3aa4 6136 tmp3 = neon_load_reg(rm, 1);
b75263d6 6137 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6138 tcg_temp_free_i32(tmp5);
6139 tcg_temp_free_i32(tmp4);
8f8e3aa4 6140 neon_store_reg(rd, 0, tmp2);
3018f259 6141 neon_store_reg(rd, 1, tmp3);
7d1b0095 6142 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6143 } else if ((insn & 0x380) == 0) {
6144 /* VDUP */
133da6aa
JR
6145 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6146 return 1;
6147 }
9ee6e8bb 6148 if (insn & (1 << 19)) {
dd8fbd78 6149 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6150 } else {
dd8fbd78 6151 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6152 }
6153 if (insn & (1 << 16)) {
dd8fbd78 6154 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6155 } else if (insn & (1 << 17)) {
6156 if ((insn >> 18) & 1)
dd8fbd78 6157 gen_neon_dup_high16(tmp);
9ee6e8bb 6158 else
dd8fbd78 6159 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6160 }
6161 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6162 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6163 tcg_gen_mov_i32(tmp2, tmp);
6164 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6165 }
7d1b0095 6166 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6167 } else {
6168 return 1;
6169 }
6170 }
6171 }
6172 return 0;
6173}
6174
fe1479c3
PB
6175static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6176{
6177 int crn = (insn >> 16) & 0xf;
6178 int crm = insn & 0xf;
6179 int op1 = (insn >> 21) & 7;
6180 int op2 = (insn >> 5) & 7;
6181 int rt = (insn >> 12) & 0xf;
6182 TCGv tmp;
6183
ca27c052
PM
6184 /* Minimal set of debug registers, since we don't support debug */
6185 if (op1 == 0 && crn == 0 && op2 == 0) {
6186 switch (crm) {
6187 case 0:
6188 /* DBGDIDR: just RAZ. In particular this means the
6189 * "debug architecture version" bits will read as
6190 * a reserved value, which should cause Linux to
6191 * not try to use the debug hardware.
6192 */
6193 tmp = tcg_const_i32(0);
6194 store_reg(s, rt, tmp);
6195 return 0;
6196 case 1:
6197 case 2:
6198 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6199 * don't implement memory mapped debug components
6200 */
6201 if (ENABLE_ARCH_7) {
6202 tmp = tcg_const_i32(0);
6203 store_reg(s, rt, tmp);
6204 return 0;
6205 }
6206 break;
6207 default:
6208 break;
6209 }
6210 }
6211
fe1479c3
PB
6212 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6213 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6214 /* TEECR */
6215 if (IS_USER(s))
6216 return 1;
6217 tmp = load_cpu_field(teecr);
6218 store_reg(s, rt, tmp);
6219 return 0;
6220 }
6221 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6222 /* TEEHBR */
6223 if (IS_USER(s) && (env->teecr & 1))
6224 return 1;
6225 tmp = load_cpu_field(teehbr);
6226 store_reg(s, rt, tmp);
6227 return 0;
6228 }
6229 }
6230 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6231 op1, crn, crm, op2);
6232 return 1;
6233}
6234
6235static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6236{
6237 int crn = (insn >> 16) & 0xf;
6238 int crm = insn & 0xf;
6239 int op1 = (insn >> 21) & 7;
6240 int op2 = (insn >> 5) & 7;
6241 int rt = (insn >> 12) & 0xf;
6242 TCGv tmp;
6243
6244 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6245 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6246 /* TEECR */
6247 if (IS_USER(s))
6248 return 1;
6249 tmp = load_reg(s, rt);
6250 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6251 tcg_temp_free_i32(tmp);
fe1479c3
PB
6252 return 0;
6253 }
6254 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6255 /* TEEHBR */
6256 if (IS_USER(s) && (env->teecr & 1))
6257 return 1;
6258 tmp = load_reg(s, rt);
6259 store_cpu_field(tmp, teehbr);
6260 return 0;
6261 }
6262 }
6263 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6264 op1, crn, crm, op2);
6265 return 1;
6266}
6267
9ee6e8bb
PB
6268static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6269{
6270 int cpnum;
6271
6272 cpnum = (insn >> 8) & 0xf;
6273 if (arm_feature(env, ARM_FEATURE_XSCALE)
6274 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6275 return 1;
6276
6277 switch (cpnum) {
6278 case 0:
6279 case 1:
6280 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6281 return disas_iwmmxt_insn(env, s, insn);
6282 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6283 return disas_dsp_insn(env, s, insn);
6284 }
6285 return 1;
6286 case 10:
6287 case 11:
6288 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6289 case 14:
6290 /* Coprocessors 7-15 are architecturally reserved by ARM.
6291 Unfortunately Intel decided to ignore this. */
6292 if (arm_feature(env, ARM_FEATURE_XSCALE))
6293 goto board;
6294 if (insn & (1 << 20))
6295 return disas_cp14_read(env, s, insn);
6296 else
6297 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6298 case 15:
6299 return disas_cp15_insn (env, s, insn);
6300 default:
fe1479c3 6301 board:
9ee6e8bb
PB
6302 /* Unknown coprocessor. See if the board has hooked it. */
6303 return disas_cp_insn (env, s, insn);
6304 }
6305}
6306
5e3f878a
PB
6307
6308/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6309static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6310{
6311 TCGv tmp;
7d1b0095 6312 tmp = tcg_temp_new_i32();
5e3f878a
PB
6313 tcg_gen_trunc_i64_i32(tmp, val);
6314 store_reg(s, rlow, tmp);
7d1b0095 6315 tmp = tcg_temp_new_i32();
5e3f878a
PB
6316 tcg_gen_shri_i64(val, val, 32);
6317 tcg_gen_trunc_i64_i32(tmp, val);
6318 store_reg(s, rhigh, tmp);
6319}
6320
6321/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6322static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6323{
a7812ae4 6324 TCGv_i64 tmp;
5e3f878a
PB
6325 TCGv tmp2;
6326
36aa55dc 6327 /* Load value and extend to 64 bits. */
a7812ae4 6328 tmp = tcg_temp_new_i64();
5e3f878a
PB
6329 tmp2 = load_reg(s, rlow);
6330 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6331 tcg_temp_free_i32(tmp2);
5e3f878a 6332 tcg_gen_add_i64(val, val, tmp);
b75263d6 6333 tcg_temp_free_i64(tmp);
5e3f878a
PB
6334}
6335
6336/* load and add a 64-bit value from a register pair. */
a7812ae4 6337static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6338{
a7812ae4 6339 TCGv_i64 tmp;
36aa55dc
PB
6340 TCGv tmpl;
6341 TCGv tmph;
5e3f878a
PB
6342
6343 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6344 tmpl = load_reg(s, rlow);
6345 tmph = load_reg(s, rhigh);
a7812ae4 6346 tmp = tcg_temp_new_i64();
36aa55dc 6347 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6348 tcg_temp_free_i32(tmpl);
6349 tcg_temp_free_i32(tmph);
5e3f878a 6350 tcg_gen_add_i64(val, val, tmp);
b75263d6 6351 tcg_temp_free_i64(tmp);
5e3f878a
PB
6352}
6353
6354/* Set N and Z flags from a 64-bit value. */
a7812ae4 6355static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6356{
7d1b0095 6357 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6358 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6359 gen_logic_CC(tmp);
7d1b0095 6360 tcg_temp_free_i32(tmp);
5e3f878a
PB
6361}
6362
426f5abc
PB
6363/* Load/Store exclusive instructions are implemented by remembering
6364 the value/address loaded, and seeing if these are the same
6365 when the store is performed. This should be is sufficient to implement
6366 the architecturally mandated semantics, and avoids having to monitor
6367 regular stores.
6368
6369 In system emulation mode only one CPU will be running at once, so
6370 this sequence is effectively atomic. In user emulation mode we
6371 throw an exception and handle the atomic operation elsewhere. */
6372static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6373 TCGv addr, int size)
6374{
6375 TCGv tmp;
6376
6377 switch (size) {
6378 case 0:
6379 tmp = gen_ld8u(addr, IS_USER(s));
6380 break;
6381 case 1:
6382 tmp = gen_ld16u(addr, IS_USER(s));
6383 break;
6384 case 2:
6385 case 3:
6386 tmp = gen_ld32(addr, IS_USER(s));
6387 break;
6388 default:
6389 abort();
6390 }
6391 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6392 store_reg(s, rt, tmp);
6393 if (size == 3) {
7d1b0095 6394 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6395 tcg_gen_addi_i32(tmp2, addr, 4);
6396 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6397 tcg_temp_free_i32(tmp2);
426f5abc
PB
6398 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6399 store_reg(s, rt2, tmp);
6400 }
6401 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6402}
6403
6404static void gen_clrex(DisasContext *s)
6405{
6406 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6407}
6408
6409#ifdef CONFIG_USER_ONLY
6410static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6411 TCGv addr, int size)
6412{
6413 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6414 tcg_gen_movi_i32(cpu_exclusive_info,
6415 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6416 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6417}
6418#else
6419static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6420 TCGv addr, int size)
6421{
6422 TCGv tmp;
6423 int done_label;
6424 int fail_label;
6425
6426 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6427 [addr] = {Rt};
6428 {Rd} = 0;
6429 } else {
6430 {Rd} = 1;
6431 } */
6432 fail_label = gen_new_label();
6433 done_label = gen_new_label();
6434 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6435 switch (size) {
6436 case 0:
6437 tmp = gen_ld8u(addr, IS_USER(s));
6438 break;
6439 case 1:
6440 tmp = gen_ld16u(addr, IS_USER(s));
6441 break;
6442 case 2:
6443 case 3:
6444 tmp = gen_ld32(addr, IS_USER(s));
6445 break;
6446 default:
6447 abort();
6448 }
6449 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6450 tcg_temp_free_i32(tmp);
426f5abc 6451 if (size == 3) {
7d1b0095 6452 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6453 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6454 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6455 tcg_temp_free_i32(tmp2);
426f5abc 6456 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6457 tcg_temp_free_i32(tmp);
426f5abc
PB
6458 }
6459 tmp = load_reg(s, rt);
6460 switch (size) {
6461 case 0:
6462 gen_st8(tmp, addr, IS_USER(s));
6463 break;
6464 case 1:
6465 gen_st16(tmp, addr, IS_USER(s));
6466 break;
6467 case 2:
6468 case 3:
6469 gen_st32(tmp, addr, IS_USER(s));
6470 break;
6471 default:
6472 abort();
6473 }
6474 if (size == 3) {
6475 tcg_gen_addi_i32(addr, addr, 4);
6476 tmp = load_reg(s, rt2);
6477 gen_st32(tmp, addr, IS_USER(s));
6478 }
6479 tcg_gen_movi_i32(cpu_R[rd], 0);
6480 tcg_gen_br(done_label);
6481 gen_set_label(fail_label);
6482 tcg_gen_movi_i32(cpu_R[rd], 1);
6483 gen_set_label(done_label);
6484 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6485}
6486#endif
6487
9ee6e8bb
PB
6488static void disas_arm_insn(CPUState * env, DisasContext *s)
6489{
6490 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6491 TCGv tmp;
3670669c 6492 TCGv tmp2;
6ddbc6e4 6493 TCGv tmp3;
b0109805 6494 TCGv addr;
a7812ae4 6495 TCGv_i64 tmp64;
9ee6e8bb
PB
6496
6497 insn = ldl_code(s->pc);
6498 s->pc += 4;
6499
6500 /* M variants do not implement ARM mode. */
6501 if (IS_M(env))
6502 goto illegal_op;
6503 cond = insn >> 28;
6504 if (cond == 0xf){
be5e7a76
DES
6505 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6506 * choose to UNDEF. In ARMv5 and above the space is used
6507 * for miscellaneous unconditional instructions.
6508 */
6509 ARCH(5);
6510
9ee6e8bb
PB
6511 /* Unconditional instructions. */
6512 if (((insn >> 25) & 7) == 1) {
6513 /* NEON Data processing. */
6514 if (!arm_feature(env, ARM_FEATURE_NEON))
6515 goto illegal_op;
6516
6517 if (disas_neon_data_insn(env, s, insn))
6518 goto illegal_op;
6519 return;
6520 }
6521 if ((insn & 0x0f100000) == 0x04000000) {
6522 /* NEON load/store. */
6523 if (!arm_feature(env, ARM_FEATURE_NEON))
6524 goto illegal_op;
6525
6526 if (disas_neon_ls_insn(env, s, insn))
6527 goto illegal_op;
6528 return;
6529 }
3d185e5d
PM
6530 if (((insn & 0x0f30f000) == 0x0510f000) ||
6531 ((insn & 0x0f30f010) == 0x0710f000)) {
6532 if ((insn & (1 << 22)) == 0) {
6533 /* PLDW; v7MP */
6534 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6535 goto illegal_op;
6536 }
6537 }
6538 /* Otherwise PLD; v5TE+ */
be5e7a76 6539 ARCH(5TE);
3d185e5d
PM
6540 return;
6541 }
6542 if (((insn & 0x0f70f000) == 0x0450f000) ||
6543 ((insn & 0x0f70f010) == 0x0650f000)) {
6544 ARCH(7);
6545 return; /* PLI; V7 */
6546 }
6547 if (((insn & 0x0f700000) == 0x04100000) ||
6548 ((insn & 0x0f700010) == 0x06100000)) {
6549 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6550 goto illegal_op;
6551 }
6552 return; /* v7MP: Unallocated memory hint: must NOP */
6553 }
6554
6555 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6556 ARCH(6);
6557 /* setend */
6558 if (insn & (1 << 9)) {
6559 /* BE8 mode not implemented. */
6560 goto illegal_op;
6561 }
6562 return;
6563 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6564 switch ((insn >> 4) & 0xf) {
6565 case 1: /* clrex */
6566 ARCH(6K);
426f5abc 6567 gen_clrex(s);
9ee6e8bb
PB
6568 return;
6569 case 4: /* dsb */
6570 case 5: /* dmb */
6571 case 6: /* isb */
6572 ARCH(7);
6573 /* We don't emulate caches so these are a no-op. */
6574 return;
6575 default:
6576 goto illegal_op;
6577 }
6578 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6579 /* srs */
c67b6b71 6580 int32_t offset;
9ee6e8bb
PB
6581 if (IS_USER(s))
6582 goto illegal_op;
6583 ARCH(6);
6584 op1 = (insn & 0x1f);
7d1b0095 6585 addr = tcg_temp_new_i32();
39ea3d4e
PM
6586 tmp = tcg_const_i32(op1);
6587 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6588 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6589 i = (insn >> 23) & 3;
6590 switch (i) {
6591 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6592 case 1: offset = 0; break; /* IA */
6593 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6594 case 3: offset = 4; break; /* IB */
6595 default: abort();
6596 }
6597 if (offset)
b0109805
PB
6598 tcg_gen_addi_i32(addr, addr, offset);
6599 tmp = load_reg(s, 14);
6600 gen_st32(tmp, addr, 0);
c67b6b71 6601 tmp = load_cpu_field(spsr);
b0109805
PB
6602 tcg_gen_addi_i32(addr, addr, 4);
6603 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6604 if (insn & (1 << 21)) {
6605 /* Base writeback. */
6606 switch (i) {
6607 case 0: offset = -8; break;
c67b6b71
FN
6608 case 1: offset = 4; break;
6609 case 2: offset = -4; break;
9ee6e8bb
PB
6610 case 3: offset = 0; break;
6611 default: abort();
6612 }
6613 if (offset)
c67b6b71 6614 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6615 tmp = tcg_const_i32(op1);
6616 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6617 tcg_temp_free_i32(tmp);
7d1b0095 6618 tcg_temp_free_i32(addr);
b0109805 6619 } else {
7d1b0095 6620 tcg_temp_free_i32(addr);
9ee6e8bb 6621 }
a990f58f 6622 return;
ea825eee 6623 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6624 /* rfe */
c67b6b71 6625 int32_t offset;
9ee6e8bb
PB
6626 if (IS_USER(s))
6627 goto illegal_op;
6628 ARCH(6);
6629 rn = (insn >> 16) & 0xf;
b0109805 6630 addr = load_reg(s, rn);
9ee6e8bb
PB
6631 i = (insn >> 23) & 3;
6632 switch (i) {
b0109805 6633 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6634 case 1: offset = 0; break; /* IA */
6635 case 2: offset = -8; break; /* DB */
b0109805 6636 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6637 default: abort();
6638 }
6639 if (offset)
b0109805
PB
6640 tcg_gen_addi_i32(addr, addr, offset);
6641 /* Load PC into tmp and CPSR into tmp2. */
6642 tmp = gen_ld32(addr, 0);
6643 tcg_gen_addi_i32(addr, addr, 4);
6644 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6645 if (insn & (1 << 21)) {
6646 /* Base writeback. */
6647 switch (i) {
b0109805 6648 case 0: offset = -8; break;
c67b6b71
FN
6649 case 1: offset = 4; break;
6650 case 2: offset = -4; break;
b0109805 6651 case 3: offset = 0; break;
9ee6e8bb
PB
6652 default: abort();
6653 }
6654 if (offset)
b0109805
PB
6655 tcg_gen_addi_i32(addr, addr, offset);
6656 store_reg(s, rn, addr);
6657 } else {
7d1b0095 6658 tcg_temp_free_i32(addr);
9ee6e8bb 6659 }
b0109805 6660 gen_rfe(s, tmp, tmp2);
c67b6b71 6661 return;
9ee6e8bb
PB
6662 } else if ((insn & 0x0e000000) == 0x0a000000) {
6663 /* branch link and change to thumb (blx <offset>) */
6664 int32_t offset;
6665
6666 val = (uint32_t)s->pc;
7d1b0095 6667 tmp = tcg_temp_new_i32();
d9ba4830
PB
6668 tcg_gen_movi_i32(tmp, val);
6669 store_reg(s, 14, tmp);
9ee6e8bb
PB
6670 /* Sign-extend the 24-bit offset */
6671 offset = (((int32_t)insn) << 8) >> 8;
6672 /* offset * 4 + bit24 * 2 + (thumb bit) */
6673 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6674 /* pipeline offset */
6675 val += 4;
be5e7a76 6676 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6677 gen_bx_im(s, val);
9ee6e8bb
PB
6678 return;
6679 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6680 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6681 /* iWMMXt register transfer. */
6682 if (env->cp15.c15_cpar & (1 << 1))
6683 if (!disas_iwmmxt_insn(env, s, insn))
6684 return;
6685 }
6686 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6687 /* Coprocessor double register transfer. */
be5e7a76 6688 ARCH(5TE);
9ee6e8bb
PB
6689 } else if ((insn & 0x0f000010) == 0x0e000010) {
6690 /* Additional coprocessor register transfer. */
7997d92f 6691 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6692 uint32_t mask;
6693 uint32_t val;
6694 /* cps (privileged) */
6695 if (IS_USER(s))
6696 return;
6697 mask = val = 0;
6698 if (insn & (1 << 19)) {
6699 if (insn & (1 << 8))
6700 mask |= CPSR_A;
6701 if (insn & (1 << 7))
6702 mask |= CPSR_I;
6703 if (insn & (1 << 6))
6704 mask |= CPSR_F;
6705 if (insn & (1 << 18))
6706 val |= mask;
6707 }
7997d92f 6708 if (insn & (1 << 17)) {
9ee6e8bb
PB
6709 mask |= CPSR_M;
6710 val |= (insn & 0x1f);
6711 }
6712 if (mask) {
2fbac54b 6713 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6714 }
6715 return;
6716 }
6717 goto illegal_op;
6718 }
6719 if (cond != 0xe) {
6720 /* if not always execute, we generate a conditional jump to
6721 next instruction */
6722 s->condlabel = gen_new_label();
d9ba4830 6723 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6724 s->condjmp = 1;
6725 }
6726 if ((insn & 0x0f900000) == 0x03000000) {
6727 if ((insn & (1 << 21)) == 0) {
6728 ARCH(6T2);
6729 rd = (insn >> 12) & 0xf;
6730 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6731 if ((insn & (1 << 22)) == 0) {
6732 /* MOVW */
7d1b0095 6733 tmp = tcg_temp_new_i32();
5e3f878a 6734 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6735 } else {
6736 /* MOVT */
5e3f878a 6737 tmp = load_reg(s, rd);
86831435 6738 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6739 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6740 }
5e3f878a 6741 store_reg(s, rd, tmp);
9ee6e8bb
PB
6742 } else {
6743 if (((insn >> 12) & 0xf) != 0xf)
6744 goto illegal_op;
6745 if (((insn >> 16) & 0xf) == 0) {
6746 gen_nop_hint(s, insn & 0xff);
6747 } else {
6748 /* CPSR = immediate */
6749 val = insn & 0xff;
6750 shift = ((insn >> 8) & 0xf) * 2;
6751 if (shift)
6752 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6753 i = ((insn & (1 << 22)) != 0);
2fbac54b 6754 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6755 goto illegal_op;
6756 }
6757 }
6758 } else if ((insn & 0x0f900000) == 0x01000000
6759 && (insn & 0x00000090) != 0x00000090) {
6760 /* miscellaneous instructions */
6761 op1 = (insn >> 21) & 3;
6762 sh = (insn >> 4) & 0xf;
6763 rm = insn & 0xf;
6764 switch (sh) {
6765 case 0x0: /* move program status register */
6766 if (op1 & 1) {
6767 /* PSR = reg */
2fbac54b 6768 tmp = load_reg(s, rm);
9ee6e8bb 6769 i = ((op1 & 2) != 0);
2fbac54b 6770 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6771 goto illegal_op;
6772 } else {
6773 /* reg = PSR */
6774 rd = (insn >> 12) & 0xf;
6775 if (op1 & 2) {
6776 if (IS_USER(s))
6777 goto illegal_op;
d9ba4830 6778 tmp = load_cpu_field(spsr);
9ee6e8bb 6779 } else {
7d1b0095 6780 tmp = tcg_temp_new_i32();
d9ba4830 6781 gen_helper_cpsr_read(tmp);
9ee6e8bb 6782 }
d9ba4830 6783 store_reg(s, rd, tmp);
9ee6e8bb
PB
6784 }
6785 break;
6786 case 0x1:
6787 if (op1 == 1) {
6788 /* branch/exchange thumb (bx). */
be5e7a76 6789 ARCH(4T);
d9ba4830
PB
6790 tmp = load_reg(s, rm);
6791 gen_bx(s, tmp);
9ee6e8bb
PB
6792 } else if (op1 == 3) {
6793 /* clz */
be5e7a76 6794 ARCH(5);
9ee6e8bb 6795 rd = (insn >> 12) & 0xf;
1497c961
PB
6796 tmp = load_reg(s, rm);
6797 gen_helper_clz(tmp, tmp);
6798 store_reg(s, rd, tmp);
9ee6e8bb
PB
6799 } else {
6800 goto illegal_op;
6801 }
6802 break;
6803 case 0x2:
6804 if (op1 == 1) {
6805 ARCH(5J); /* bxj */
6806 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6807 tmp = load_reg(s, rm);
6808 gen_bx(s, tmp);
9ee6e8bb
PB
6809 } else {
6810 goto illegal_op;
6811 }
6812 break;
6813 case 0x3:
6814 if (op1 != 1)
6815 goto illegal_op;
6816
be5e7a76 6817 ARCH(5);
9ee6e8bb 6818 /* branch link/exchange thumb (blx) */
d9ba4830 6819 tmp = load_reg(s, rm);
7d1b0095 6820 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6821 tcg_gen_movi_i32(tmp2, s->pc);
6822 store_reg(s, 14, tmp2);
6823 gen_bx(s, tmp);
9ee6e8bb
PB
6824 break;
6825 case 0x5: /* saturating add/subtract */
be5e7a76 6826 ARCH(5TE);
9ee6e8bb
PB
6827 rd = (insn >> 12) & 0xf;
6828 rn = (insn >> 16) & 0xf;
b40d0353 6829 tmp = load_reg(s, rm);
5e3f878a 6830 tmp2 = load_reg(s, rn);
9ee6e8bb 6831 if (op1 & 2)
5e3f878a 6832 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6833 if (op1 & 1)
5e3f878a 6834 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6835 else
5e3f878a 6836 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6837 tcg_temp_free_i32(tmp2);
5e3f878a 6838 store_reg(s, rd, tmp);
9ee6e8bb 6839 break;
49e14940
AL
6840 case 7:
6841 /* SMC instruction (op1 == 3)
6842 and undefined instructions (op1 == 0 || op1 == 2)
6843 will trap */
6844 if (op1 != 1) {
6845 goto illegal_op;
6846 }
6847 /* bkpt */
be5e7a76 6848 ARCH(5);
bc4a0de0 6849 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6850 break;
6851 case 0x8: /* signed multiply */
6852 case 0xa:
6853 case 0xc:
6854 case 0xe:
be5e7a76 6855 ARCH(5TE);
9ee6e8bb
PB
6856 rs = (insn >> 8) & 0xf;
6857 rn = (insn >> 12) & 0xf;
6858 rd = (insn >> 16) & 0xf;
6859 if (op1 == 1) {
6860 /* (32 * 16) >> 16 */
5e3f878a
PB
6861 tmp = load_reg(s, rm);
6862 tmp2 = load_reg(s, rs);
9ee6e8bb 6863 if (sh & 4)
5e3f878a 6864 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6865 else
5e3f878a 6866 gen_sxth(tmp2);
a7812ae4
PB
6867 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6868 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6869 tmp = tcg_temp_new_i32();
a7812ae4 6870 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6871 tcg_temp_free_i64(tmp64);
9ee6e8bb 6872 if ((sh & 2) == 0) {
5e3f878a
PB
6873 tmp2 = load_reg(s, rn);
6874 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6875 tcg_temp_free_i32(tmp2);
9ee6e8bb 6876 }
5e3f878a 6877 store_reg(s, rd, tmp);
9ee6e8bb
PB
6878 } else {
6879 /* 16 * 16 */
5e3f878a
PB
6880 tmp = load_reg(s, rm);
6881 tmp2 = load_reg(s, rs);
6882 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6883 tcg_temp_free_i32(tmp2);
9ee6e8bb 6884 if (op1 == 2) {
a7812ae4
PB
6885 tmp64 = tcg_temp_new_i64();
6886 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6887 tcg_temp_free_i32(tmp);
a7812ae4
PB
6888 gen_addq(s, tmp64, rn, rd);
6889 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6890 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6891 } else {
6892 if (op1 == 0) {
5e3f878a
PB
6893 tmp2 = load_reg(s, rn);
6894 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6895 tcg_temp_free_i32(tmp2);
9ee6e8bb 6896 }
5e3f878a 6897 store_reg(s, rd, tmp);
9ee6e8bb
PB
6898 }
6899 }
6900 break;
6901 default:
6902 goto illegal_op;
6903 }
6904 } else if (((insn & 0x0e000000) == 0 &&
6905 (insn & 0x00000090) != 0x90) ||
6906 ((insn & 0x0e000000) == (1 << 25))) {
6907 int set_cc, logic_cc, shiftop;
6908
6909 op1 = (insn >> 21) & 0xf;
6910 set_cc = (insn >> 20) & 1;
6911 logic_cc = table_logic_cc[op1] & set_cc;
6912
6913 /* data processing instruction */
6914 if (insn & (1 << 25)) {
6915 /* immediate operand */
6916 val = insn & 0xff;
6917 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6918 if (shift) {
9ee6e8bb 6919 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6920 }
7d1b0095 6921 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6922 tcg_gen_movi_i32(tmp2, val);
6923 if (logic_cc && shift) {
6924 gen_set_CF_bit31(tmp2);
6925 }
9ee6e8bb
PB
6926 } else {
6927 /* register */
6928 rm = (insn) & 0xf;
e9bb4aa9 6929 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6930 shiftop = (insn >> 5) & 3;
6931 if (!(insn & (1 << 4))) {
6932 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6933 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6934 } else {
6935 rs = (insn >> 8) & 0xf;
8984bd2e 6936 tmp = load_reg(s, rs);
e9bb4aa9 6937 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6938 }
6939 }
6940 if (op1 != 0x0f && op1 != 0x0d) {
6941 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6942 tmp = load_reg(s, rn);
6943 } else {
6944 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6945 }
6946 rd = (insn >> 12) & 0xf;
6947 switch(op1) {
6948 case 0x00:
e9bb4aa9
JR
6949 tcg_gen_and_i32(tmp, tmp, tmp2);
6950 if (logic_cc) {
6951 gen_logic_CC(tmp);
6952 }
21aeb343 6953 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6954 break;
6955 case 0x01:
e9bb4aa9
JR
6956 tcg_gen_xor_i32(tmp, tmp, tmp2);
6957 if (logic_cc) {
6958 gen_logic_CC(tmp);
6959 }
21aeb343 6960 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6961 break;
6962 case 0x02:
6963 if (set_cc && rd == 15) {
6964 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6965 if (IS_USER(s)) {
9ee6e8bb 6966 goto illegal_op;
e9bb4aa9
JR
6967 }
6968 gen_helper_sub_cc(tmp, tmp, tmp2);
6969 gen_exception_return(s, tmp);
9ee6e8bb 6970 } else {
e9bb4aa9
JR
6971 if (set_cc) {
6972 gen_helper_sub_cc(tmp, tmp, tmp2);
6973 } else {
6974 tcg_gen_sub_i32(tmp, tmp, tmp2);
6975 }
21aeb343 6976 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6977 }
6978 break;
6979 case 0x03:
e9bb4aa9
JR
6980 if (set_cc) {
6981 gen_helper_sub_cc(tmp, tmp2, tmp);
6982 } else {
6983 tcg_gen_sub_i32(tmp, tmp2, tmp);
6984 }
21aeb343 6985 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6986 break;
6987 case 0x04:
e9bb4aa9
JR
6988 if (set_cc) {
6989 gen_helper_add_cc(tmp, tmp, tmp2);
6990 } else {
6991 tcg_gen_add_i32(tmp, tmp, tmp2);
6992 }
21aeb343 6993 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6994 break;
6995 case 0x05:
e9bb4aa9
JR
6996 if (set_cc) {
6997 gen_helper_adc_cc(tmp, tmp, tmp2);
6998 } else {
6999 gen_add_carry(tmp, tmp, tmp2);
7000 }
21aeb343 7001 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7002 break;
7003 case 0x06:
e9bb4aa9
JR
7004 if (set_cc) {
7005 gen_helper_sbc_cc(tmp, tmp, tmp2);
7006 } else {
7007 gen_sub_carry(tmp, tmp, tmp2);
7008 }
21aeb343 7009 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7010 break;
7011 case 0x07:
e9bb4aa9
JR
7012 if (set_cc) {
7013 gen_helper_sbc_cc(tmp, tmp2, tmp);
7014 } else {
7015 gen_sub_carry(tmp, tmp2, tmp);
7016 }
21aeb343 7017 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7018 break;
7019 case 0x08:
7020 if (set_cc) {
e9bb4aa9
JR
7021 tcg_gen_and_i32(tmp, tmp, tmp2);
7022 gen_logic_CC(tmp);
9ee6e8bb 7023 }
7d1b0095 7024 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7025 break;
7026 case 0x09:
7027 if (set_cc) {
e9bb4aa9
JR
7028 tcg_gen_xor_i32(tmp, tmp, tmp2);
7029 gen_logic_CC(tmp);
9ee6e8bb 7030 }
7d1b0095 7031 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7032 break;
7033 case 0x0a:
7034 if (set_cc) {
e9bb4aa9 7035 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7036 }
7d1b0095 7037 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7038 break;
7039 case 0x0b:
7040 if (set_cc) {
e9bb4aa9 7041 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7042 }
7d1b0095 7043 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7044 break;
7045 case 0x0c:
e9bb4aa9
JR
7046 tcg_gen_or_i32(tmp, tmp, tmp2);
7047 if (logic_cc) {
7048 gen_logic_CC(tmp);
7049 }
21aeb343 7050 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7051 break;
7052 case 0x0d:
7053 if (logic_cc && rd == 15) {
7054 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7055 if (IS_USER(s)) {
9ee6e8bb 7056 goto illegal_op;
e9bb4aa9
JR
7057 }
7058 gen_exception_return(s, tmp2);
9ee6e8bb 7059 } else {
e9bb4aa9
JR
7060 if (logic_cc) {
7061 gen_logic_CC(tmp2);
7062 }
21aeb343 7063 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7064 }
7065 break;
7066 case 0x0e:
f669df27 7067 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7068 if (logic_cc) {
7069 gen_logic_CC(tmp);
7070 }
21aeb343 7071 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7072 break;
7073 default:
7074 case 0x0f:
e9bb4aa9
JR
7075 tcg_gen_not_i32(tmp2, tmp2);
7076 if (logic_cc) {
7077 gen_logic_CC(tmp2);
7078 }
21aeb343 7079 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7080 break;
7081 }
e9bb4aa9 7082 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7083 tcg_temp_free_i32(tmp2);
e9bb4aa9 7084 }
9ee6e8bb
PB
7085 } else {
7086 /* other instructions */
7087 op1 = (insn >> 24) & 0xf;
7088 switch(op1) {
7089 case 0x0:
7090 case 0x1:
7091 /* multiplies, extra load/stores */
7092 sh = (insn >> 5) & 3;
7093 if (sh == 0) {
7094 if (op1 == 0x0) {
7095 rd = (insn >> 16) & 0xf;
7096 rn = (insn >> 12) & 0xf;
7097 rs = (insn >> 8) & 0xf;
7098 rm = (insn) & 0xf;
7099 op1 = (insn >> 20) & 0xf;
7100 switch (op1) {
7101 case 0: case 1: case 2: case 3: case 6:
7102 /* 32 bit mul */
5e3f878a
PB
7103 tmp = load_reg(s, rs);
7104 tmp2 = load_reg(s, rm);
7105 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7106 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7107 if (insn & (1 << 22)) {
7108 /* Subtract (mls) */
7109 ARCH(6T2);
5e3f878a
PB
7110 tmp2 = load_reg(s, rn);
7111 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7112 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7113 } else if (insn & (1 << 21)) {
7114 /* Add */
5e3f878a
PB
7115 tmp2 = load_reg(s, rn);
7116 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7117 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7118 }
7119 if (insn & (1 << 20))
5e3f878a
PB
7120 gen_logic_CC(tmp);
7121 store_reg(s, rd, tmp);
9ee6e8bb 7122 break;
8aac08b1
AJ
7123 case 4:
7124 /* 64 bit mul double accumulate (UMAAL) */
7125 ARCH(6);
7126 tmp = load_reg(s, rs);
7127 tmp2 = load_reg(s, rm);
7128 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7129 gen_addq_lo(s, tmp64, rn);
7130 gen_addq_lo(s, tmp64, rd);
7131 gen_storeq_reg(s, rn, rd, tmp64);
7132 tcg_temp_free_i64(tmp64);
7133 break;
7134 case 8: case 9: case 10: case 11:
7135 case 12: case 13: case 14: case 15:
7136 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7137 tmp = load_reg(s, rs);
7138 tmp2 = load_reg(s, rm);
8aac08b1 7139 if (insn & (1 << 22)) {
a7812ae4 7140 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7141 } else {
a7812ae4 7142 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7143 }
7144 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7145 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7146 }
8aac08b1 7147 if (insn & (1 << 20)) {
a7812ae4 7148 gen_logicq_cc(tmp64);
8aac08b1 7149 }
a7812ae4 7150 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7151 tcg_temp_free_i64(tmp64);
9ee6e8bb 7152 break;
8aac08b1
AJ
7153 default:
7154 goto illegal_op;
9ee6e8bb
PB
7155 }
7156 } else {
7157 rn = (insn >> 16) & 0xf;
7158 rd = (insn >> 12) & 0xf;
7159 if (insn & (1 << 23)) {
7160 /* load/store exclusive */
86753403
PB
7161 op1 = (insn >> 21) & 0x3;
7162 if (op1)
a47f43d2 7163 ARCH(6K);
86753403
PB
7164 else
7165 ARCH(6);
3174f8e9 7166 addr = tcg_temp_local_new_i32();
98a46317 7167 load_reg_var(s, addr, rn);
9ee6e8bb 7168 if (insn & (1 << 20)) {
86753403
PB
7169 switch (op1) {
7170 case 0: /* ldrex */
426f5abc 7171 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7172 break;
7173 case 1: /* ldrexd */
426f5abc 7174 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7175 break;
7176 case 2: /* ldrexb */
426f5abc 7177 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7178 break;
7179 case 3: /* ldrexh */
426f5abc 7180 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7181 break;
7182 default:
7183 abort();
7184 }
9ee6e8bb
PB
7185 } else {
7186 rm = insn & 0xf;
86753403
PB
7187 switch (op1) {
7188 case 0: /* strex */
426f5abc 7189 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7190 break;
7191 case 1: /* strexd */
502e64fe 7192 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7193 break;
7194 case 2: /* strexb */
426f5abc 7195 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7196 break;
7197 case 3: /* strexh */
426f5abc 7198 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7199 break;
7200 default:
7201 abort();
7202 }
9ee6e8bb 7203 }
3174f8e9 7204 tcg_temp_free(addr);
9ee6e8bb
PB
7205 } else {
7206 /* SWP instruction */
7207 rm = (insn) & 0xf;
7208
8984bd2e
PB
7209 /* ??? This is not really atomic. However we know
7210 we never have multiple CPUs running in parallel,
7211 so it is good enough. */
7212 addr = load_reg(s, rn);
7213 tmp = load_reg(s, rm);
9ee6e8bb 7214 if (insn & (1 << 22)) {
8984bd2e
PB
7215 tmp2 = gen_ld8u(addr, IS_USER(s));
7216 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7217 } else {
8984bd2e
PB
7218 tmp2 = gen_ld32(addr, IS_USER(s));
7219 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7220 }
7d1b0095 7221 tcg_temp_free_i32(addr);
8984bd2e 7222 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7223 }
7224 }
7225 } else {
7226 int address_offset;
7227 int load;
7228 /* Misc load/store */
7229 rn = (insn >> 16) & 0xf;
7230 rd = (insn >> 12) & 0xf;
b0109805 7231 addr = load_reg(s, rn);
9ee6e8bb 7232 if (insn & (1 << 24))
b0109805 7233 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7234 address_offset = 0;
7235 if (insn & (1 << 20)) {
7236 /* load */
7237 switch(sh) {
7238 case 1:
b0109805 7239 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7240 break;
7241 case 2:
b0109805 7242 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7243 break;
7244 default:
7245 case 3:
b0109805 7246 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7247 break;
7248 }
7249 load = 1;
7250 } else if (sh & 2) {
be5e7a76 7251 ARCH(5TE);
9ee6e8bb
PB
7252 /* doubleword */
7253 if (sh & 1) {
7254 /* store */
b0109805
PB
7255 tmp = load_reg(s, rd);
7256 gen_st32(tmp, addr, IS_USER(s));
7257 tcg_gen_addi_i32(addr, addr, 4);
7258 tmp = load_reg(s, rd + 1);
7259 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7260 load = 0;
7261 } else {
7262 /* load */
b0109805
PB
7263 tmp = gen_ld32(addr, IS_USER(s));
7264 store_reg(s, rd, tmp);
7265 tcg_gen_addi_i32(addr, addr, 4);
7266 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7267 rd++;
7268 load = 1;
7269 }
7270 address_offset = -4;
7271 } else {
7272 /* store */
b0109805
PB
7273 tmp = load_reg(s, rd);
7274 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7275 load = 0;
7276 }
7277 /* Perform base writeback before the loaded value to
7278 ensure correct behavior with overlapping index registers.
7279 ldrd with base writeback is is undefined if the
7280 destination and index registers overlap. */
7281 if (!(insn & (1 << 24))) {
b0109805
PB
7282 gen_add_datah_offset(s, insn, address_offset, addr);
7283 store_reg(s, rn, addr);
9ee6e8bb
PB
7284 } else if (insn & (1 << 21)) {
7285 if (address_offset)
b0109805
PB
7286 tcg_gen_addi_i32(addr, addr, address_offset);
7287 store_reg(s, rn, addr);
7288 } else {
7d1b0095 7289 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7290 }
7291 if (load) {
7292 /* Complete the load. */
b0109805 7293 store_reg(s, rd, tmp);
9ee6e8bb
PB
7294 }
7295 }
7296 break;
7297 case 0x4:
7298 case 0x5:
7299 goto do_ldst;
7300 case 0x6:
7301 case 0x7:
7302 if (insn & (1 << 4)) {
7303 ARCH(6);
7304 /* Armv6 Media instructions. */
7305 rm = insn & 0xf;
7306 rn = (insn >> 16) & 0xf;
2c0262af 7307 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7308 rs = (insn >> 8) & 0xf;
7309 switch ((insn >> 23) & 3) {
7310 case 0: /* Parallel add/subtract. */
7311 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7312 tmp = load_reg(s, rn);
7313 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7314 sh = (insn >> 5) & 7;
7315 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7316 goto illegal_op;
6ddbc6e4 7317 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7318 tcg_temp_free_i32(tmp2);
6ddbc6e4 7319 store_reg(s, rd, tmp);
9ee6e8bb
PB
7320 break;
7321 case 1:
7322 if ((insn & 0x00700020) == 0) {
6c95676b 7323 /* Halfword pack. */
3670669c
PB
7324 tmp = load_reg(s, rn);
7325 tmp2 = load_reg(s, rm);
9ee6e8bb 7326 shift = (insn >> 7) & 0x1f;
3670669c
PB
7327 if (insn & (1 << 6)) {
7328 /* pkhtb */
22478e79
AZ
7329 if (shift == 0)
7330 shift = 31;
7331 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7332 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7333 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7334 } else {
7335 /* pkhbt */
22478e79
AZ
7336 if (shift)
7337 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7338 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7339 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7340 }
7341 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7342 tcg_temp_free_i32(tmp2);
3670669c 7343 store_reg(s, rd, tmp);
9ee6e8bb
PB
7344 } else if ((insn & 0x00200020) == 0x00200000) {
7345 /* [us]sat */
6ddbc6e4 7346 tmp = load_reg(s, rm);
9ee6e8bb
PB
7347 shift = (insn >> 7) & 0x1f;
7348 if (insn & (1 << 6)) {
7349 if (shift == 0)
7350 shift = 31;
6ddbc6e4 7351 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7352 } else {
6ddbc6e4 7353 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7354 }
7355 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7356 tmp2 = tcg_const_i32(sh);
7357 if (insn & (1 << 22))
7358 gen_helper_usat(tmp, tmp, tmp2);
7359 else
7360 gen_helper_ssat(tmp, tmp, tmp2);
7361 tcg_temp_free_i32(tmp2);
6ddbc6e4 7362 store_reg(s, rd, tmp);
9ee6e8bb
PB
7363 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7364 /* [us]sat16 */
6ddbc6e4 7365 tmp = load_reg(s, rm);
9ee6e8bb 7366 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7367 tmp2 = tcg_const_i32(sh);
7368 if (insn & (1 << 22))
7369 gen_helper_usat16(tmp, tmp, tmp2);
7370 else
7371 gen_helper_ssat16(tmp, tmp, tmp2);
7372 tcg_temp_free_i32(tmp2);
6ddbc6e4 7373 store_reg(s, rd, tmp);
9ee6e8bb
PB
7374 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7375 /* Select bytes. */
6ddbc6e4
PB
7376 tmp = load_reg(s, rn);
7377 tmp2 = load_reg(s, rm);
7d1b0095 7378 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7379 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7380 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7381 tcg_temp_free_i32(tmp3);
7382 tcg_temp_free_i32(tmp2);
6ddbc6e4 7383 store_reg(s, rd, tmp);
9ee6e8bb 7384 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7385 tmp = load_reg(s, rm);
9ee6e8bb 7386 shift = (insn >> 10) & 3;
1301f322 7387 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7388 rotate, a shift is sufficient. */
7389 if (shift != 0)
f669df27 7390 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7391 op1 = (insn >> 20) & 7;
7392 switch (op1) {
5e3f878a
PB
7393 case 0: gen_sxtb16(tmp); break;
7394 case 2: gen_sxtb(tmp); break;
7395 case 3: gen_sxth(tmp); break;
7396 case 4: gen_uxtb16(tmp); break;
7397 case 6: gen_uxtb(tmp); break;
7398 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7399 default: goto illegal_op;
7400 }
7401 if (rn != 15) {
5e3f878a 7402 tmp2 = load_reg(s, rn);
9ee6e8bb 7403 if ((op1 & 3) == 0) {
5e3f878a 7404 gen_add16(tmp, tmp2);
9ee6e8bb 7405 } else {
5e3f878a 7406 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7407 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7408 }
7409 }
6c95676b 7410 store_reg(s, rd, tmp);
9ee6e8bb
PB
7411 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7412 /* rev */
b0109805 7413 tmp = load_reg(s, rm);
9ee6e8bb
PB
7414 if (insn & (1 << 22)) {
7415 if (insn & (1 << 7)) {
b0109805 7416 gen_revsh(tmp);
9ee6e8bb
PB
7417 } else {
7418 ARCH(6T2);
b0109805 7419 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7420 }
7421 } else {
7422 if (insn & (1 << 7))
b0109805 7423 gen_rev16(tmp);
9ee6e8bb 7424 else
66896cb8 7425 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7426 }
b0109805 7427 store_reg(s, rd, tmp);
9ee6e8bb
PB
7428 } else {
7429 goto illegal_op;
7430 }
7431 break;
7432 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7433 tmp = load_reg(s, rm);
7434 tmp2 = load_reg(s, rs);
9ee6e8bb 7435 if (insn & (1 << 20)) {
838fa72d
AJ
7436 /* Signed multiply most significant [accumulate].
7437 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7438 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7439
955a7dd5 7440 if (rd != 15) {
838fa72d 7441 tmp = load_reg(s, rd);
9ee6e8bb 7442 if (insn & (1 << 6)) {
838fa72d 7443 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7444 } else {
838fa72d 7445 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7446 }
7447 }
838fa72d
AJ
7448 if (insn & (1 << 5)) {
7449 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7450 }
7451 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7452 tmp = tcg_temp_new_i32();
838fa72d
AJ
7453 tcg_gen_trunc_i64_i32(tmp, tmp64);
7454 tcg_temp_free_i64(tmp64);
955a7dd5 7455 store_reg(s, rn, tmp);
9ee6e8bb
PB
7456 } else {
7457 if (insn & (1 << 5))
5e3f878a
PB
7458 gen_swap_half(tmp2);
7459 gen_smul_dual(tmp, tmp2);
5e3f878a 7460 if (insn & (1 << 6)) {
e1d177b9 7461 /* This subtraction cannot overflow. */
5e3f878a
PB
7462 tcg_gen_sub_i32(tmp, tmp, tmp2);
7463 } else {
e1d177b9
PM
7464 /* This addition cannot overflow 32 bits;
7465 * however it may overflow considered as a signed
7466 * operation, in which case we must set the Q flag.
7467 */
7468 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7469 }
7d1b0095 7470 tcg_temp_free_i32(tmp2);
9ee6e8bb 7471 if (insn & (1 << 22)) {
5e3f878a 7472 /* smlald, smlsld */
a7812ae4
PB
7473 tmp64 = tcg_temp_new_i64();
7474 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7475 tcg_temp_free_i32(tmp);
a7812ae4
PB
7476 gen_addq(s, tmp64, rd, rn);
7477 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7478 tcg_temp_free_i64(tmp64);
9ee6e8bb 7479 } else {
5e3f878a 7480 /* smuad, smusd, smlad, smlsd */
22478e79 7481 if (rd != 15)
9ee6e8bb 7482 {
22478e79 7483 tmp2 = load_reg(s, rd);
5e3f878a 7484 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7485 tcg_temp_free_i32(tmp2);
9ee6e8bb 7486 }
22478e79 7487 store_reg(s, rn, tmp);
9ee6e8bb
PB
7488 }
7489 }
7490 break;
7491 case 3:
7492 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7493 switch (op1) {
7494 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7495 ARCH(6);
7496 tmp = load_reg(s, rm);
7497 tmp2 = load_reg(s, rs);
7498 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7499 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7500 if (rd != 15) {
7501 tmp2 = load_reg(s, rd);
6ddbc6e4 7502 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7503 tcg_temp_free_i32(tmp2);
9ee6e8bb 7504 }
ded9d295 7505 store_reg(s, rn, tmp);
9ee6e8bb
PB
7506 break;
7507 case 0x20: case 0x24: case 0x28: case 0x2c:
7508 /* Bitfield insert/clear. */
7509 ARCH(6T2);
7510 shift = (insn >> 7) & 0x1f;
7511 i = (insn >> 16) & 0x1f;
7512 i = i + 1 - shift;
7513 if (rm == 15) {
7d1b0095 7514 tmp = tcg_temp_new_i32();
5e3f878a 7515 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7516 } else {
5e3f878a 7517 tmp = load_reg(s, rm);
9ee6e8bb
PB
7518 }
7519 if (i != 32) {
5e3f878a 7520 tmp2 = load_reg(s, rd);
8f8e3aa4 7521 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7522 tcg_temp_free_i32(tmp2);
9ee6e8bb 7523 }
5e3f878a 7524 store_reg(s, rd, tmp);
9ee6e8bb
PB
7525 break;
7526 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7527 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7528 ARCH(6T2);
5e3f878a 7529 tmp = load_reg(s, rm);
9ee6e8bb
PB
7530 shift = (insn >> 7) & 0x1f;
7531 i = ((insn >> 16) & 0x1f) + 1;
7532 if (shift + i > 32)
7533 goto illegal_op;
7534 if (i < 32) {
7535 if (op1 & 0x20) {
5e3f878a 7536 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7537 } else {
5e3f878a 7538 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7539 }
7540 }
5e3f878a 7541 store_reg(s, rd, tmp);
9ee6e8bb
PB
7542 break;
7543 default:
7544 goto illegal_op;
7545 }
7546 break;
7547 }
7548 break;
7549 }
7550 do_ldst:
7551 /* Check for undefined extension instructions
7552 * per the ARM Bible IE:
7553 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7554 */
7555 sh = (0xf << 20) | (0xf << 4);
7556 if (op1 == 0x7 && ((insn & sh) == sh))
7557 {
7558 goto illegal_op;
7559 }
7560 /* load/store byte/word */
7561 rn = (insn >> 16) & 0xf;
7562 rd = (insn >> 12) & 0xf;
b0109805 7563 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7564 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7565 if (insn & (1 << 24))
b0109805 7566 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7567 if (insn & (1 << 20)) {
7568 /* load */
9ee6e8bb 7569 if (insn & (1 << 22)) {
b0109805 7570 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7571 } else {
b0109805 7572 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7573 }
9ee6e8bb
PB
7574 } else {
7575 /* store */
b0109805 7576 tmp = load_reg(s, rd);
9ee6e8bb 7577 if (insn & (1 << 22))
b0109805 7578 gen_st8(tmp, tmp2, i);
9ee6e8bb 7579 else
b0109805 7580 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7581 }
7582 if (!(insn & (1 << 24))) {
b0109805
PB
7583 gen_add_data_offset(s, insn, tmp2);
7584 store_reg(s, rn, tmp2);
7585 } else if (insn & (1 << 21)) {
7586 store_reg(s, rn, tmp2);
7587 } else {
7d1b0095 7588 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7589 }
7590 if (insn & (1 << 20)) {
7591 /* Complete the load. */
be5e7a76 7592 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7593 }
7594 break;
7595 case 0x08:
7596 case 0x09:
7597 {
7598 int j, n, user, loaded_base;
b0109805 7599 TCGv loaded_var;
9ee6e8bb
PB
7600 /* load/store multiple words */
7601 /* XXX: store correct base if write back */
7602 user = 0;
7603 if (insn & (1 << 22)) {
7604 if (IS_USER(s))
7605 goto illegal_op; /* only usable in supervisor mode */
7606
7607 if ((insn & (1 << 15)) == 0)
7608 user = 1;
7609 }
7610 rn = (insn >> 16) & 0xf;
b0109805 7611 addr = load_reg(s, rn);
9ee6e8bb
PB
7612
7613 /* compute total size */
7614 loaded_base = 0;
a50f5b91 7615 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7616 n = 0;
7617 for(i=0;i<16;i++) {
7618 if (insn & (1 << i))
7619 n++;
7620 }
7621 /* XXX: test invalid n == 0 case ? */
7622 if (insn & (1 << 23)) {
7623 if (insn & (1 << 24)) {
7624 /* pre increment */
b0109805 7625 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7626 } else {
7627 /* post increment */
7628 }
7629 } else {
7630 if (insn & (1 << 24)) {
7631 /* pre decrement */
b0109805 7632 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7633 } else {
7634 /* post decrement */
7635 if (n != 1)
b0109805 7636 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7637 }
7638 }
7639 j = 0;
7640 for(i=0;i<16;i++) {
7641 if (insn & (1 << i)) {
7642 if (insn & (1 << 20)) {
7643 /* load */
b0109805 7644 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7645 if (user) {
b75263d6
JR
7646 tmp2 = tcg_const_i32(i);
7647 gen_helper_set_user_reg(tmp2, tmp);
7648 tcg_temp_free_i32(tmp2);
7d1b0095 7649 tcg_temp_free_i32(tmp);
9ee6e8bb 7650 } else if (i == rn) {
b0109805 7651 loaded_var = tmp;
9ee6e8bb
PB
7652 loaded_base = 1;
7653 } else {
be5e7a76 7654 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7655 }
7656 } else {
7657 /* store */
7658 if (i == 15) {
7659 /* special case: r15 = PC + 8 */
7660 val = (long)s->pc + 4;
7d1b0095 7661 tmp = tcg_temp_new_i32();
b0109805 7662 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7663 } else if (user) {
7d1b0095 7664 tmp = tcg_temp_new_i32();
b75263d6
JR
7665 tmp2 = tcg_const_i32(i);
7666 gen_helper_get_user_reg(tmp, tmp2);
7667 tcg_temp_free_i32(tmp2);
9ee6e8bb 7668 } else {
b0109805 7669 tmp = load_reg(s, i);
9ee6e8bb 7670 }
b0109805 7671 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7672 }
7673 j++;
7674 /* no need to add after the last transfer */
7675 if (j != n)
b0109805 7676 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7677 }
7678 }
7679 if (insn & (1 << 21)) {
7680 /* write back */
7681 if (insn & (1 << 23)) {
7682 if (insn & (1 << 24)) {
7683 /* pre increment */
7684 } else {
7685 /* post increment */
b0109805 7686 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7687 }
7688 } else {
7689 if (insn & (1 << 24)) {
7690 /* pre decrement */
7691 if (n != 1)
b0109805 7692 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7693 } else {
7694 /* post decrement */
b0109805 7695 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7696 }
7697 }
b0109805
PB
7698 store_reg(s, rn, addr);
7699 } else {
7d1b0095 7700 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7701 }
7702 if (loaded_base) {
b0109805 7703 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7704 }
7705 if ((insn & (1 << 22)) && !user) {
7706 /* Restore CPSR from SPSR. */
d9ba4830
PB
7707 tmp = load_cpu_field(spsr);
7708 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7709 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7710 s->is_jmp = DISAS_UPDATE;
7711 }
7712 }
7713 break;
7714 case 0xa:
7715 case 0xb:
7716 {
7717 int32_t offset;
7718
7719 /* branch (and link) */
7720 val = (int32_t)s->pc;
7721 if (insn & (1 << 24)) {
7d1b0095 7722 tmp = tcg_temp_new_i32();
5e3f878a
PB
7723 tcg_gen_movi_i32(tmp, val);
7724 store_reg(s, 14, tmp);
9ee6e8bb
PB
7725 }
7726 offset = (((int32_t)insn << 8) >> 8);
7727 val += (offset << 2) + 4;
7728 gen_jmp(s, val);
7729 }
7730 break;
7731 case 0xc:
7732 case 0xd:
7733 case 0xe:
7734 /* Coprocessor. */
7735 if (disas_coproc_insn(env, s, insn))
7736 goto illegal_op;
7737 break;
7738 case 0xf:
7739 /* swi */
5e3f878a 7740 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7741 s->is_jmp = DISAS_SWI;
7742 break;
7743 default:
7744 illegal_op:
bc4a0de0 7745 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7746 break;
7747 }
7748 }
7749}
7750
/* Return true if this is a Thumb-2 logical op (opcodes 0..7).  */
static int
thumb2_logic_op(int op)
{
    return (op >= 8) ? 0 : 1;
}
7757
7758/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7759 then set condition code flags based on the result of the operation.
7760 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7761 to the high bit of T1.
7762 Returns zero if the opcode is valid. */
7763
7764static int
396e467c 7765gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7766{
7767 int logic_cc;
7768
7769 logic_cc = 0;
7770 switch (op) {
7771 case 0: /* and */
396e467c 7772 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7773 logic_cc = conds;
7774 break;
7775 case 1: /* bic */
f669df27 7776 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7777 logic_cc = conds;
7778 break;
7779 case 2: /* orr */
396e467c 7780 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7781 logic_cc = conds;
7782 break;
7783 case 3: /* orn */
29501f1b 7784 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7785 logic_cc = conds;
7786 break;
7787 case 4: /* eor */
396e467c 7788 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7789 logic_cc = conds;
7790 break;
7791 case 8: /* add */
7792 if (conds)
396e467c 7793 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7794 else
396e467c 7795 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7796 break;
7797 case 10: /* adc */
7798 if (conds)
396e467c 7799 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7800 else
396e467c 7801 gen_adc(t0, t1);
9ee6e8bb
PB
7802 break;
7803 case 11: /* sbc */
7804 if (conds)
396e467c 7805 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7806 else
396e467c 7807 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7808 break;
7809 case 13: /* sub */
7810 if (conds)
396e467c 7811 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7812 else
396e467c 7813 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7814 break;
7815 case 14: /* rsb */
7816 if (conds)
396e467c 7817 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7818 else
396e467c 7819 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7820 break;
7821 default: /* 5, 6, 7, 9, 12, 15. */
7822 return 1;
7823 }
7824 if (logic_cc) {
396e467c 7825 gen_logic_CC(t0);
9ee6e8bb 7826 if (shifter_out)
396e467c 7827 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7828 }
7829 return 0;
7830}
7831
7832/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7833 is not legal. */
7834static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7835{
b0109805 7836 uint32_t insn, imm, shift, offset;
9ee6e8bb 7837 uint32_t rd, rn, rm, rs;
b26eefb6 7838 TCGv tmp;
6ddbc6e4
PB
7839 TCGv tmp2;
7840 TCGv tmp3;
b0109805 7841 TCGv addr;
a7812ae4 7842 TCGv_i64 tmp64;
9ee6e8bb
PB
7843 int op;
7844 int shiftop;
7845 int conds;
7846 int logic_cc;
7847
7848 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7849 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7850 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7851 16-bit instructions to get correct prefetch abort behavior. */
7852 insn = insn_hw1;
7853 if ((insn & (1 << 12)) == 0) {
be5e7a76 7854 ARCH(5);
9ee6e8bb
PB
7855 /* Second half of blx. */
7856 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7857 tmp = load_reg(s, 14);
7858 tcg_gen_addi_i32(tmp, tmp, offset);
7859 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7860
7d1b0095 7861 tmp2 = tcg_temp_new_i32();
b0109805 7862 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7863 store_reg(s, 14, tmp2);
7864 gen_bx(s, tmp);
9ee6e8bb
PB
7865 return 0;
7866 }
7867 if (insn & (1 << 11)) {
7868 /* Second half of bl. */
7869 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7870 tmp = load_reg(s, 14);
6a0d8a1d 7871 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7872
7d1b0095 7873 tmp2 = tcg_temp_new_i32();
b0109805 7874 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7875 store_reg(s, 14, tmp2);
7876 gen_bx(s, tmp);
9ee6e8bb
PB
7877 return 0;
7878 }
7879 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7880 /* Instruction spans a page boundary. Implement it as two
7881 16-bit instructions in case the second half causes an
7882 prefetch abort. */
7883 offset = ((int32_t)insn << 21) >> 9;
396e467c 7884 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7885 return 0;
7886 }
7887 /* Fall through to 32-bit decode. */
7888 }
7889
7890 insn = lduw_code(s->pc);
7891 s->pc += 2;
7892 insn |= (uint32_t)insn_hw1 << 16;
7893
7894 if ((insn & 0xf800e800) != 0xf000e800) {
7895 ARCH(6T2);
7896 }
7897
7898 rn = (insn >> 16) & 0xf;
7899 rs = (insn >> 12) & 0xf;
7900 rd = (insn >> 8) & 0xf;
7901 rm = insn & 0xf;
7902 switch ((insn >> 25) & 0xf) {
7903 case 0: case 1: case 2: case 3:
7904 /* 16-bit instructions. Should never happen. */
7905 abort();
7906 case 4:
7907 if (insn & (1 << 22)) {
7908 /* Other load/store, table branch. */
7909 if (insn & 0x01200000) {
7910 /* Load/store doubleword. */
7911 if (rn == 15) {
7d1b0095 7912 addr = tcg_temp_new_i32();
b0109805 7913 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7914 } else {
b0109805 7915 addr = load_reg(s, rn);
9ee6e8bb
PB
7916 }
7917 offset = (insn & 0xff) * 4;
7918 if ((insn & (1 << 23)) == 0)
7919 offset = -offset;
7920 if (insn & (1 << 24)) {
b0109805 7921 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7922 offset = 0;
7923 }
7924 if (insn & (1 << 20)) {
7925 /* ldrd */
b0109805
PB
7926 tmp = gen_ld32(addr, IS_USER(s));
7927 store_reg(s, rs, tmp);
7928 tcg_gen_addi_i32(addr, addr, 4);
7929 tmp = gen_ld32(addr, IS_USER(s));
7930 store_reg(s, rd, tmp);
9ee6e8bb
PB
7931 } else {
7932 /* strd */
b0109805
PB
7933 tmp = load_reg(s, rs);
7934 gen_st32(tmp, addr, IS_USER(s));
7935 tcg_gen_addi_i32(addr, addr, 4);
7936 tmp = load_reg(s, rd);
7937 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7938 }
7939 if (insn & (1 << 21)) {
7940 /* Base writeback. */
7941 if (rn == 15)
7942 goto illegal_op;
b0109805
PB
7943 tcg_gen_addi_i32(addr, addr, offset - 4);
7944 store_reg(s, rn, addr);
7945 } else {
7d1b0095 7946 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7947 }
7948 } else if ((insn & (1 << 23)) == 0) {
7949 /* Load/store exclusive word. */
3174f8e9 7950 addr = tcg_temp_local_new();
98a46317 7951 load_reg_var(s, addr, rn);
426f5abc 7952 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7953 if (insn & (1 << 20)) {
426f5abc 7954 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7955 } else {
426f5abc 7956 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7957 }
3174f8e9 7958 tcg_temp_free(addr);
9ee6e8bb
PB
7959 } else if ((insn & (1 << 6)) == 0) {
7960 /* Table Branch. */
7961 if (rn == 15) {
7d1b0095 7962 addr = tcg_temp_new_i32();
b0109805 7963 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7964 } else {
b0109805 7965 addr = load_reg(s, rn);
9ee6e8bb 7966 }
b26eefb6 7967 tmp = load_reg(s, rm);
b0109805 7968 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7969 if (insn & (1 << 4)) {
7970 /* tbh */
b0109805 7971 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 7972 tcg_temp_free_i32(tmp);
b0109805 7973 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7974 } else { /* tbb */
7d1b0095 7975 tcg_temp_free_i32(tmp);
b0109805 7976 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7977 }
7d1b0095 7978 tcg_temp_free_i32(addr);
b0109805
PB
7979 tcg_gen_shli_i32(tmp, tmp, 1);
7980 tcg_gen_addi_i32(tmp, tmp, s->pc);
7981 store_reg(s, 15, tmp);
9ee6e8bb
PB
7982 } else {
7983 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7984 ARCH(7);
9ee6e8bb 7985 op = (insn >> 4) & 0x3;
426f5abc
PB
7986 if (op == 2) {
7987 goto illegal_op;
7988 }
3174f8e9 7989 addr = tcg_temp_local_new();
98a46317 7990 load_reg_var(s, addr, rn);
9ee6e8bb 7991 if (insn & (1 << 20)) {
426f5abc 7992 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7993 } else {
426f5abc 7994 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7995 }
3174f8e9 7996 tcg_temp_free(addr);
9ee6e8bb
PB
7997 }
7998 } else {
7999 /* Load/store multiple, RFE, SRS. */
8000 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8001 /* Not available in user mode. */
b0109805 8002 if (IS_USER(s))
9ee6e8bb
PB
8003 goto illegal_op;
8004 if (insn & (1 << 20)) {
8005 /* rfe */
b0109805
PB
8006 addr = load_reg(s, rn);
8007 if ((insn & (1 << 24)) == 0)
8008 tcg_gen_addi_i32(addr, addr, -8);
8009 /* Load PC into tmp and CPSR into tmp2. */
8010 tmp = gen_ld32(addr, 0);
8011 tcg_gen_addi_i32(addr, addr, 4);
8012 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8013 if (insn & (1 << 21)) {
8014 /* Base writeback. */
b0109805
PB
8015 if (insn & (1 << 24)) {
8016 tcg_gen_addi_i32(addr, addr, 4);
8017 } else {
8018 tcg_gen_addi_i32(addr, addr, -4);
8019 }
8020 store_reg(s, rn, addr);
8021 } else {
7d1b0095 8022 tcg_temp_free_i32(addr);
9ee6e8bb 8023 }
b0109805 8024 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8025 } else {
8026 /* srs */
8027 op = (insn & 0x1f);
7d1b0095 8028 addr = tcg_temp_new_i32();
39ea3d4e
PM
8029 tmp = tcg_const_i32(op);
8030 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8031 tcg_temp_free_i32(tmp);
9ee6e8bb 8032 if ((insn & (1 << 24)) == 0) {
b0109805 8033 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8034 }
b0109805
PB
8035 tmp = load_reg(s, 14);
8036 gen_st32(tmp, addr, 0);
8037 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8038 tmp = tcg_temp_new_i32();
b0109805
PB
8039 gen_helper_cpsr_read(tmp);
8040 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8041 if (insn & (1 << 21)) {
8042 if ((insn & (1 << 24)) == 0) {
b0109805 8043 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8044 } else {
b0109805 8045 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8046 }
39ea3d4e
PM
8047 tmp = tcg_const_i32(op);
8048 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8049 tcg_temp_free_i32(tmp);
b0109805 8050 } else {
7d1b0095 8051 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8052 }
8053 }
8054 } else {
5856d44e
YO
8055 int i, loaded_base = 0;
8056 TCGv loaded_var;
9ee6e8bb 8057 /* Load/store multiple. */
b0109805 8058 addr = load_reg(s, rn);
9ee6e8bb
PB
8059 offset = 0;
8060 for (i = 0; i < 16; i++) {
8061 if (insn & (1 << i))
8062 offset += 4;
8063 }
8064 if (insn & (1 << 24)) {
b0109805 8065 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8066 }
8067
5856d44e 8068 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8069 for (i = 0; i < 16; i++) {
8070 if ((insn & (1 << i)) == 0)
8071 continue;
8072 if (insn & (1 << 20)) {
8073 /* Load. */
b0109805 8074 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8075 if (i == 15) {
b0109805 8076 gen_bx(s, tmp);
5856d44e
YO
8077 } else if (i == rn) {
8078 loaded_var = tmp;
8079 loaded_base = 1;
9ee6e8bb 8080 } else {
b0109805 8081 store_reg(s, i, tmp);
9ee6e8bb
PB
8082 }
8083 } else {
8084 /* Store. */
b0109805
PB
8085 tmp = load_reg(s, i);
8086 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8087 }
b0109805 8088 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8089 }
5856d44e
YO
8090 if (loaded_base) {
8091 store_reg(s, rn, loaded_var);
8092 }
9ee6e8bb
PB
8093 if (insn & (1 << 21)) {
8094 /* Base register writeback. */
8095 if (insn & (1 << 24)) {
b0109805 8096 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8097 }
8098 /* Fault if writeback register is in register list. */
8099 if (insn & (1 << rn))
8100 goto illegal_op;
b0109805
PB
8101 store_reg(s, rn, addr);
8102 } else {
7d1b0095 8103 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8104 }
8105 }
8106 }
8107 break;
2af9ab77
JB
8108 case 5:
8109
9ee6e8bb 8110 op = (insn >> 21) & 0xf;
2af9ab77
JB
8111 if (op == 6) {
8112 /* Halfword pack. */
8113 tmp = load_reg(s, rn);
8114 tmp2 = load_reg(s, rm);
8115 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8116 if (insn & (1 << 5)) {
8117 /* pkhtb */
8118 if (shift == 0)
8119 shift = 31;
8120 tcg_gen_sari_i32(tmp2, tmp2, shift);
8121 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8122 tcg_gen_ext16u_i32(tmp2, tmp2);
8123 } else {
8124 /* pkhbt */
8125 if (shift)
8126 tcg_gen_shli_i32(tmp2, tmp2, shift);
8127 tcg_gen_ext16u_i32(tmp, tmp);
8128 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8129 }
8130 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8131 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8132 store_reg(s, rd, tmp);
8133 } else {
2af9ab77
JB
8134 /* Data processing register constant shift. */
8135 if (rn == 15) {
7d1b0095 8136 tmp = tcg_temp_new_i32();
2af9ab77
JB
8137 tcg_gen_movi_i32(tmp, 0);
8138 } else {
8139 tmp = load_reg(s, rn);
8140 }
8141 tmp2 = load_reg(s, rm);
8142
8143 shiftop = (insn >> 4) & 3;
8144 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8145 conds = (insn & (1 << 20)) != 0;
8146 logic_cc = (conds && thumb2_logic_op(op));
8147 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8148 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8149 goto illegal_op;
7d1b0095 8150 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8151 if (rd != 15) {
8152 store_reg(s, rd, tmp);
8153 } else {
7d1b0095 8154 tcg_temp_free_i32(tmp);
2af9ab77 8155 }
3174f8e9 8156 }
9ee6e8bb
PB
8157 break;
8158 case 13: /* Misc data processing. */
8159 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8160 if (op < 4 && (insn & 0xf000) != 0xf000)
8161 goto illegal_op;
8162 switch (op) {
8163 case 0: /* Register controlled shift. */
8984bd2e
PB
8164 tmp = load_reg(s, rn);
8165 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8166 if ((insn & 0x70) != 0)
8167 goto illegal_op;
8168 op = (insn >> 21) & 3;
8984bd2e
PB
8169 logic_cc = (insn & (1 << 20)) != 0;
8170 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8171 if (logic_cc)
8172 gen_logic_CC(tmp);
21aeb343 8173 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8174 break;
8175 case 1: /* Sign/zero extend. */
5e3f878a 8176 tmp = load_reg(s, rm);
9ee6e8bb 8177 shift = (insn >> 4) & 3;
1301f322 8178 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8179 rotate, a shift is sufficient. */
8180 if (shift != 0)
f669df27 8181 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8182 op = (insn >> 20) & 7;
8183 switch (op) {
5e3f878a
PB
8184 case 0: gen_sxth(tmp); break;
8185 case 1: gen_uxth(tmp); break;
8186 case 2: gen_sxtb16(tmp); break;
8187 case 3: gen_uxtb16(tmp); break;
8188 case 4: gen_sxtb(tmp); break;
8189 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8190 default: goto illegal_op;
8191 }
8192 if (rn != 15) {
5e3f878a 8193 tmp2 = load_reg(s, rn);
9ee6e8bb 8194 if ((op >> 1) == 1) {
5e3f878a 8195 gen_add16(tmp, tmp2);
9ee6e8bb 8196 } else {
5e3f878a 8197 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8198 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8199 }
8200 }
5e3f878a 8201 store_reg(s, rd, tmp);
9ee6e8bb
PB
8202 break;
8203 case 2: /* SIMD add/subtract. */
8204 op = (insn >> 20) & 7;
8205 shift = (insn >> 4) & 7;
8206 if ((op & 3) == 3 || (shift & 3) == 3)
8207 goto illegal_op;
6ddbc6e4
PB
8208 tmp = load_reg(s, rn);
8209 tmp2 = load_reg(s, rm);
8210 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8211 tcg_temp_free_i32(tmp2);
6ddbc6e4 8212 store_reg(s, rd, tmp);
9ee6e8bb
PB
8213 break;
8214 case 3: /* Other data processing. */
8215 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8216 if (op < 4) {
8217 /* Saturating add/subtract. */
d9ba4830
PB
8218 tmp = load_reg(s, rn);
8219 tmp2 = load_reg(s, rm);
9ee6e8bb 8220 if (op & 1)
4809c612
JB
8221 gen_helper_double_saturate(tmp, tmp);
8222 if (op & 2)
d9ba4830 8223 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8224 else
d9ba4830 8225 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8226 tcg_temp_free_i32(tmp2);
9ee6e8bb 8227 } else {
d9ba4830 8228 tmp = load_reg(s, rn);
9ee6e8bb
PB
8229 switch (op) {
8230 case 0x0a: /* rbit */
d9ba4830 8231 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8232 break;
8233 case 0x08: /* rev */
66896cb8 8234 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8235 break;
8236 case 0x09: /* rev16 */
d9ba4830 8237 gen_rev16(tmp);
9ee6e8bb
PB
8238 break;
8239 case 0x0b: /* revsh */
d9ba4830 8240 gen_revsh(tmp);
9ee6e8bb
PB
8241 break;
8242 case 0x10: /* sel */
d9ba4830 8243 tmp2 = load_reg(s, rm);
7d1b0095 8244 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8245 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8246 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8247 tcg_temp_free_i32(tmp3);
8248 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8249 break;
8250 case 0x18: /* clz */
d9ba4830 8251 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8252 break;
8253 default:
8254 goto illegal_op;
8255 }
8256 }
d9ba4830 8257 store_reg(s, rd, tmp);
9ee6e8bb
PB
8258 break;
8259 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8260 op = (insn >> 4) & 0xf;
d9ba4830
PB
8261 tmp = load_reg(s, rn);
8262 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8263 switch ((insn >> 20) & 7) {
8264 case 0: /* 32 x 32 -> 32 */
d9ba4830 8265 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8266 tcg_temp_free_i32(tmp2);
9ee6e8bb 8267 if (rs != 15) {
d9ba4830 8268 tmp2 = load_reg(s, rs);
9ee6e8bb 8269 if (op)
d9ba4830 8270 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8271 else
d9ba4830 8272 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8273 tcg_temp_free_i32(tmp2);
9ee6e8bb 8274 }
9ee6e8bb
PB
8275 break;
8276 case 1: /* 16 x 16 -> 32 */
d9ba4830 8277 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8278 tcg_temp_free_i32(tmp2);
9ee6e8bb 8279 if (rs != 15) {
d9ba4830
PB
8280 tmp2 = load_reg(s, rs);
8281 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8282 tcg_temp_free_i32(tmp2);
9ee6e8bb 8283 }
9ee6e8bb
PB
8284 break;
8285 case 2: /* Dual multiply add. */
8286 case 4: /* Dual multiply subtract. */
8287 if (op)
d9ba4830
PB
8288 gen_swap_half(tmp2);
8289 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8290 if (insn & (1 << 22)) {
e1d177b9 8291 /* This subtraction cannot overflow. */
d9ba4830 8292 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8293 } else {
e1d177b9
PM
8294 /* This addition cannot overflow 32 bits;
8295 * however it may overflow considered as a signed
8296 * operation, in which case we must set the Q flag.
8297 */
8298 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8299 }
7d1b0095 8300 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8301 if (rs != 15)
8302 {
d9ba4830
PB
8303 tmp2 = load_reg(s, rs);
8304 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8305 tcg_temp_free_i32(tmp2);
9ee6e8bb 8306 }
9ee6e8bb
PB
8307 break;
8308 case 3: /* 32 * 16 -> 32msb */
8309 if (op)
d9ba4830 8310 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8311 else
d9ba4830 8312 gen_sxth(tmp2);
a7812ae4
PB
8313 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8314 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8315 tmp = tcg_temp_new_i32();
a7812ae4 8316 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8317 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8318 if (rs != 15)
8319 {
d9ba4830
PB
8320 tmp2 = load_reg(s, rs);
8321 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8322 tcg_temp_free_i32(tmp2);
9ee6e8bb 8323 }
9ee6e8bb 8324 break;
838fa72d
AJ
8325 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8326 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8327 if (rs != 15) {
838fa72d
AJ
8328 tmp = load_reg(s, rs);
8329 if (insn & (1 << 20)) {
8330 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8331 } else {
838fa72d 8332 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8333 }
2c0262af 8334 }
838fa72d
AJ
8335 if (insn & (1 << 4)) {
8336 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8337 }
8338 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8339 tmp = tcg_temp_new_i32();
838fa72d
AJ
8340 tcg_gen_trunc_i64_i32(tmp, tmp64);
8341 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8342 break;
8343 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8344 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8345 tcg_temp_free_i32(tmp2);
9ee6e8bb 8346 if (rs != 15) {
d9ba4830
PB
8347 tmp2 = load_reg(s, rs);
8348 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8349 tcg_temp_free_i32(tmp2);
5fd46862 8350 }
9ee6e8bb 8351 break;
2c0262af 8352 }
d9ba4830 8353 store_reg(s, rd, tmp);
2c0262af 8354 break;
9ee6e8bb
PB
8355 case 6: case 7: /* 64-bit multiply, Divide. */
8356 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8357 tmp = load_reg(s, rn);
8358 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8359 if ((op & 0x50) == 0x10) {
8360 /* sdiv, udiv */
8361 if (!arm_feature(env, ARM_FEATURE_DIV))
8362 goto illegal_op;
8363 if (op & 0x20)
5e3f878a 8364 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8365 else
5e3f878a 8366 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8367 tcg_temp_free_i32(tmp2);
5e3f878a 8368 store_reg(s, rd, tmp);
9ee6e8bb
PB
8369 } else if ((op & 0xe) == 0xc) {
8370 /* Dual multiply accumulate long. */
8371 if (op & 1)
5e3f878a
PB
8372 gen_swap_half(tmp2);
8373 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8374 if (op & 0x10) {
5e3f878a 8375 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8376 } else {
5e3f878a 8377 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8378 }
7d1b0095 8379 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8380 /* BUGFIX */
8381 tmp64 = tcg_temp_new_i64();
8382 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8383 tcg_temp_free_i32(tmp);
a7812ae4
PB
8384 gen_addq(s, tmp64, rs, rd);
8385 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8386 tcg_temp_free_i64(tmp64);
2c0262af 8387 } else {
9ee6e8bb
PB
8388 if (op & 0x20) {
8389 /* Unsigned 64-bit multiply */
a7812ae4 8390 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8391 } else {
9ee6e8bb
PB
8392 if (op & 8) {
8393 /* smlalxy */
5e3f878a 8394 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8395 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8396 tmp64 = tcg_temp_new_i64();
8397 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8398 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8399 } else {
8400 /* Signed 64-bit multiply */
a7812ae4 8401 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8402 }
b5ff1b31 8403 }
9ee6e8bb
PB
8404 if (op & 4) {
8405 /* umaal */
a7812ae4
PB
8406 gen_addq_lo(s, tmp64, rs);
8407 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8408 } else if (op & 0x40) {
8409 /* 64-bit accumulate. */
a7812ae4 8410 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8411 }
a7812ae4 8412 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8413 tcg_temp_free_i64(tmp64);
5fd46862 8414 }
2c0262af 8415 break;
9ee6e8bb
PB
8416 }
8417 break;
8418 case 6: case 7: case 14: case 15:
8419 /* Coprocessor. */
8420 if (((insn >> 24) & 3) == 3) {
8421 /* Translate into the equivalent ARM encoding. */
f06053e3 8422 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8423 if (disas_neon_data_insn(env, s, insn))
8424 goto illegal_op;
8425 } else {
8426 if (insn & (1 << 28))
8427 goto illegal_op;
8428 if (disas_coproc_insn (env, s, insn))
8429 goto illegal_op;
8430 }
8431 break;
8432 case 8: case 9: case 10: case 11:
8433 if (insn & (1 << 15)) {
8434 /* Branches, misc control. */
8435 if (insn & 0x5000) {
8436 /* Unconditional branch. */
8437 /* signextend(hw1[10:0]) -> offset[:12]. */
8438 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8439 /* hw1[10:0] -> offset[11:1]. */
8440 offset |= (insn & 0x7ff) << 1;
8441 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8442 offset[24:22] already have the same value because of the
8443 sign extension above. */
8444 offset ^= ((~insn) & (1 << 13)) << 10;
8445 offset ^= ((~insn) & (1 << 11)) << 11;
8446
9ee6e8bb
PB
8447 if (insn & (1 << 14)) {
8448 /* Branch and link. */
3174f8e9 8449 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8450 }
3b46e624 8451
b0109805 8452 offset += s->pc;
9ee6e8bb
PB
8453 if (insn & (1 << 12)) {
8454 /* b/bl */
b0109805 8455 gen_jmp(s, offset);
9ee6e8bb
PB
8456 } else {
8457 /* blx */
b0109805 8458 offset &= ~(uint32_t)2;
be5e7a76 8459 /* thumb2 bx, no need to check */
b0109805 8460 gen_bx_im(s, offset);
2c0262af 8461 }
9ee6e8bb
PB
8462 } else if (((insn >> 23) & 7) == 7) {
8463 /* Misc control */
8464 if (insn & (1 << 13))
8465 goto illegal_op;
8466
8467 if (insn & (1 << 26)) {
8468 /* Secure monitor call (v6Z) */
8469 goto illegal_op; /* not implemented. */
2c0262af 8470 } else {
9ee6e8bb
PB
8471 op = (insn >> 20) & 7;
8472 switch (op) {
8473 case 0: /* msr cpsr. */
8474 if (IS_M(env)) {
8984bd2e
PB
8475 tmp = load_reg(s, rn);
8476 addr = tcg_const_i32(insn & 0xff);
8477 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8478 tcg_temp_free_i32(addr);
7d1b0095 8479 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8480 gen_lookup_tb(s);
8481 break;
8482 }
8483 /* fall through */
8484 case 1: /* msr spsr. */
8485 if (IS_M(env))
8486 goto illegal_op;
2fbac54b
FN
8487 tmp = load_reg(s, rn);
8488 if (gen_set_psr(s,
9ee6e8bb 8489 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8490 op == 1, tmp))
9ee6e8bb
PB
8491 goto illegal_op;
8492 break;
8493 case 2: /* cps, nop-hint. */
8494 if (((insn >> 8) & 7) == 0) {
8495 gen_nop_hint(s, insn & 0xff);
8496 }
8497 /* Implemented as NOP in user mode. */
8498 if (IS_USER(s))
8499 break;
8500 offset = 0;
8501 imm = 0;
8502 if (insn & (1 << 10)) {
8503 if (insn & (1 << 7))
8504 offset |= CPSR_A;
8505 if (insn & (1 << 6))
8506 offset |= CPSR_I;
8507 if (insn & (1 << 5))
8508 offset |= CPSR_F;
8509 if (insn & (1 << 9))
8510 imm = CPSR_A | CPSR_I | CPSR_F;
8511 }
8512 if (insn & (1 << 8)) {
8513 offset |= 0x1f;
8514 imm |= (insn & 0x1f);
8515 }
8516 if (offset) {
2fbac54b 8517 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8518 }
8519 break;
8520 case 3: /* Special control operations. */
426f5abc 8521 ARCH(7);
9ee6e8bb
PB
8522 op = (insn >> 4) & 0xf;
8523 switch (op) {
8524 case 2: /* clrex */
426f5abc 8525 gen_clrex(s);
9ee6e8bb
PB
8526 break;
8527 case 4: /* dsb */
8528 case 5: /* dmb */
8529 case 6: /* isb */
8530 /* These execute as NOPs. */
9ee6e8bb
PB
8531 break;
8532 default:
8533 goto illegal_op;
8534 }
8535 break;
8536 case 4: /* bxj */
8537 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8538 tmp = load_reg(s, rn);
8539 gen_bx(s, tmp);
9ee6e8bb
PB
8540 break;
8541 case 5: /* Exception return. */
b8b45b68
RV
8542 if (IS_USER(s)) {
8543 goto illegal_op;
8544 }
8545 if (rn != 14 || rd != 15) {
8546 goto illegal_op;
8547 }
8548 tmp = load_reg(s, rn);
8549 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8550 gen_exception_return(s, tmp);
8551 break;
9ee6e8bb 8552 case 6: /* mrs cpsr. */
7d1b0095 8553 tmp = tcg_temp_new_i32();
9ee6e8bb 8554 if (IS_M(env)) {
8984bd2e
PB
8555 addr = tcg_const_i32(insn & 0xff);
8556 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8557 tcg_temp_free_i32(addr);
9ee6e8bb 8558 } else {
8984bd2e 8559 gen_helper_cpsr_read(tmp);
9ee6e8bb 8560 }
8984bd2e 8561 store_reg(s, rd, tmp);
9ee6e8bb
PB
8562 break;
8563 case 7: /* mrs spsr. */
8564 /* Not accessible in user mode. */
8565 if (IS_USER(s) || IS_M(env))
8566 goto illegal_op;
d9ba4830
PB
8567 tmp = load_cpu_field(spsr);
8568 store_reg(s, rd, tmp);
9ee6e8bb 8569 break;
2c0262af
FB
8570 }
8571 }
9ee6e8bb
PB
8572 } else {
8573 /* Conditional branch. */
8574 op = (insn >> 22) & 0xf;
8575 /* Generate a conditional jump to next instruction. */
8576 s->condlabel = gen_new_label();
d9ba4830 8577 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8578 s->condjmp = 1;
8579
8580 /* offset[11:1] = insn[10:0] */
8581 offset = (insn & 0x7ff) << 1;
8582 /* offset[17:12] = insn[21:16]. */
8583 offset |= (insn & 0x003f0000) >> 4;
8584 /* offset[31:20] = insn[26]. */
8585 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8586 /* offset[18] = insn[13]. */
8587 offset |= (insn & (1 << 13)) << 5;
8588 /* offset[19] = insn[11]. */
8589 offset |= (insn & (1 << 11)) << 8;
8590
8591 /* jump to the offset */
b0109805 8592 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8593 }
8594 } else {
8595 /* Data processing immediate. */
8596 if (insn & (1 << 25)) {
8597 if (insn & (1 << 24)) {
8598 if (insn & (1 << 20))
8599 goto illegal_op;
8600 /* Bitfield/Saturate. */
8601 op = (insn >> 21) & 7;
8602 imm = insn & 0x1f;
8603 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8604 if (rn == 15) {
7d1b0095 8605 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8606 tcg_gen_movi_i32(tmp, 0);
8607 } else {
8608 tmp = load_reg(s, rn);
8609 }
9ee6e8bb
PB
8610 switch (op) {
8611 case 2: /* Signed bitfield extract. */
8612 imm++;
8613 if (shift + imm > 32)
8614 goto illegal_op;
8615 if (imm < 32)
6ddbc6e4 8616 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8617 break;
8618 case 6: /* Unsigned bitfield extract. */
8619 imm++;
8620 if (shift + imm > 32)
8621 goto illegal_op;
8622 if (imm < 32)
6ddbc6e4 8623 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8624 break;
8625 case 3: /* Bitfield insert/clear. */
8626 if (imm < shift)
8627 goto illegal_op;
8628 imm = imm + 1 - shift;
8629 if (imm != 32) {
6ddbc6e4 8630 tmp2 = load_reg(s, rd);
8f8e3aa4 8631 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8632 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8633 }
8634 break;
8635 case 7:
8636 goto illegal_op;
8637 default: /* Saturate. */
9ee6e8bb
PB
8638 if (shift) {
8639 if (op & 1)
6ddbc6e4 8640 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8641 else
6ddbc6e4 8642 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8643 }
6ddbc6e4 8644 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8645 if (op & 4) {
8646 /* Unsigned. */
9ee6e8bb 8647 if ((op & 1) && shift == 0)
6ddbc6e4 8648 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8649 else
6ddbc6e4 8650 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8651 } else {
9ee6e8bb 8652 /* Signed. */
9ee6e8bb 8653 if ((op & 1) && shift == 0)
6ddbc6e4 8654 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8655 else
6ddbc6e4 8656 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8657 }
b75263d6 8658 tcg_temp_free_i32(tmp2);
9ee6e8bb 8659 break;
2c0262af 8660 }
6ddbc6e4 8661 store_reg(s, rd, tmp);
9ee6e8bb
PB
8662 } else {
8663 imm = ((insn & 0x04000000) >> 15)
8664 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8665 if (insn & (1 << 22)) {
8666 /* 16-bit immediate. */
8667 imm |= (insn >> 4) & 0xf000;
8668 if (insn & (1 << 23)) {
8669 /* movt */
5e3f878a 8670 tmp = load_reg(s, rd);
86831435 8671 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8672 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8673 } else {
9ee6e8bb 8674 /* movw */
7d1b0095 8675 tmp = tcg_temp_new_i32();
5e3f878a 8676 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8677 }
8678 } else {
9ee6e8bb
PB
8679 /* Add/sub 12-bit immediate. */
8680 if (rn == 15) {
b0109805 8681 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8682 if (insn & (1 << 23))
b0109805 8683 offset -= imm;
9ee6e8bb 8684 else
b0109805 8685 offset += imm;
7d1b0095 8686 tmp = tcg_temp_new_i32();
5e3f878a 8687 tcg_gen_movi_i32(tmp, offset);
2c0262af 8688 } else {
5e3f878a 8689 tmp = load_reg(s, rn);
9ee6e8bb 8690 if (insn & (1 << 23))
5e3f878a 8691 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8692 else
5e3f878a 8693 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8694 }
9ee6e8bb 8695 }
5e3f878a 8696 store_reg(s, rd, tmp);
191abaa2 8697 }
9ee6e8bb
PB
8698 } else {
8699 int shifter_out = 0;
8700 /* modified 12-bit immediate. */
8701 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8702 imm = (insn & 0xff);
8703 switch (shift) {
8704 case 0: /* XY */
8705 /* Nothing to do. */
8706 break;
8707 case 1: /* 00XY00XY */
8708 imm |= imm << 16;
8709 break;
8710 case 2: /* XY00XY00 */
8711 imm |= imm << 16;
8712 imm <<= 8;
8713 break;
8714 case 3: /* XYXYXYXY */
8715 imm |= imm << 16;
8716 imm |= imm << 8;
8717 break;
8718 default: /* Rotated constant. */
8719 shift = (shift << 1) | (imm >> 7);
8720 imm |= 0x80;
8721 imm = imm << (32 - shift);
8722 shifter_out = 1;
8723 break;
b5ff1b31 8724 }
7d1b0095 8725 tmp2 = tcg_temp_new_i32();
3174f8e9 8726 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8727 rn = (insn >> 16) & 0xf;
3174f8e9 8728 if (rn == 15) {
7d1b0095 8729 tmp = tcg_temp_new_i32();
3174f8e9
FN
8730 tcg_gen_movi_i32(tmp, 0);
8731 } else {
8732 tmp = load_reg(s, rn);
8733 }
9ee6e8bb
PB
8734 op = (insn >> 21) & 0xf;
8735 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8736 shifter_out, tmp, tmp2))
9ee6e8bb 8737 goto illegal_op;
7d1b0095 8738 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8739 rd = (insn >> 8) & 0xf;
8740 if (rd != 15) {
3174f8e9
FN
8741 store_reg(s, rd, tmp);
8742 } else {
7d1b0095 8743 tcg_temp_free_i32(tmp);
2c0262af 8744 }
2c0262af 8745 }
9ee6e8bb
PB
8746 }
8747 break;
8748 case 12: /* Load/store single data item. */
8749 {
8750 int postinc = 0;
8751 int writeback = 0;
b0109805 8752 int user;
9ee6e8bb
PB
8753 if ((insn & 0x01100000) == 0x01000000) {
8754 if (disas_neon_ls_insn(env, s, insn))
c1713132 8755 goto illegal_op;
9ee6e8bb
PB
8756 break;
8757 }
a2fdc890
PM
8758 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8759 if (rs == 15) {
8760 if (!(insn & (1 << 20))) {
8761 goto illegal_op;
8762 }
8763 if (op != 2) {
8764 /* Byte or halfword load space with dest == r15 : memory hints.
8765 * Catch them early so we don't emit pointless addressing code.
8766 * This space is a mix of:
8767 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8768 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8769 * cores)
8770 * unallocated hints, which must be treated as NOPs
8771 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8772 * which is easiest for the decoding logic
8773 * Some space which must UNDEF
8774 */
8775 int op1 = (insn >> 23) & 3;
8776 int op2 = (insn >> 6) & 0x3f;
8777 if (op & 2) {
8778 goto illegal_op;
8779 }
8780 if (rn == 15) {
8781 /* UNPREDICTABLE or unallocated hint */
8782 return 0;
8783 }
8784 if (op1 & 1) {
8785 return 0; /* PLD* or unallocated hint */
8786 }
8787 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8788 return 0; /* PLD* or unallocated hint */
8789 }
8790 /* UNDEF space, or an UNPREDICTABLE */
8791 return 1;
8792 }
8793 }
b0109805 8794 user = IS_USER(s);
9ee6e8bb 8795 if (rn == 15) {
7d1b0095 8796 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8797 /* PC relative. */
8798 /* s->pc has already been incremented by 4. */
8799 imm = s->pc & 0xfffffffc;
8800 if (insn & (1 << 23))
8801 imm += insn & 0xfff;
8802 else
8803 imm -= insn & 0xfff;
b0109805 8804 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8805 } else {
b0109805 8806 addr = load_reg(s, rn);
9ee6e8bb
PB
8807 if (insn & (1 << 23)) {
8808 /* Positive offset. */
8809 imm = insn & 0xfff;
b0109805 8810 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8811 } else {
9ee6e8bb 8812 imm = insn & 0xff;
2a0308c5
PM
8813 switch ((insn >> 8) & 0xf) {
8814 case 0x0: /* Shifted Register. */
9ee6e8bb 8815 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8816 if (shift > 3) {
8817 tcg_temp_free_i32(addr);
18c9b560 8818 goto illegal_op;
2a0308c5 8819 }
b26eefb6 8820 tmp = load_reg(s, rm);
9ee6e8bb 8821 if (shift)
b26eefb6 8822 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8823 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8824 tcg_temp_free_i32(tmp);
9ee6e8bb 8825 break;
2a0308c5 8826 case 0xc: /* Negative offset. */
b0109805 8827 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8828 break;
2a0308c5 8829 case 0xe: /* User privilege. */
b0109805
PB
8830 tcg_gen_addi_i32(addr, addr, imm);
8831 user = 1;
9ee6e8bb 8832 break;
2a0308c5 8833 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8834 imm = -imm;
8835 /* Fall through. */
2a0308c5 8836 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8837 postinc = 1;
8838 writeback = 1;
8839 break;
2a0308c5 8840 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8841 imm = -imm;
8842 /* Fall through. */
2a0308c5 8843 case 0xf: /* Pre-increment. */
b0109805 8844 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8845 writeback = 1;
8846 break;
8847 default:
2a0308c5 8848 tcg_temp_free_i32(addr);
b7bcbe95 8849 goto illegal_op;
9ee6e8bb
PB
8850 }
8851 }
8852 }
9ee6e8bb
PB
8853 if (insn & (1 << 20)) {
8854 /* Load. */
a2fdc890
PM
8855 switch (op) {
8856 case 0: tmp = gen_ld8u(addr, user); break;
8857 case 4: tmp = gen_ld8s(addr, user); break;
8858 case 1: tmp = gen_ld16u(addr, user); break;
8859 case 5: tmp = gen_ld16s(addr, user); break;
8860 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8861 default:
8862 tcg_temp_free_i32(addr);
8863 goto illegal_op;
a2fdc890
PM
8864 }
8865 if (rs == 15) {
8866 gen_bx(s, tmp);
9ee6e8bb 8867 } else {
a2fdc890 8868 store_reg(s, rs, tmp);
9ee6e8bb
PB
8869 }
8870 } else {
8871 /* Store. */
b0109805 8872 tmp = load_reg(s, rs);
9ee6e8bb 8873 switch (op) {
b0109805
PB
8874 case 0: gen_st8(tmp, addr, user); break;
8875 case 1: gen_st16(tmp, addr, user); break;
8876 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8877 default:
8878 tcg_temp_free_i32(addr);
8879 goto illegal_op;
b7bcbe95 8880 }
2c0262af 8881 }
9ee6e8bb 8882 if (postinc)
b0109805
PB
8883 tcg_gen_addi_i32(addr, addr, imm);
8884 if (writeback) {
8885 store_reg(s, rn, addr);
8886 } else {
7d1b0095 8887 tcg_temp_free_i32(addr);
b0109805 8888 }
9ee6e8bb
PB
8889 }
8890 break;
8891 default:
8892 goto illegal_op;
2c0262af 8893 }
9ee6e8bb
PB
8894 return 0;
8895illegal_op:
8896 return 1;
2c0262af
FB
8897}
8898
9ee6e8bb 8899static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8900{
8901 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8902 int32_t offset;
8903 int i;
b26eefb6 8904 TCGv tmp;
d9ba4830 8905 TCGv tmp2;
b0109805 8906 TCGv addr;
99c475ab 8907
9ee6e8bb
PB
8908 if (s->condexec_mask) {
8909 cond = s->condexec_cond;
bedd2912
JB
8910 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8911 s->condlabel = gen_new_label();
8912 gen_test_cc(cond ^ 1, s->condlabel);
8913 s->condjmp = 1;
8914 }
9ee6e8bb
PB
8915 }
8916
b5ff1b31 8917 insn = lduw_code(s->pc);
99c475ab 8918 s->pc += 2;
b5ff1b31 8919
99c475ab
FB
8920 switch (insn >> 12) {
8921 case 0: case 1:
396e467c 8922
99c475ab
FB
8923 rd = insn & 7;
8924 op = (insn >> 11) & 3;
8925 if (op == 3) {
8926 /* add/subtract */
8927 rn = (insn >> 3) & 7;
396e467c 8928 tmp = load_reg(s, rn);
99c475ab
FB
8929 if (insn & (1 << 10)) {
8930 /* immediate */
7d1b0095 8931 tmp2 = tcg_temp_new_i32();
396e467c 8932 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8933 } else {
8934 /* reg */
8935 rm = (insn >> 6) & 7;
396e467c 8936 tmp2 = load_reg(s, rm);
99c475ab 8937 }
9ee6e8bb
PB
8938 if (insn & (1 << 9)) {
8939 if (s->condexec_mask)
396e467c 8940 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8941 else
396e467c 8942 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8943 } else {
8944 if (s->condexec_mask)
396e467c 8945 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8946 else
396e467c 8947 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8948 }
7d1b0095 8949 tcg_temp_free_i32(tmp2);
396e467c 8950 store_reg(s, rd, tmp);
99c475ab
FB
8951 } else {
8952 /* shift immediate */
8953 rm = (insn >> 3) & 7;
8954 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8955 tmp = load_reg(s, rm);
8956 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8957 if (!s->condexec_mask)
8958 gen_logic_CC(tmp);
8959 store_reg(s, rd, tmp);
99c475ab
FB
8960 }
8961 break;
8962 case 2: case 3:
8963 /* arithmetic large immediate */
8964 op = (insn >> 11) & 3;
8965 rd = (insn >> 8) & 0x7;
396e467c 8966 if (op == 0) { /* mov */
7d1b0095 8967 tmp = tcg_temp_new_i32();
396e467c 8968 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8969 if (!s->condexec_mask)
396e467c
FN
8970 gen_logic_CC(tmp);
8971 store_reg(s, rd, tmp);
8972 } else {
8973 tmp = load_reg(s, rd);
7d1b0095 8974 tmp2 = tcg_temp_new_i32();
396e467c
FN
8975 tcg_gen_movi_i32(tmp2, insn & 0xff);
8976 switch (op) {
8977 case 1: /* cmp */
8978 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8979 tcg_temp_free_i32(tmp);
8980 tcg_temp_free_i32(tmp2);
396e467c
FN
8981 break;
8982 case 2: /* add */
8983 if (s->condexec_mask)
8984 tcg_gen_add_i32(tmp, tmp, tmp2);
8985 else
8986 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 8987 tcg_temp_free_i32(tmp2);
396e467c
FN
8988 store_reg(s, rd, tmp);
8989 break;
8990 case 3: /* sub */
8991 if (s->condexec_mask)
8992 tcg_gen_sub_i32(tmp, tmp, tmp2);
8993 else
8994 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 8995 tcg_temp_free_i32(tmp2);
396e467c
FN
8996 store_reg(s, rd, tmp);
8997 break;
8998 }
99c475ab 8999 }
99c475ab
FB
9000 break;
9001 case 4:
9002 if (insn & (1 << 11)) {
9003 rd = (insn >> 8) & 7;
5899f386
FB
9004 /* load pc-relative. Bit 1 of PC is ignored. */
9005 val = s->pc + 2 + ((insn & 0xff) * 4);
9006 val &= ~(uint32_t)2;
7d1b0095 9007 addr = tcg_temp_new_i32();
b0109805
PB
9008 tcg_gen_movi_i32(addr, val);
9009 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9010 tcg_temp_free_i32(addr);
b0109805 9011 store_reg(s, rd, tmp);
99c475ab
FB
9012 break;
9013 }
9014 if (insn & (1 << 10)) {
9015 /* data processing extended or blx */
9016 rd = (insn & 7) | ((insn >> 4) & 8);
9017 rm = (insn >> 3) & 0xf;
9018 op = (insn >> 8) & 3;
9019 switch (op) {
9020 case 0: /* add */
396e467c
FN
9021 tmp = load_reg(s, rd);
9022 tmp2 = load_reg(s, rm);
9023 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9024 tcg_temp_free_i32(tmp2);
396e467c 9025 store_reg(s, rd, tmp);
99c475ab
FB
9026 break;
9027 case 1: /* cmp */
396e467c
FN
9028 tmp = load_reg(s, rd);
9029 tmp2 = load_reg(s, rm);
9030 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9031 tcg_temp_free_i32(tmp2);
9032 tcg_temp_free_i32(tmp);
99c475ab
FB
9033 break;
9034 case 2: /* mov/cpy */
396e467c
FN
9035 tmp = load_reg(s, rm);
9036 store_reg(s, rd, tmp);
99c475ab
FB
9037 break;
9038 case 3:/* branch [and link] exchange thumb register */
b0109805 9039 tmp = load_reg(s, rm);
99c475ab 9040 if (insn & (1 << 7)) {
be5e7a76 9041 ARCH(5);
99c475ab 9042 val = (uint32_t)s->pc | 1;
7d1b0095 9043 tmp2 = tcg_temp_new_i32();
b0109805
PB
9044 tcg_gen_movi_i32(tmp2, val);
9045 store_reg(s, 14, tmp2);
99c475ab 9046 }
be5e7a76 9047 /* already thumb, no need to check */
d9ba4830 9048 gen_bx(s, tmp);
99c475ab
FB
9049 break;
9050 }
9051 break;
9052 }
9053
9054 /* data processing register */
9055 rd = insn & 7;
9056 rm = (insn >> 3) & 7;
9057 op = (insn >> 6) & 0xf;
9058 if (op == 2 || op == 3 || op == 4 || op == 7) {
9059 /* the shift/rotate ops want the operands backwards */
9060 val = rm;
9061 rm = rd;
9062 rd = val;
9063 val = 1;
9064 } else {
9065 val = 0;
9066 }
9067
396e467c 9068 if (op == 9) { /* neg */
7d1b0095 9069 tmp = tcg_temp_new_i32();
396e467c
FN
9070 tcg_gen_movi_i32(tmp, 0);
9071 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9072 tmp = load_reg(s, rd);
9073 } else {
9074 TCGV_UNUSED(tmp);
9075 }
99c475ab 9076
396e467c 9077 tmp2 = load_reg(s, rm);
5899f386 9078 switch (op) {
99c475ab 9079 case 0x0: /* and */
396e467c 9080 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9081 if (!s->condexec_mask)
396e467c 9082 gen_logic_CC(tmp);
99c475ab
FB
9083 break;
9084 case 0x1: /* eor */
396e467c 9085 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9086 if (!s->condexec_mask)
396e467c 9087 gen_logic_CC(tmp);
99c475ab
FB
9088 break;
9089 case 0x2: /* lsl */
9ee6e8bb 9090 if (s->condexec_mask) {
396e467c 9091 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9092 } else {
396e467c
FN
9093 gen_helper_shl_cc(tmp2, tmp2, tmp);
9094 gen_logic_CC(tmp2);
9ee6e8bb 9095 }
99c475ab
FB
9096 break;
9097 case 0x3: /* lsr */
9ee6e8bb 9098 if (s->condexec_mask) {
396e467c 9099 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9100 } else {
396e467c
FN
9101 gen_helper_shr_cc(tmp2, tmp2, tmp);
9102 gen_logic_CC(tmp2);
9ee6e8bb 9103 }
99c475ab
FB
9104 break;
9105 case 0x4: /* asr */
9ee6e8bb 9106 if (s->condexec_mask) {
396e467c 9107 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9108 } else {
396e467c
FN
9109 gen_helper_sar_cc(tmp2, tmp2, tmp);
9110 gen_logic_CC(tmp2);
9ee6e8bb 9111 }
99c475ab
FB
9112 break;
9113 case 0x5: /* adc */
9ee6e8bb 9114 if (s->condexec_mask)
396e467c 9115 gen_adc(tmp, tmp2);
9ee6e8bb 9116 else
396e467c 9117 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9118 break;
9119 case 0x6: /* sbc */
9ee6e8bb 9120 if (s->condexec_mask)
396e467c 9121 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9122 else
396e467c 9123 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9124 break;
9125 case 0x7: /* ror */
9ee6e8bb 9126 if (s->condexec_mask) {
f669df27
AJ
9127 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9128 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9129 } else {
396e467c
FN
9130 gen_helper_ror_cc(tmp2, tmp2, tmp);
9131 gen_logic_CC(tmp2);
9ee6e8bb 9132 }
99c475ab
FB
9133 break;
9134 case 0x8: /* tst */
396e467c
FN
9135 tcg_gen_and_i32(tmp, tmp, tmp2);
9136 gen_logic_CC(tmp);
99c475ab 9137 rd = 16;
5899f386 9138 break;
99c475ab 9139 case 0x9: /* neg */
9ee6e8bb 9140 if (s->condexec_mask)
396e467c 9141 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9142 else
396e467c 9143 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9144 break;
9145 case 0xa: /* cmp */
396e467c 9146 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9147 rd = 16;
9148 break;
9149 case 0xb: /* cmn */
396e467c 9150 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9151 rd = 16;
9152 break;
9153 case 0xc: /* orr */
396e467c 9154 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9155 if (!s->condexec_mask)
396e467c 9156 gen_logic_CC(tmp);
99c475ab
FB
9157 break;
9158 case 0xd: /* mul */
7b2919a0 9159 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9160 if (!s->condexec_mask)
396e467c 9161 gen_logic_CC(tmp);
99c475ab
FB
9162 break;
9163 case 0xe: /* bic */
f669df27 9164 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9165 if (!s->condexec_mask)
396e467c 9166 gen_logic_CC(tmp);
99c475ab
FB
9167 break;
9168 case 0xf: /* mvn */
396e467c 9169 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9170 if (!s->condexec_mask)
396e467c 9171 gen_logic_CC(tmp2);
99c475ab 9172 val = 1;
5899f386 9173 rm = rd;
99c475ab
FB
9174 break;
9175 }
9176 if (rd != 16) {
396e467c
FN
9177 if (val) {
9178 store_reg(s, rm, tmp2);
9179 if (op != 0xf)
7d1b0095 9180 tcg_temp_free_i32(tmp);
396e467c
FN
9181 } else {
9182 store_reg(s, rd, tmp);
7d1b0095 9183 tcg_temp_free_i32(tmp2);
396e467c
FN
9184 }
9185 } else {
7d1b0095
PM
9186 tcg_temp_free_i32(tmp);
9187 tcg_temp_free_i32(tmp2);
99c475ab
FB
9188 }
9189 break;
9190
9191 case 5:
9192 /* load/store register offset. */
9193 rd = insn & 7;
9194 rn = (insn >> 3) & 7;
9195 rm = (insn >> 6) & 7;
9196 op = (insn >> 9) & 7;
b0109805 9197 addr = load_reg(s, rn);
b26eefb6 9198 tmp = load_reg(s, rm);
b0109805 9199 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9200 tcg_temp_free_i32(tmp);
99c475ab
FB
9201
9202 if (op < 3) /* store */
b0109805 9203 tmp = load_reg(s, rd);
99c475ab
FB
9204
9205 switch (op) {
9206 case 0: /* str */
b0109805 9207 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9208 break;
9209 case 1: /* strh */
b0109805 9210 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9211 break;
9212 case 2: /* strb */
b0109805 9213 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9214 break;
9215 case 3: /* ldrsb */
b0109805 9216 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9217 break;
9218 case 4: /* ldr */
b0109805 9219 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9220 break;
9221 case 5: /* ldrh */
b0109805 9222 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9223 break;
9224 case 6: /* ldrb */
b0109805 9225 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9226 break;
9227 case 7: /* ldrsh */
b0109805 9228 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9229 break;
9230 }
9231 if (op >= 3) /* load */
b0109805 9232 store_reg(s, rd, tmp);
7d1b0095 9233 tcg_temp_free_i32(addr);
99c475ab
FB
9234 break;
9235
9236 case 6:
9237 /* load/store word immediate offset */
9238 rd = insn & 7;
9239 rn = (insn >> 3) & 7;
b0109805 9240 addr = load_reg(s, rn);
99c475ab 9241 val = (insn >> 4) & 0x7c;
b0109805 9242 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9243
9244 if (insn & (1 << 11)) {
9245 /* load */
b0109805
PB
9246 tmp = gen_ld32(addr, IS_USER(s));
9247 store_reg(s, rd, tmp);
99c475ab
FB
9248 } else {
9249 /* store */
b0109805
PB
9250 tmp = load_reg(s, rd);
9251 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9252 }
7d1b0095 9253 tcg_temp_free_i32(addr);
99c475ab
FB
9254 break;
9255
9256 case 7:
9257 /* load/store byte immediate offset */
9258 rd = insn & 7;
9259 rn = (insn >> 3) & 7;
b0109805 9260 addr = load_reg(s, rn);
99c475ab 9261 val = (insn >> 6) & 0x1f;
b0109805 9262 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9263
9264 if (insn & (1 << 11)) {
9265 /* load */
b0109805
PB
9266 tmp = gen_ld8u(addr, IS_USER(s));
9267 store_reg(s, rd, tmp);
99c475ab
FB
9268 } else {
9269 /* store */
b0109805
PB
9270 tmp = load_reg(s, rd);
9271 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9272 }
7d1b0095 9273 tcg_temp_free_i32(addr);
99c475ab
FB
9274 break;
9275
9276 case 8:
9277 /* load/store halfword immediate offset */
9278 rd = insn & 7;
9279 rn = (insn >> 3) & 7;
b0109805 9280 addr = load_reg(s, rn);
99c475ab 9281 val = (insn >> 5) & 0x3e;
b0109805 9282 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9283
9284 if (insn & (1 << 11)) {
9285 /* load */
b0109805
PB
9286 tmp = gen_ld16u(addr, IS_USER(s));
9287 store_reg(s, rd, tmp);
99c475ab
FB
9288 } else {
9289 /* store */
b0109805
PB
9290 tmp = load_reg(s, rd);
9291 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9292 }
7d1b0095 9293 tcg_temp_free_i32(addr);
99c475ab
FB
9294 break;
9295
9296 case 9:
9297 /* load/store from stack */
9298 rd = (insn >> 8) & 7;
b0109805 9299 addr = load_reg(s, 13);
99c475ab 9300 val = (insn & 0xff) * 4;
b0109805 9301 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9302
9303 if (insn & (1 << 11)) {
9304 /* load */
b0109805
PB
9305 tmp = gen_ld32(addr, IS_USER(s));
9306 store_reg(s, rd, tmp);
99c475ab
FB
9307 } else {
9308 /* store */
b0109805
PB
9309 tmp = load_reg(s, rd);
9310 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9311 }
7d1b0095 9312 tcg_temp_free_i32(addr);
99c475ab
FB
9313 break;
9314
9315 case 10:
9316 /* add to high reg */
9317 rd = (insn >> 8) & 7;
5899f386
FB
9318 if (insn & (1 << 11)) {
9319 /* SP */
5e3f878a 9320 tmp = load_reg(s, 13);
5899f386
FB
9321 } else {
9322 /* PC. bit 1 is ignored. */
7d1b0095 9323 tmp = tcg_temp_new_i32();
5e3f878a 9324 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9325 }
99c475ab 9326 val = (insn & 0xff) * 4;
5e3f878a
PB
9327 tcg_gen_addi_i32(tmp, tmp, val);
9328 store_reg(s, rd, tmp);
99c475ab
FB
9329 break;
9330
9331 case 11:
9332 /* misc */
9333 op = (insn >> 8) & 0xf;
9334 switch (op) {
9335 case 0:
9336 /* adjust stack pointer */
b26eefb6 9337 tmp = load_reg(s, 13);
99c475ab
FB
9338 val = (insn & 0x7f) * 4;
9339 if (insn & (1 << 7))
6a0d8a1d 9340 val = -(int32_t)val;
b26eefb6
PB
9341 tcg_gen_addi_i32(tmp, tmp, val);
9342 store_reg(s, 13, tmp);
99c475ab
FB
9343 break;
9344
9ee6e8bb
PB
9345 case 2: /* sign/zero extend. */
9346 ARCH(6);
9347 rd = insn & 7;
9348 rm = (insn >> 3) & 7;
b0109805 9349 tmp = load_reg(s, rm);
9ee6e8bb 9350 switch ((insn >> 6) & 3) {
b0109805
PB
9351 case 0: gen_sxth(tmp); break;
9352 case 1: gen_sxtb(tmp); break;
9353 case 2: gen_uxth(tmp); break;
9354 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9355 }
b0109805 9356 store_reg(s, rd, tmp);
9ee6e8bb 9357 break;
99c475ab
FB
9358 case 4: case 5: case 0xc: case 0xd:
9359 /* push/pop */
b0109805 9360 addr = load_reg(s, 13);
5899f386
FB
9361 if (insn & (1 << 8))
9362 offset = 4;
99c475ab 9363 else
5899f386
FB
9364 offset = 0;
9365 for (i = 0; i < 8; i++) {
9366 if (insn & (1 << i))
9367 offset += 4;
9368 }
9369 if ((insn & (1 << 11)) == 0) {
b0109805 9370 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9371 }
99c475ab
FB
9372 for (i = 0; i < 8; i++) {
9373 if (insn & (1 << i)) {
9374 if (insn & (1 << 11)) {
9375 /* pop */
b0109805
PB
9376 tmp = gen_ld32(addr, IS_USER(s));
9377 store_reg(s, i, tmp);
99c475ab
FB
9378 } else {
9379 /* push */
b0109805
PB
9380 tmp = load_reg(s, i);
9381 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9382 }
5899f386 9383 /* advance to the next address. */
b0109805 9384 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9385 }
9386 }
a50f5b91 9387 TCGV_UNUSED(tmp);
99c475ab
FB
9388 if (insn & (1 << 8)) {
9389 if (insn & (1 << 11)) {
9390 /* pop pc */
b0109805 9391 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9392 /* don't set the pc until the rest of the instruction
9393 has completed */
9394 } else {
9395 /* push lr */
b0109805
PB
9396 tmp = load_reg(s, 14);
9397 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9398 }
b0109805 9399 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9400 }
5899f386 9401 if ((insn & (1 << 11)) == 0) {
b0109805 9402 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9403 }
99c475ab 9404 /* write back the new stack pointer */
b0109805 9405 store_reg(s, 13, addr);
99c475ab 9406 /* set the new PC value */
be5e7a76
DES
9407 if ((insn & 0x0900) == 0x0900) {
9408 store_reg_from_load(env, s, 15, tmp);
9409 }
99c475ab
FB
9410 break;
9411
9ee6e8bb
PB
9412 case 1: case 3: case 9: case 11: /* czb */
9413 rm = insn & 7;
d9ba4830 9414 tmp = load_reg(s, rm);
9ee6e8bb
PB
9415 s->condlabel = gen_new_label();
9416 s->condjmp = 1;
9417 if (insn & (1 << 11))
cb63669a 9418 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9419 else
cb63669a 9420 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9421 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9422 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9423 val = (uint32_t)s->pc + 2;
9424 val += offset;
9425 gen_jmp(s, val);
9426 break;
9427
9428 case 15: /* IT, nop-hint. */
9429 if ((insn & 0xf) == 0) {
9430 gen_nop_hint(s, (insn >> 4) & 0xf);
9431 break;
9432 }
9433 /* If Then. */
9434 s->condexec_cond = (insn >> 4) & 0xe;
9435 s->condexec_mask = insn & 0x1f;
9436 /* No actual code generated for this insn, just setup state. */
9437 break;
9438
06c949e6 9439 case 0xe: /* bkpt */
be5e7a76 9440 ARCH(5);
bc4a0de0 9441 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9442 break;
9443
9ee6e8bb
PB
9444 case 0xa: /* rev */
9445 ARCH(6);
9446 rn = (insn >> 3) & 0x7;
9447 rd = insn & 0x7;
b0109805 9448 tmp = load_reg(s, rn);
9ee6e8bb 9449 switch ((insn >> 6) & 3) {
66896cb8 9450 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9451 case 1: gen_rev16(tmp); break;
9452 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9453 default: goto illegal_op;
9454 }
b0109805 9455 store_reg(s, rd, tmp);
9ee6e8bb
PB
9456 break;
9457
9458 case 6: /* cps */
9459 ARCH(6);
9460 if (IS_USER(s))
9461 break;
9462 if (IS_M(env)) {
8984bd2e 9463 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9464 /* PRIMASK */
8984bd2e
PB
9465 if (insn & 1) {
9466 addr = tcg_const_i32(16);
9467 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9468 tcg_temp_free_i32(addr);
8984bd2e 9469 }
9ee6e8bb 9470 /* FAULTMASK */
8984bd2e
PB
9471 if (insn & 2) {
9472 addr = tcg_const_i32(17);
9473 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9474 tcg_temp_free_i32(addr);
8984bd2e 9475 }
b75263d6 9476 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9477 gen_lookup_tb(s);
9478 } else {
9479 if (insn & (1 << 4))
9480 shift = CPSR_A | CPSR_I | CPSR_F;
9481 else
9482 shift = 0;
fa26df03 9483 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9484 }
9485 break;
9486
99c475ab
FB
9487 default:
9488 goto undef;
9489 }
9490 break;
9491
9492 case 12:
a7d3970d 9493 {
99c475ab 9494 /* load/store multiple */
a7d3970d
PM
9495 TCGv loaded_var;
9496 TCGV_UNUSED(loaded_var);
99c475ab 9497 rn = (insn >> 8) & 0x7;
b0109805 9498 addr = load_reg(s, rn);
99c475ab
FB
9499 for (i = 0; i < 8; i++) {
9500 if (insn & (1 << i)) {
99c475ab
FB
9501 if (insn & (1 << 11)) {
9502 /* load */
b0109805 9503 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9504 if (i == rn) {
9505 loaded_var = tmp;
9506 } else {
9507 store_reg(s, i, tmp);
9508 }
99c475ab
FB
9509 } else {
9510 /* store */
b0109805
PB
9511 tmp = load_reg(s, i);
9512 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9513 }
5899f386 9514 /* advance to the next address */
b0109805 9515 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9516 }
9517 }
b0109805 9518 if ((insn & (1 << rn)) == 0) {
a7d3970d 9519 /* base reg not in list: base register writeback */
b0109805
PB
9520 store_reg(s, rn, addr);
9521 } else {
a7d3970d
PM
9522 /* base reg in list: if load, complete it now */
9523 if (insn & (1 << 11)) {
9524 store_reg(s, rn, loaded_var);
9525 }
7d1b0095 9526 tcg_temp_free_i32(addr);
b0109805 9527 }
99c475ab 9528 break;
a7d3970d 9529 }
99c475ab
FB
9530 case 13:
9531 /* conditional branch or swi */
9532 cond = (insn >> 8) & 0xf;
9533 if (cond == 0xe)
9534 goto undef;
9535
9536 if (cond == 0xf) {
9537 /* swi */
422ebf69 9538 gen_set_pc_im(s->pc);
9ee6e8bb 9539 s->is_jmp = DISAS_SWI;
99c475ab
FB
9540 break;
9541 }
9542 /* generate a conditional jump to next instruction */
e50e6a20 9543 s->condlabel = gen_new_label();
d9ba4830 9544 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9545 s->condjmp = 1;
99c475ab
FB
9546
9547 /* jump to the offset */
5899f386 9548 val = (uint32_t)s->pc + 2;
99c475ab 9549 offset = ((int32_t)insn << 24) >> 24;
5899f386 9550 val += offset << 1;
8aaca4c0 9551 gen_jmp(s, val);
99c475ab
FB
9552 break;
9553
9554 case 14:
358bf29e 9555 if (insn & (1 << 11)) {
9ee6e8bb
PB
9556 if (disas_thumb2_insn(env, s, insn))
9557 goto undef32;
358bf29e
PB
9558 break;
9559 }
9ee6e8bb 9560 /* unconditional branch */
99c475ab
FB
9561 val = (uint32_t)s->pc;
9562 offset = ((int32_t)insn << 21) >> 21;
9563 val += (offset << 1) + 2;
8aaca4c0 9564 gen_jmp(s, val);
99c475ab
FB
9565 break;
9566
9567 case 15:
9ee6e8bb 9568 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9569 goto undef32;
9ee6e8bb 9570 break;
99c475ab
FB
9571 }
9572 return;
9ee6e8bb 9573undef32:
bc4a0de0 9574 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9575 return;
9576illegal_op:
99c475ab 9577undef:
bc4a0de0 9578 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9579}
9580
2c0262af
FB
9581/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9582 basic block 'tb'. If search_pc is TRUE, also generate PC
9583 information for each intermediate instruction. */
2cfc5f17
TS
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    /* Seed the DisasContext from the TB flags: Thumb/ARM state,
       condexec (IT) bits, privilege level and VFP/Neon configuration
       all come from tb->flags, not from live CPU state.  */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    /* Allocate the per-TB scratch temporaries used by the VFP/Neon
       decoders.  */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        /* Translate a breakpoint hit into a debug exception and stop
           translating this TB.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                    break;  /* NOTE: unreachable after the goto above */
                }
            }
        }
        /* When search_pc is set, record PC, condexec bits and icount for
           this opcode index so restore_state_to_opc() can map back.  */
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine: shift the mask and
               fold its top bit into the low bit of the condition.  */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        /* Close the "condition false" branch target of a conditionally
           executed instruction that did not end the TB.  */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns ++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    /* Pad out the remainder of the search table, or record the final
       TB size/icount for a normal translation pass.  */
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
9856
2cfc5f17 9857void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9858{
2cfc5f17 9859 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9860}
9861
2cfc5f17 9862void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9863{
2cfc5f17 9864 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9865}
9866
b5ff1b31
FB
/* Printable names for the ARM processor modes, indexed by the low four
   bits of the CPSR mode field; "???" marks reserved encodings.  */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 9871
9a78eead 9872void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9873 int flags)
2c0262af
FB
9874{
9875 int i;
06e80fc9 9876#if 0
bc380d17 9877 union {
b7bcbe95
FB
9878 uint32_t i;
9879 float s;
9880 } s0, s1;
9881 CPU_DoubleU d;
a94a6abf
PB
9882 /* ??? This assumes float64 and double have the same layout.
9883 Oh well, it's only debug dumps. */
9884 union {
9885 float64 f64;
9886 double d;
9887 } d0;
06e80fc9 9888#endif
b5ff1b31 9889 uint32_t psr;
2c0262af
FB
9890
9891 for(i=0;i<16;i++) {
7fe48483 9892 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9893 if ((i % 4) == 3)
7fe48483 9894 cpu_fprintf(f, "\n");
2c0262af 9895 else
7fe48483 9896 cpu_fprintf(f, " ");
2c0262af 9897 }
b5ff1b31 9898 psr = cpsr_read(env);
687fa640
TS
9899 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9900 psr,
b5ff1b31
FB
9901 psr & (1 << 31) ? 'N' : '-',
9902 psr & (1 << 30) ? 'Z' : '-',
9903 psr & (1 << 29) ? 'C' : '-',
9904 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9905 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9906 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9907
5e3f878a 9908#if 0
b7bcbe95 9909 for (i = 0; i < 16; i++) {
8e96005d
FB
9910 d.d = env->vfp.regs[i];
9911 s0.i = d.l.lower;
9912 s1.i = d.l.upper;
a94a6abf
PB
9913 d0.f64 = d.d;
9914 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9915 i * 2, (int)s0.i, s0.s,
a94a6abf 9916 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9917 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9918 d0.d);
b7bcbe95 9919 }
40f137e1 9920 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9921#endif
2c0262af 9922}
a6b025d3 9923
e87b7cb0 9924void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
9925{
9926 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 9927 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 9928}