/* Source: qemu.git target-arm/translate.c (extracted from a git-blame web view;
   blame head commit: "target-arm: Support v6 barriers in linux-user mode"). */
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
2c0262af 28#include "disas.h"
57fec1fe 29#include "tcg-op.h"
79383c9c 30#include "qemu-log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
9ee6e8bb
PB
56 /* Thumb-2 condtional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
b5ff1b31
FB
62#if !defined(CONFIG_USER_ONLY)
63 int user;
64#endif
5df8bac1 65 int vfp_enabled;
69d1fc22
PM
66 int vec_len;
67 int vec_stride;
2c0262af
FB
68} DisasContext;
69
e12ce78d
PM
70static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
71
b5ff1b31
FB
72#if defined(CONFIG_USER_ONLY)
73#define IS_USER(s) 1
74#else
75#define IS_USER(s) (s->user)
76#endif
77
9ee6e8bb
PB
78/* These instructions trap after executing, so defer them until after the
79 conditional executions state has been updated. */
80#define DISAS_WFI 4
81#define DISAS_SWI 5
2c0262af 82
a7812ae4 83static TCGv_ptr cpu_env;
ad69471c 84/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 85static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 86static TCGv_i32 cpu_R[16];
426f5abc
PB
87static TCGv_i32 cpu_exclusive_addr;
88static TCGv_i32 cpu_exclusive_val;
89static TCGv_i32 cpu_exclusive_high;
90#ifdef CONFIG_USER_ONLY
91static TCGv_i32 cpu_exclusive_test;
92static TCGv_i32 cpu_exclusive_info;
93#endif
ad69471c 94
b26eefb6 95/* FIXME: These should be removed. */
a7812ae4
PB
96static TCGv cpu_F0s, cpu_F1s;
97static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 98
2e70f6ef
PB
99#include "gen-icount.h"
100
155c3eac
FN
101static const char *regnames[] =
102 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
103 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
104
b26eefb6
PB
105/* initialize TCG globals. */
106void arm_translate_init(void)
107{
155c3eac
FN
108 int i;
109
a7812ae4
PB
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111
155c3eac
FN
112 for (i = 0; i < 16; i++) {
113 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, regs[i]),
115 regnames[i]);
116 }
426f5abc
PB
117 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, exclusive_addr), "exclusive_addr");
119 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, exclusive_val), "exclusive_val");
121 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_high), "exclusive_high");
123#ifdef CONFIG_USER_ONLY
124 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, exclusive_test), "exclusive_test");
126 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, exclusive_info), "exclusive_info");
128#endif
155c3eac 129
a7812ae4 130#define GEN_HELPER 2
7b59220e 131#include "helper.h"
b26eefb6
PB
132}
133
d9ba4830
PB
134static inline TCGv load_cpu_offset(int offset)
135{
7d1b0095 136 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
142
143static inline void store_cpu_offset(TCGv var, int offset)
144{
145 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 146 tcg_temp_free_i32(var);
d9ba4830
PB
147}
148
149#define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
151
b26eefb6
PB
152/* Set a variable to the value of a CPU register. */
153static void load_reg_var(DisasContext *s, TCGv var, int reg)
154{
155 if (reg == 15) {
156 uint32_t addr;
157 /* normaly, since we updated PC, we need only to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
155c3eac 164 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
169static inline TCGv load_reg(DisasContext *s, int reg)
170{
7d1b0095 171 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
176/* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178static void store_reg(DisasContext *s, int reg, TCGv var)
179{
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
155c3eac 184 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 185 tcg_temp_free_i32(var);
b26eefb6
PB
186}
187
b26eefb6 188/* Value extensions. */
86831435
PB
189#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
191#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
1497c961
PB
194#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 196
b26eefb6 197
b75263d6
JR
198static inline void gen_set_cpsr(TCGv var, uint32_t mask)
199{
200 TCGv tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
203}
d9ba4830
PB
204/* Set NZCV flags from the high 4 bits of var. */
205#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
207static void gen_exception(int excp)
208{
7d1b0095 209 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
210 tcg_gen_movi_i32(tmp, excp);
211 gen_helper_exception(tmp);
7d1b0095 212 tcg_temp_free_i32(tmp);
d9ba4830
PB
213}
214
3670669c
PB
215static void gen_smul_dual(TCGv a, TCGv b)
216{
7d1b0095
PM
217 TCGv tmp1 = tcg_temp_new_i32();
218 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
219 tcg_gen_ext16s_i32(tmp1, a);
220 tcg_gen_ext16s_i32(tmp2, b);
3670669c 221 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 222 tcg_temp_free_i32(tmp2);
3670669c
PB
223 tcg_gen_sari_i32(a, a, 16);
224 tcg_gen_sari_i32(b, b, 16);
225 tcg_gen_mul_i32(b, b, a);
226 tcg_gen_mov_i32(a, tmp1);
7d1b0095 227 tcg_temp_free_i32(tmp1);
3670669c
PB
228}
229
230/* Byteswap each halfword. */
231static void gen_rev16(TCGv var)
232{
7d1b0095 233 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
234 tcg_gen_shri_i32(tmp, var, 8);
235 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
236 tcg_gen_shli_i32(var, var, 8);
237 tcg_gen_andi_i32(var, var, 0xff00ff00);
238 tcg_gen_or_i32(var, var, tmp);
7d1b0095 239 tcg_temp_free_i32(tmp);
3670669c
PB
240}
241
242/* Byteswap low halfword and sign extend. */
243static void gen_revsh(TCGv var)
244{
1a855029
AJ
245 tcg_gen_ext16u_i32(var, var);
246 tcg_gen_bswap16_i32(var, var);
247 tcg_gen_ext16s_i32(var, var);
3670669c
PB
248}
249
250/* Unsigned bitfield extract. */
251static void gen_ubfx(TCGv var, int shift, uint32_t mask)
252{
253 if (shift)
254 tcg_gen_shri_i32(var, var, shift);
255 tcg_gen_andi_i32(var, var, mask);
256}
257
258/* Signed bitfield extract. */
259static void gen_sbfx(TCGv var, int shift, int width)
260{
261 uint32_t signbit;
262
263 if (shift)
264 tcg_gen_sari_i32(var, var, shift);
265 if (shift + width < 32) {
266 signbit = 1u << (width - 1);
267 tcg_gen_andi_i32(var, var, (1u << width) - 1);
268 tcg_gen_xori_i32(var, var, signbit);
269 tcg_gen_subi_i32(var, var, signbit);
270 }
271}
272
273/* Bitfield insertion. Insert val into base. Clobbers base and val. */
274static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
275{
3670669c 276 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
277 tcg_gen_shli_i32(val, val, shift);
278 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
279 tcg_gen_or_i32(dest, base, val);
280}
281
838fa72d
AJ
282/* Return (b << 32) + a. Mark inputs as dead */
283static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 284{
838fa72d
AJ
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
286
287 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 288 tcg_temp_free_i32(b);
838fa72d
AJ
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
291
292 tcg_temp_free_i64(tmp64);
293 return a;
294}
295
296/* Return (b << 32) - a. Mark inputs as dead. */
297static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
298{
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
300
301 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 302 tcg_temp_free_i32(b);
838fa72d
AJ
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
305
306 tcg_temp_free_i64(tmp64);
307 return a;
3670669c
PB
308}
309
8f01245e
PB
310/* FIXME: Most targets have native widening multiplication.
311 It would be good to use that instead of a full wide multiply. */
5e3f878a 312/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 313static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 314{
a7812ae4
PB
315 TCGv_i64 tmp1 = tcg_temp_new_i64();
316 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
317
318 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 319 tcg_temp_free_i32(a);
5e3f878a 320 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 321 tcg_temp_free_i32(b);
5e3f878a 322 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 323 tcg_temp_free_i64(tmp2);
5e3f878a
PB
324 return tmp1;
325}
326
a7812ae4 327static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
a7812ae4
PB
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
331
332 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 333 tcg_temp_free_i32(a);
5e3f878a 334 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 335 tcg_temp_free_i32(b);
5e3f878a 336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 337 tcg_temp_free_i64(tmp2);
5e3f878a
PB
338 return tmp1;
339}
340
8f01245e
PB
341/* Swap low and high halfwords. */
342static void gen_swap_half(TCGv var)
343{
7d1b0095 344 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
345 tcg_gen_shri_i32(tmp, var, 16);
346 tcg_gen_shli_i32(var, var, 16);
347 tcg_gen_or_i32(var, var, tmp);
7d1b0095 348 tcg_temp_free_i32(tmp);
8f01245e
PB
349}
350
b26eefb6
PB
351/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
352 tmp = (t0 ^ t1) & 0x8000;
353 t0 &= ~0x8000;
354 t1 &= ~0x8000;
355 t0 = (t0 + t1) ^ tmp;
356 */
357
358static void gen_add16(TCGv t0, TCGv t1)
359{
7d1b0095 360 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
361 tcg_gen_xor_i32(tmp, t0, t1);
362 tcg_gen_andi_i32(tmp, tmp, 0x8000);
363 tcg_gen_andi_i32(t0, t0, ~0x8000);
364 tcg_gen_andi_i32(t1, t1, ~0x8000);
365 tcg_gen_add_i32(t0, t0, t1);
366 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
367 tcg_temp_free_i32(tmp);
368 tcg_temp_free_i32(t1);
b26eefb6
PB
369}
370
9a119ff6
PB
371#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
372
b26eefb6
PB
373/* Set CF to the top bit of var. */
374static void gen_set_CF_bit31(TCGv var)
375{
7d1b0095 376 TCGv tmp = tcg_temp_new_i32();
b26eefb6 377 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 378 gen_set_CF(tmp);
7d1b0095 379 tcg_temp_free_i32(tmp);
b26eefb6
PB
380}
381
382/* Set N and Z flags from var. */
383static inline void gen_logic_CC(TCGv var)
384{
6fbe23d5
PB
385 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
387}
388
389/* T0 += T1 + CF. */
396e467c 390static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 391{
d9ba4830 392 TCGv tmp;
396e467c 393 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 394 tmp = load_cpu_field(CF);
396e467c 395 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 396 tcg_temp_free_i32(tmp);
b26eefb6
PB
397}
398
e9bb4aa9
JR
399/* dest = T0 + T1 + CF. */
400static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
401{
402 TCGv tmp;
403 tcg_gen_add_i32(dest, t0, t1);
404 tmp = load_cpu_field(CF);
405 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 406 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
407}
408
3670669c
PB
409/* dest = T0 - T1 + CF - 1. */
410static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
411{
d9ba4830 412 TCGv tmp;
3670669c 413 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 414 tmp = load_cpu_field(CF);
3670669c
PB
415 tcg_gen_add_i32(dest, dest, tmp);
416 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 417 tcg_temp_free_i32(tmp);
3670669c
PB
418}
419
ad69471c
PB
420/* FIXME: Implement this natively. */
421#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
422
9a119ff6 423static void shifter_out_im(TCGv var, int shift)
b26eefb6 424{
7d1b0095 425 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
426 if (shift == 0) {
427 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 428 } else {
9a119ff6 429 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 430 if (shift != 31)
9a119ff6
PB
431 tcg_gen_andi_i32(tmp, tmp, 1);
432 }
433 gen_set_CF(tmp);
7d1b0095 434 tcg_temp_free_i32(tmp);
9a119ff6 435}
b26eefb6 436
9a119ff6
PB
437/* Shift by immediate. Includes special handling for shift == 0. */
438static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
439{
440 switch (shiftop) {
441 case 0: /* LSL */
442 if (shift != 0) {
443 if (flags)
444 shifter_out_im(var, 32 - shift);
445 tcg_gen_shli_i32(var, var, shift);
446 }
447 break;
448 case 1: /* LSR */
449 if (shift == 0) {
450 if (flags) {
451 tcg_gen_shri_i32(var, var, 31);
452 gen_set_CF(var);
453 }
454 tcg_gen_movi_i32(var, 0);
455 } else {
456 if (flags)
457 shifter_out_im(var, shift - 1);
458 tcg_gen_shri_i32(var, var, shift);
459 }
460 break;
461 case 2: /* ASR */
462 if (shift == 0)
463 shift = 32;
464 if (flags)
465 shifter_out_im(var, shift - 1);
466 if (shift == 32)
467 shift = 31;
468 tcg_gen_sari_i32(var, var, shift);
469 break;
470 case 3: /* ROR/RRX */
471 if (shift != 0) {
472 if (flags)
473 shifter_out_im(var, shift - 1);
f669df27 474 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 475 } else {
d9ba4830 476 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
477 if (flags)
478 shifter_out_im(var, 0);
479 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
480 tcg_gen_shli_i32(tmp, tmp, 31);
481 tcg_gen_or_i32(var, var, tmp);
7d1b0095 482 tcg_temp_free_i32(tmp);
b26eefb6
PB
483 }
484 }
485};
486
8984bd2e
PB
487static inline void gen_arm_shift_reg(TCGv var, int shiftop,
488 TCGv shift, int flags)
489{
490 if (flags) {
491 switch (shiftop) {
492 case 0: gen_helper_shl_cc(var, var, shift); break;
493 case 1: gen_helper_shr_cc(var, var, shift); break;
494 case 2: gen_helper_sar_cc(var, var, shift); break;
495 case 3: gen_helper_ror_cc(var, var, shift); break;
496 }
497 } else {
498 switch (shiftop) {
499 case 0: gen_helper_shl(var, var, shift); break;
500 case 1: gen_helper_shr(var, var, shift); break;
501 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
502 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
503 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
504 }
505 }
7d1b0095 506 tcg_temp_free_i32(shift);
8984bd2e
PB
507}
508
6ddbc6e4
PB
509#define PAS_OP(pfx) \
510 switch (op2) { \
511 case 0: gen_pas_helper(glue(pfx,add16)); break; \
512 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
513 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
514 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
515 case 4: gen_pas_helper(glue(pfx,add8)); break; \
516 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
517 }
d9ba4830 518static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 519{
a7812ae4 520 TCGv_ptr tmp;
6ddbc6e4
PB
521
522 switch (op1) {
523#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
524 case 1:
a7812ae4 525 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
526 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
527 PAS_OP(s)
b75263d6 528 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
529 break;
530 case 5:
a7812ae4 531 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
532 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
533 PAS_OP(u)
b75263d6 534 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
535 break;
536#undef gen_pas_helper
537#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
538 case 2:
539 PAS_OP(q);
540 break;
541 case 3:
542 PAS_OP(sh);
543 break;
544 case 6:
545 PAS_OP(uq);
546 break;
547 case 7:
548 PAS_OP(uh);
549 break;
550#undef gen_pas_helper
551 }
552}
9ee6e8bb
PB
553#undef PAS_OP
554
6ddbc6e4
PB
555/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
556#define PAS_OP(pfx) \
ed89a2f1 557 switch (op1) { \
6ddbc6e4
PB
558 case 0: gen_pas_helper(glue(pfx,add8)); break; \
559 case 1: gen_pas_helper(glue(pfx,add16)); break; \
560 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
562 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
564 }
d9ba4830 565static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 566{
a7812ae4 567 TCGv_ptr tmp;
6ddbc6e4 568
ed89a2f1 569 switch (op2) {
6ddbc6e4
PB
570#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
571 case 0:
a7812ae4 572 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
573 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
574 PAS_OP(s)
b75263d6 575 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
576 break;
577 case 4:
a7812ae4 578 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
b75263d6 581 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
582 break;
583#undef gen_pas_helper
584#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
585 case 1:
586 PAS_OP(q);
587 break;
588 case 2:
589 PAS_OP(sh);
590 break;
591 case 5:
592 PAS_OP(uq);
593 break;
594 case 6:
595 PAS_OP(uh);
596 break;
597#undef gen_pas_helper
598 }
599}
9ee6e8bb
PB
600#undef PAS_OP
601
d9ba4830
PB
602static void gen_test_cc(int cc, int label)
603{
604 TCGv tmp;
605 TCGv tmp2;
d9ba4830
PB
606 int inv;
607
d9ba4830
PB
608 switch (cc) {
609 case 0: /* eq: Z */
6fbe23d5 610 tmp = load_cpu_field(ZF);
cb63669a 611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
612 break;
613 case 1: /* ne: !Z */
6fbe23d5 614 tmp = load_cpu_field(ZF);
cb63669a 615 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
616 break;
617 case 2: /* cs: C */
618 tmp = load_cpu_field(CF);
cb63669a 619 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
620 break;
621 case 3: /* cc: !C */
622 tmp = load_cpu_field(CF);
cb63669a 623 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
624 break;
625 case 4: /* mi: N */
6fbe23d5 626 tmp = load_cpu_field(NF);
cb63669a 627 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
628 break;
629 case 5: /* pl: !N */
6fbe23d5 630 tmp = load_cpu_field(NF);
cb63669a 631 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
632 break;
633 case 6: /* vs: V */
634 tmp = load_cpu_field(VF);
cb63669a 635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
636 break;
637 case 7: /* vc: !V */
638 tmp = load_cpu_field(VF);
cb63669a 639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
640 break;
641 case 8: /* hi: C && !Z */
642 inv = gen_new_label();
643 tmp = load_cpu_field(CF);
cb63669a 644 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 645 tcg_temp_free_i32(tmp);
6fbe23d5 646 tmp = load_cpu_field(ZF);
cb63669a 647 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
648 gen_set_label(inv);
649 break;
650 case 9: /* ls: !C || Z */
651 tmp = load_cpu_field(CF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 653 tcg_temp_free_i32(tmp);
6fbe23d5 654 tmp = load_cpu_field(ZF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
656 break;
657 case 10: /* ge: N == V -> N ^ V == 0 */
658 tmp = load_cpu_field(VF);
6fbe23d5 659 tmp2 = load_cpu_field(NF);
d9ba4830 660 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 661 tcg_temp_free_i32(tmp2);
cb63669a 662 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
663 break;
664 case 11: /* lt: N != V -> N ^ V != 0 */
665 tmp = load_cpu_field(VF);
6fbe23d5 666 tmp2 = load_cpu_field(NF);
d9ba4830 667 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 668 tcg_temp_free_i32(tmp2);
cb63669a 669 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
670 break;
671 case 12: /* gt: !Z && N == V */
672 inv = gen_new_label();
6fbe23d5 673 tmp = load_cpu_field(ZF);
cb63669a 674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 675 tcg_temp_free_i32(tmp);
d9ba4830 676 tmp = load_cpu_field(VF);
6fbe23d5 677 tmp2 = load_cpu_field(NF);
d9ba4830 678 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 679 tcg_temp_free_i32(tmp2);
cb63669a 680 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
681 gen_set_label(inv);
682 break;
683 case 13: /* le: Z || N != V */
6fbe23d5 684 tmp = load_cpu_field(ZF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 686 tcg_temp_free_i32(tmp);
d9ba4830 687 tmp = load_cpu_field(VF);
6fbe23d5 688 tmp2 = load_cpu_field(NF);
d9ba4830 689 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 690 tcg_temp_free_i32(tmp2);
cb63669a 691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
692 break;
693 default:
694 fprintf(stderr, "Bad condition code 0x%x\n", cc);
695 abort();
696 }
7d1b0095 697 tcg_temp_free_i32(tmp);
d9ba4830 698}
2c0262af 699
b1d8e52e 700static const uint8_t table_logic_cc[16] = {
2c0262af
FB
701 1, /* and */
702 1, /* xor */
703 0, /* sub */
704 0, /* rsb */
705 0, /* add */
706 0, /* adc */
707 0, /* sbc */
708 0, /* rsc */
709 1, /* andl */
710 1, /* xorl */
711 0, /* cmp */
712 0, /* cmn */
713 1, /* orr */
714 1, /* mov */
715 1, /* bic */
716 1, /* mvn */
717};
3b46e624 718
d9ba4830
PB
719/* Set PC and Thumb state from an immediate address. */
720static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 721{
b26eefb6 722 TCGv tmp;
99c475ab 723
b26eefb6 724 s->is_jmp = DISAS_UPDATE;
d9ba4830 725 if (s->thumb != (addr & 1)) {
7d1b0095 726 tmp = tcg_temp_new_i32();
d9ba4830
PB
727 tcg_gen_movi_i32(tmp, addr & 1);
728 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 729 tcg_temp_free_i32(tmp);
d9ba4830 730 }
155c3eac 731 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
732}
733
734/* Set PC and Thumb state from var. var is marked as dead. */
735static inline void gen_bx(DisasContext *s, TCGv var)
736{
d9ba4830 737 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
738 tcg_gen_andi_i32(cpu_R[15], var, ~1);
739 tcg_gen_andi_i32(var, var, 1);
740 store_cpu_field(var, thumb);
d9ba4830
PB
741}
742
21aeb343
JR
743/* Variant of store_reg which uses branch&exchange logic when storing
744 to r15 in ARM architecture v7 and above. The source must be a temporary
745 and will be marked as dead. */
746static inline void store_reg_bx(CPUState *env, DisasContext *s,
747 int reg, TCGv var)
748{
749 if (reg == 15 && ENABLE_ARCH_7) {
750 gen_bx(s, var);
751 } else {
752 store_reg(s, reg, var);
753 }
754}
755
be5e7a76
DES
756/* Variant of store_reg which uses branch&exchange logic when storing
757 * to r15 in ARM architecture v5T and above. This is used for storing
758 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
759 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
760static inline void store_reg_from_load(CPUState *env, DisasContext *s,
761 int reg, TCGv var)
762{
763 if (reg == 15 && ENABLE_ARCH_5) {
764 gen_bx(s, var);
765 } else {
766 store_reg(s, reg, var);
767 }
768}
769
b0109805
PB
770static inline TCGv gen_ld8s(TCGv addr, int index)
771{
7d1b0095 772 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
773 tcg_gen_qemu_ld8s(tmp, addr, index);
774 return tmp;
775}
776static inline TCGv gen_ld8u(TCGv addr, int index)
777{
7d1b0095 778 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
779 tcg_gen_qemu_ld8u(tmp, addr, index);
780 return tmp;
781}
782static inline TCGv gen_ld16s(TCGv addr, int index)
783{
7d1b0095 784 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
785 tcg_gen_qemu_ld16s(tmp, addr, index);
786 return tmp;
787}
788static inline TCGv gen_ld16u(TCGv addr, int index)
789{
7d1b0095 790 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
791 tcg_gen_qemu_ld16u(tmp, addr, index);
792 return tmp;
793}
794static inline TCGv gen_ld32(TCGv addr, int index)
795{
7d1b0095 796 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
797 tcg_gen_qemu_ld32u(tmp, addr, index);
798 return tmp;
799}
84496233
JR
800static inline TCGv_i64 gen_ld64(TCGv addr, int index)
801{
802 TCGv_i64 tmp = tcg_temp_new_i64();
803 tcg_gen_qemu_ld64(tmp, addr, index);
804 return tmp;
805}
b0109805
PB
806static inline void gen_st8(TCGv val, TCGv addr, int index)
807{
808 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 809 tcg_temp_free_i32(val);
b0109805
PB
810}
811static inline void gen_st16(TCGv val, TCGv addr, int index)
812{
813 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 814 tcg_temp_free_i32(val);
b0109805
PB
815}
816static inline void gen_st32(TCGv val, TCGv addr, int index)
817{
818 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 819 tcg_temp_free_i32(val);
b0109805 820}
84496233
JR
821static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
822{
823 tcg_gen_qemu_st64(val, addr, index);
824 tcg_temp_free_i64(val);
825}
b5ff1b31 826
5e3f878a
PB
827static inline void gen_set_pc_im(uint32_t val)
828{
155c3eac 829 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
830}
831
b5ff1b31
FB
832/* Force a TB lookup after an instruction that changes the CPU state. */
833static inline void gen_lookup_tb(DisasContext *s)
834{
a6445c52 835 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
836 s->is_jmp = DISAS_UPDATE;
837}
838
b0109805
PB
839static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
840 TCGv var)
2c0262af 841{
1e8d4eec 842 int val, rm, shift, shiftop;
b26eefb6 843 TCGv offset;
2c0262af
FB
844
845 if (!(insn & (1 << 25))) {
846 /* immediate */
847 val = insn & 0xfff;
848 if (!(insn & (1 << 23)))
849 val = -val;
537730b9 850 if (val != 0)
b0109805 851 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
852 } else {
853 /* shift/register */
854 rm = (insn) & 0xf;
855 shift = (insn >> 7) & 0x1f;
1e8d4eec 856 shiftop = (insn >> 5) & 3;
b26eefb6 857 offset = load_reg(s, rm);
9a119ff6 858 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 859 if (!(insn & (1 << 23)))
b0109805 860 tcg_gen_sub_i32(var, var, offset);
2c0262af 861 else
b0109805 862 tcg_gen_add_i32(var, var, offset);
7d1b0095 863 tcg_temp_free_i32(offset);
2c0262af
FB
864 }
865}
866
191f9a93 867static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 868 int extra, TCGv var)
2c0262af
FB
869{
870 int val, rm;
b26eefb6 871 TCGv offset;
3b46e624 872
2c0262af
FB
873 if (insn & (1 << 22)) {
874 /* immediate */
875 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
876 if (!(insn & (1 << 23)))
877 val = -val;
18acad92 878 val += extra;
537730b9 879 if (val != 0)
b0109805 880 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
881 } else {
882 /* register */
191f9a93 883 if (extra)
b0109805 884 tcg_gen_addi_i32(var, var, extra);
2c0262af 885 rm = (insn) & 0xf;
b26eefb6 886 offset = load_reg(s, rm);
2c0262af 887 if (!(insn & (1 << 23)))
b0109805 888 tcg_gen_sub_i32(var, var, offset);
2c0262af 889 else
b0109805 890 tcg_gen_add_i32(var, var, offset);
7d1b0095 891 tcg_temp_free_i32(offset);
2c0262af
FB
892 }
893}
894
5aaebd13
PM
895static TCGv_ptr get_fpstatus_ptr(int neon)
896{
897 TCGv_ptr statusptr = tcg_temp_new_ptr();
898 int offset;
899 if (neon) {
900 offset = offsetof(CPUState, vfp.standard_fp_status);
901 } else {
902 offset = offsetof(CPUState, vfp.fp_status);
903 }
904 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
905 return statusptr;
906}
907
4373f3ce
PB
908#define VFP_OP2(name) \
909static inline void gen_vfp_##name(int dp) \
910{ \
ae1857ec
PM
911 TCGv_ptr fpst = get_fpstatus_ptr(0); \
912 if (dp) { \
913 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
914 } else { \
915 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
916 } \
917 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
918}
919
4373f3ce
PB
920VFP_OP2(add)
921VFP_OP2(sub)
922VFP_OP2(mul)
923VFP_OP2(div)
924
925#undef VFP_OP2
926
605a6aed
PM
927static inline void gen_vfp_F1_mul(int dp)
928{
929 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 930 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 931 if (dp) {
ae1857ec 932 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 933 } else {
ae1857ec 934 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 935 }
ae1857ec 936 tcg_temp_free_ptr(fpst);
605a6aed
PM
937}
938
939static inline void gen_vfp_F1_neg(int dp)
940{
941 /* Like gen_vfp_neg() but put result in F1 */
942 if (dp) {
943 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
944 } else {
945 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
946 }
947}
948
4373f3ce
PB
949static inline void gen_vfp_abs(int dp)
950{
951 if (dp)
952 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
953 else
954 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
955}
956
957static inline void gen_vfp_neg(int dp)
958{
959 if (dp)
960 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
961 else
962 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
963}
964
965static inline void gen_vfp_sqrt(int dp)
966{
967 if (dp)
968 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
969 else
970 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
971}
972
973static inline void gen_vfp_cmp(int dp)
974{
975 if (dp)
976 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
977 else
978 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
979}
980
981static inline void gen_vfp_cmpe(int dp)
982{
983 if (dp)
984 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
985 else
986 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
987}
988
989static inline void gen_vfp_F1_ld0(int dp)
990{
991 if (dp)
5b340b51 992 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 993 else
5b340b51 994 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
995}
996
5500b06c
PM
997#define VFP_GEN_ITOF(name) \
998static inline void gen_vfp_##name(int dp, int neon) \
999{ \
5aaebd13 1000 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1001 if (dp) { \
1002 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1003 } else { \
1004 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1005 } \
b7fa9214 1006 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1007}
1008
5500b06c
PM
1009VFP_GEN_ITOF(uito)
1010VFP_GEN_ITOF(sito)
1011#undef VFP_GEN_ITOF
4373f3ce 1012
5500b06c
PM
1013#define VFP_GEN_FTOI(name) \
1014static inline void gen_vfp_##name(int dp, int neon) \
1015{ \
5aaebd13 1016 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1017 if (dp) { \
1018 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1019 } else { \
1020 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1021 } \
b7fa9214 1022 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1023}
1024
5500b06c
PM
1025VFP_GEN_FTOI(toui)
1026VFP_GEN_FTOI(touiz)
1027VFP_GEN_FTOI(tosi)
1028VFP_GEN_FTOI(tosiz)
1029#undef VFP_GEN_FTOI
4373f3ce
PB
1030
1031#define VFP_GEN_FIX(name) \
5500b06c 1032static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1033{ \
b75263d6 1034 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1035 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1036 if (dp) { \
1037 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1038 } else { \
1039 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1040 } \
b75263d6 1041 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1042 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1043}
4373f3ce
PB
1044VFP_GEN_FIX(tosh)
1045VFP_GEN_FIX(tosl)
1046VFP_GEN_FIX(touh)
1047VFP_GEN_FIX(toul)
1048VFP_GEN_FIX(shto)
1049VFP_GEN_FIX(slto)
1050VFP_GEN_FIX(uhto)
1051VFP_GEN_FIX(ulto)
1052#undef VFP_GEN_FIX
9ee6e8bb 1053
312eea9f 1054static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1055{
1056 if (dp)
312eea9f 1057 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1058 else
312eea9f 1059 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1060}
1061
312eea9f 1062static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1063{
1064 if (dp)
312eea9f 1065 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1066 else
312eea9f 1067 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1068}
1069
8e96005d
FB
1070static inline long
1071vfp_reg_offset (int dp, int reg)
1072{
1073 if (dp)
1074 return offsetof(CPUARMState, vfp.regs[reg]);
1075 else if (reg & 1) {
1076 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1077 + offsetof(CPU_DoubleU, l.upper);
1078 } else {
1079 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1080 + offsetof(CPU_DoubleU, l.lower);
1081 }
1082}
9ee6e8bb
PB
1083
1084/* Return the offset of a 32-bit piece of a NEON register.
1085 zero is the least significant end of the register. */
1086static inline long
1087neon_reg_offset (int reg, int n)
1088{
1089 int sreg;
1090 sreg = reg * 2 + n;
1091 return vfp_reg_offset(0, sreg);
1092}
1093
8f8e3aa4
PB
1094static TCGv neon_load_reg(int reg, int pass)
1095{
7d1b0095 1096 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1097 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1098 return tmp;
1099}
1100
1101static void neon_store_reg(int reg, int pass, TCGv var)
1102{
1103 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1104 tcg_temp_free_i32(var);
8f8e3aa4
PB
1105}
1106
a7812ae4 1107static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1108{
1109 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1110}
1111
a7812ae4 1112static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1113{
1114 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1115}
1116
4373f3ce
PB
1117#define tcg_gen_ld_f32 tcg_gen_ld_i32
1118#define tcg_gen_ld_f64 tcg_gen_ld_i64
1119#define tcg_gen_st_f32 tcg_gen_st_i32
1120#define tcg_gen_st_f64 tcg_gen_st_i64
1121
b7bcbe95
FB
1122static inline void gen_mov_F0_vreg(int dp, int reg)
1123{
1124 if (dp)
4373f3ce 1125 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1126 else
4373f3ce 1127 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1128}
1129
1130static inline void gen_mov_F1_vreg(int dp, int reg)
1131{
1132 if (dp)
4373f3ce 1133 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1134 else
4373f3ce 1135 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1136}
1137
1138static inline void gen_mov_vreg_F0(int dp, int reg)
1139{
1140 if (dp)
4373f3ce 1141 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1142 else
4373f3ce 1143 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1144}
1145
18c9b560
AZ
1146#define ARM_CP_RW_BIT (1 << 20)
1147
a7812ae4 1148static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1149{
1150 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1151}
1152
a7812ae4 1153static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1154{
1155 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1156}
1157
da6b5335 1158static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1159{
7d1b0095 1160 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1161 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1162 return var;
e677137d
PB
1163}
1164
da6b5335 1165static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1166{
da6b5335 1167 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
7d1b0095 1168 tcg_temp_free_i32(var);
e677137d
PB
1169}
1170
1171static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1172{
1173 iwmmxt_store_reg(cpu_M0, rn);
1174}
1175
1176static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1177{
1178 iwmmxt_load_reg(cpu_M0, rn);
1179}
1180
1181static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1182{
1183 iwmmxt_load_reg(cpu_V1, rn);
1184 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1185}
1186
1187static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1188{
1189 iwmmxt_load_reg(cpu_V1, rn);
1190 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1191}
1192
1193static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1194{
1195 iwmmxt_load_reg(cpu_V1, rn);
1196 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1197}
1198
1199#define IWMMXT_OP(name) \
1200static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1201{ \
1202 iwmmxt_load_reg(cpu_V1, rn); \
1203 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1204}
1205
477955bd
PM
1206#define IWMMXT_OP_ENV(name) \
1207static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1208{ \
1209 iwmmxt_load_reg(cpu_V1, rn); \
1210 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1211}
1212
1213#define IWMMXT_OP_ENV_SIZE(name) \
1214IWMMXT_OP_ENV(name##b) \
1215IWMMXT_OP_ENV(name##w) \
1216IWMMXT_OP_ENV(name##l)
e677137d 1217
477955bd 1218#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1219static inline void gen_op_iwmmxt_##name##_M0(void) \
1220{ \
477955bd 1221 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1222}
1223
1224IWMMXT_OP(maddsq)
1225IWMMXT_OP(madduq)
1226IWMMXT_OP(sadb)
1227IWMMXT_OP(sadw)
1228IWMMXT_OP(mulslw)
1229IWMMXT_OP(mulshw)
1230IWMMXT_OP(mululw)
1231IWMMXT_OP(muluhw)
1232IWMMXT_OP(macsw)
1233IWMMXT_OP(macuw)
1234
477955bd
PM
1235IWMMXT_OP_ENV_SIZE(unpackl)
1236IWMMXT_OP_ENV_SIZE(unpackh)
1237
1238IWMMXT_OP_ENV1(unpacklub)
1239IWMMXT_OP_ENV1(unpackluw)
1240IWMMXT_OP_ENV1(unpacklul)
1241IWMMXT_OP_ENV1(unpackhub)
1242IWMMXT_OP_ENV1(unpackhuw)
1243IWMMXT_OP_ENV1(unpackhul)
1244IWMMXT_OP_ENV1(unpacklsb)
1245IWMMXT_OP_ENV1(unpacklsw)
1246IWMMXT_OP_ENV1(unpacklsl)
1247IWMMXT_OP_ENV1(unpackhsb)
1248IWMMXT_OP_ENV1(unpackhsw)
1249IWMMXT_OP_ENV1(unpackhsl)
1250
1251IWMMXT_OP_ENV_SIZE(cmpeq)
1252IWMMXT_OP_ENV_SIZE(cmpgtu)
1253IWMMXT_OP_ENV_SIZE(cmpgts)
1254
1255IWMMXT_OP_ENV_SIZE(mins)
1256IWMMXT_OP_ENV_SIZE(minu)
1257IWMMXT_OP_ENV_SIZE(maxs)
1258IWMMXT_OP_ENV_SIZE(maxu)
1259
1260IWMMXT_OP_ENV_SIZE(subn)
1261IWMMXT_OP_ENV_SIZE(addn)
1262IWMMXT_OP_ENV_SIZE(subu)
1263IWMMXT_OP_ENV_SIZE(addu)
1264IWMMXT_OP_ENV_SIZE(subs)
1265IWMMXT_OP_ENV_SIZE(adds)
1266
1267IWMMXT_OP_ENV(avgb0)
1268IWMMXT_OP_ENV(avgb1)
1269IWMMXT_OP_ENV(avgw0)
1270IWMMXT_OP_ENV(avgw1)
e677137d
PB
1271
1272IWMMXT_OP(msadb)
1273
477955bd
PM
1274IWMMXT_OP_ENV(packuw)
1275IWMMXT_OP_ENV(packul)
1276IWMMXT_OP_ENV(packuq)
1277IWMMXT_OP_ENV(packsw)
1278IWMMXT_OP_ENV(packsl)
1279IWMMXT_OP_ENV(packsq)
e677137d 1280
e677137d
PB
1281static void gen_op_iwmmxt_set_mup(void)
1282{
1283 TCGv tmp;
1284 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1285 tcg_gen_ori_i32(tmp, tmp, 2);
1286 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1287}
1288
1289static void gen_op_iwmmxt_set_cup(void)
1290{
1291 TCGv tmp;
1292 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1293 tcg_gen_ori_i32(tmp, tmp, 1);
1294 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1295}
1296
1297static void gen_op_iwmmxt_setpsr_nz(void)
1298{
7d1b0095 1299 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1300 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1301 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1302}
1303
1304static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1305{
1306 iwmmxt_load_reg(cpu_V1, rn);
86831435 1307 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1308 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1309}
1310
da6b5335 1311static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1312{
1313 int rd;
1314 uint32_t offset;
da6b5335 1315 TCGv tmp;
18c9b560
AZ
1316
1317 rd = (insn >> 16) & 0xf;
da6b5335 1318 tmp = load_reg(s, rd);
18c9b560
AZ
1319
1320 offset = (insn & 0xff) << ((insn >> 7) & 2);
1321 if (insn & (1 << 24)) {
1322 /* Pre indexed */
1323 if (insn & (1 << 23))
da6b5335 1324 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1325 else
da6b5335
FN
1326 tcg_gen_addi_i32(tmp, tmp, -offset);
1327 tcg_gen_mov_i32(dest, tmp);
18c9b560 1328 if (insn & (1 << 21))
da6b5335
FN
1329 store_reg(s, rd, tmp);
1330 else
7d1b0095 1331 tcg_temp_free_i32(tmp);
18c9b560
AZ
1332 } else if (insn & (1 << 21)) {
1333 /* Post indexed */
da6b5335 1334 tcg_gen_mov_i32(dest, tmp);
18c9b560 1335 if (insn & (1 << 23))
da6b5335 1336 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1337 else
da6b5335
FN
1338 tcg_gen_addi_i32(tmp, tmp, -offset);
1339 store_reg(s, rd, tmp);
18c9b560
AZ
1340 } else if (!(insn & (1 << 23)))
1341 return 1;
1342 return 0;
1343}
1344
da6b5335 1345static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1346{
1347 int rd = (insn >> 0) & 0xf;
da6b5335 1348 TCGv tmp;
18c9b560 1349
da6b5335
FN
1350 if (insn & (1 << 8)) {
1351 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1352 return 1;
da6b5335
FN
1353 } else {
1354 tmp = iwmmxt_load_creg(rd);
1355 }
1356 } else {
7d1b0095 1357 tmp = tcg_temp_new_i32();
da6b5335
FN
1358 iwmmxt_load_reg(cpu_V0, rd);
1359 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1360 }
1361 tcg_gen_andi_i32(tmp, tmp, mask);
1362 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1363 tcg_temp_free_i32(tmp);
18c9b560
AZ
1364 return 0;
1365}
1366
a1c7273b 1367/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560
AZ
1368 (ie. an undefined instruction). */
1369static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1370{
1371 int rd, wrd;
1372 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1373 TCGv addr;
1374 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1375
1376 if ((insn & 0x0e000e00) == 0x0c000000) {
1377 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1378 wrd = insn & 0xf;
1379 rdlo = (insn >> 12) & 0xf;
1380 rdhi = (insn >> 16) & 0xf;
1381 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1382 iwmmxt_load_reg(cpu_V0, wrd);
1383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1386 } else { /* TMCRR */
da6b5335
FN
1387 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1388 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1389 gen_op_iwmmxt_set_mup();
1390 }
1391 return 0;
1392 }
1393
1394 wrd = (insn >> 12) & 0xf;
7d1b0095 1395 addr = tcg_temp_new_i32();
da6b5335 1396 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1397 tcg_temp_free_i32(addr);
18c9b560 1398 return 1;
da6b5335 1399 }
18c9b560
AZ
1400 if (insn & ARM_CP_RW_BIT) {
1401 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1402 tmp = tcg_temp_new_i32();
da6b5335
FN
1403 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1404 iwmmxt_store_creg(wrd, tmp);
18c9b560 1405 } else {
e677137d
PB
1406 i = 1;
1407 if (insn & (1 << 8)) {
1408 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1409 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1410 i = 0;
1411 } else { /* WLDRW wRd */
da6b5335 1412 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1416 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1417 } else { /* WLDRB */
da6b5335 1418 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1419 }
1420 }
1421 if (i) {
1422 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1423 tcg_temp_free_i32(tmp);
e677137d 1424 }
18c9b560
AZ
1425 gen_op_iwmmxt_movq_wRn_M0(wrd);
1426 }
1427 } else {
1428 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1429 tmp = iwmmxt_load_creg(wrd);
1430 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1431 } else {
1432 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1433 tmp = tcg_temp_new_i32();
e677137d
PB
1434 if (insn & (1 << 8)) {
1435 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1436 tcg_temp_free_i32(tmp);
da6b5335 1437 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1438 } else { /* WSTRW wRd */
1439 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1440 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1441 }
1442 } else {
1443 if (insn & (1 << 22)) { /* WSTRH */
1444 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1445 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1446 } else { /* WSTRB */
1447 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1448 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1449 }
1450 }
18c9b560
AZ
1451 }
1452 }
7d1b0095 1453 tcg_temp_free_i32(addr);
18c9b560
AZ
1454 return 0;
1455 }
1456
1457 if ((insn & 0x0f000000) != 0x0e000000)
1458 return 1;
1459
1460 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1461 case 0x000: /* WOR */
1462 wrd = (insn >> 12) & 0xf;
1463 rd0 = (insn >> 0) & 0xf;
1464 rd1 = (insn >> 16) & 0xf;
1465 gen_op_iwmmxt_movq_M0_wRn(rd0);
1466 gen_op_iwmmxt_orq_M0_wRn(rd1);
1467 gen_op_iwmmxt_setpsr_nz();
1468 gen_op_iwmmxt_movq_wRn_M0(wrd);
1469 gen_op_iwmmxt_set_mup();
1470 gen_op_iwmmxt_set_cup();
1471 break;
1472 case 0x011: /* TMCR */
1473 if (insn & 0xf)
1474 return 1;
1475 rd = (insn >> 12) & 0xf;
1476 wrd = (insn >> 16) & 0xf;
1477 switch (wrd) {
1478 case ARM_IWMMXT_wCID:
1479 case ARM_IWMMXT_wCASF:
1480 break;
1481 case ARM_IWMMXT_wCon:
1482 gen_op_iwmmxt_set_cup();
1483 /* Fall through. */
1484 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1485 tmp = iwmmxt_load_creg(wrd);
1486 tmp2 = load_reg(s, rd);
f669df27 1487 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1488 tcg_temp_free_i32(tmp2);
da6b5335 1489 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1490 break;
1491 case ARM_IWMMXT_wCGR0:
1492 case ARM_IWMMXT_wCGR1:
1493 case ARM_IWMMXT_wCGR2:
1494 case ARM_IWMMXT_wCGR3:
1495 gen_op_iwmmxt_set_cup();
da6b5335
FN
1496 tmp = load_reg(s, rd);
1497 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1498 break;
1499 default:
1500 return 1;
1501 }
1502 break;
1503 case 0x100: /* WXOR */
1504 wrd = (insn >> 12) & 0xf;
1505 rd0 = (insn >> 0) & 0xf;
1506 rd1 = (insn >> 16) & 0xf;
1507 gen_op_iwmmxt_movq_M0_wRn(rd0);
1508 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1509 gen_op_iwmmxt_setpsr_nz();
1510 gen_op_iwmmxt_movq_wRn_M0(wrd);
1511 gen_op_iwmmxt_set_mup();
1512 gen_op_iwmmxt_set_cup();
1513 break;
1514 case 0x111: /* TMRC */
1515 if (insn & 0xf)
1516 return 1;
1517 rd = (insn >> 12) & 0xf;
1518 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1519 tmp = iwmmxt_load_creg(wrd);
1520 store_reg(s, rd, tmp);
18c9b560
AZ
1521 break;
1522 case 0x300: /* WANDN */
1523 wrd = (insn >> 12) & 0xf;
1524 rd0 = (insn >> 0) & 0xf;
1525 rd1 = (insn >> 16) & 0xf;
1526 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1527 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1528 gen_op_iwmmxt_andq_M0_wRn(rd1);
1529 gen_op_iwmmxt_setpsr_nz();
1530 gen_op_iwmmxt_movq_wRn_M0(wrd);
1531 gen_op_iwmmxt_set_mup();
1532 gen_op_iwmmxt_set_cup();
1533 break;
1534 case 0x200: /* WAND */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_andq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x810: case 0xa10: /* WMADD */
1546 wrd = (insn >> 12) & 0xf;
1547 rd0 = (insn >> 0) & 0xf;
1548 rd1 = (insn >> 16) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0);
1550 if (insn & (1 << 21))
1551 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1552 else
1553 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 break;
1557 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1558 wrd = (insn >> 12) & 0xf;
1559 rd0 = (insn >> 16) & 0xf;
1560 rd1 = (insn >> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0);
1562 switch ((insn >> 22) & 3) {
1563 case 0:
1564 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1565 break;
1566 case 1:
1567 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1568 break;
1569 case 2:
1570 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1571 break;
1572 case 3:
1573 return 1;
1574 }
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 gen_op_iwmmxt_set_cup();
1578 break;
1579 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1580 wrd = (insn >> 12) & 0xf;
1581 rd0 = (insn >> 16) & 0xf;
1582 rd1 = (insn >> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0);
1584 switch ((insn >> 22) & 3) {
1585 case 0:
1586 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1587 break;
1588 case 1:
1589 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1590 break;
1591 case 2:
1592 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1593 break;
1594 case 3:
1595 return 1;
1596 }
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1598 gen_op_iwmmxt_set_mup();
1599 gen_op_iwmmxt_set_cup();
1600 break;
1601 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 22))
1607 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1610 if (!(insn & (1 << 20)))
1611 gen_op_iwmmxt_addl_M0_wRn(wrd);
1612 gen_op_iwmmxt_movq_wRn_M0(wrd);
1613 gen_op_iwmmxt_set_mup();
1614 break;
1615 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1616 wrd = (insn >> 12) & 0xf;
1617 rd0 = (insn >> 16) & 0xf;
1618 rd1 = (insn >> 0) & 0xf;
1619 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1620 if (insn & (1 << 21)) {
1621 if (insn & (1 << 20))
1622 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1623 else
1624 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1625 } else {
1626 if (insn & (1 << 20))
1627 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1628 else
1629 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1630 }
18c9b560
AZ
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 break;
1634 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1643 if (!(insn & (1 << 20))) {
e677137d
PB
1644 iwmmxt_load_reg(cpu_V1, wrd);
1645 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1646 }
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 break;
1650 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 16) & 0xf;
1653 rd1 = (insn >> 0) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 switch ((insn >> 22) & 3) {
1656 case 0:
1657 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1658 break;
1659 case 1:
1660 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1661 break;
1662 case 2:
1663 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1664 break;
1665 case 3:
1666 return 1;
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1671 break;
1672 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1673 wrd = (insn >> 12) & 0xf;
1674 rd0 = (insn >> 16) & 0xf;
1675 rd1 = (insn >> 0) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1677 if (insn & (1 << 22)) {
1678 if (insn & (1 << 20))
1679 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1680 else
1681 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1682 } else {
1683 if (insn & (1 << 20))
1684 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1685 else
1686 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1687 }
18c9b560
AZ
1688 gen_op_iwmmxt_movq_wRn_M0(wrd);
1689 gen_op_iwmmxt_set_mup();
1690 gen_op_iwmmxt_set_cup();
1691 break;
1692 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1697 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1698 tcg_gen_andi_i32(tmp, tmp, 7);
1699 iwmmxt_load_reg(cpu_V1, rd1);
1700 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1701 tcg_temp_free_i32(tmp);
18c9b560
AZ
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 break;
1705 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1706 if (((insn >> 6) & 3) == 3)
1707 return 1;
18c9b560
AZ
1708 rd = (insn >> 12) & 0xf;
1709 wrd = (insn >> 16) & 0xf;
da6b5335 1710 tmp = load_reg(s, rd);
18c9b560
AZ
1711 gen_op_iwmmxt_movq_M0_wRn(wrd);
1712 switch ((insn >> 6) & 3) {
1713 case 0:
da6b5335
FN
1714 tmp2 = tcg_const_i32(0xff);
1715 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1716 break;
1717 case 1:
da6b5335
FN
1718 tmp2 = tcg_const_i32(0xffff);
1719 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1720 break;
1721 case 2:
da6b5335
FN
1722 tmp2 = tcg_const_i32(0xffffffff);
1723 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1724 break;
da6b5335
FN
1725 default:
1726 TCGV_UNUSED(tmp2);
1727 TCGV_UNUSED(tmp3);
18c9b560 1728 }
da6b5335
FN
1729 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1730 tcg_temp_free(tmp3);
1731 tcg_temp_free(tmp2);
7d1b0095 1732 tcg_temp_free_i32(tmp);
18c9b560
AZ
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1737 rd = (insn >> 12) & 0xf;
1738 wrd = (insn >> 16) & 0xf;
da6b5335 1739 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1740 return 1;
1741 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1742 tmp = tcg_temp_new_i32();
18c9b560
AZ
1743 switch ((insn >> 22) & 3) {
1744 case 0:
da6b5335
FN
1745 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1746 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1747 if (insn & 8) {
1748 tcg_gen_ext8s_i32(tmp, tmp);
1749 } else {
1750 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1751 }
1752 break;
1753 case 1:
da6b5335
FN
1754 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1755 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1756 if (insn & 8) {
1757 tcg_gen_ext16s_i32(tmp, tmp);
1758 } else {
1759 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1760 }
1761 break;
1762 case 2:
da6b5335
FN
1763 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1764 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1765 break;
18c9b560 1766 }
da6b5335 1767 store_reg(s, rd, tmp);
18c9b560
AZ
1768 break;
1769 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1770 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1771 return 1;
da6b5335 1772 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1773 switch ((insn >> 22) & 3) {
1774 case 0:
da6b5335 1775 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1776 break;
1777 case 1:
da6b5335 1778 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1779 break;
1780 case 2:
da6b5335 1781 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1782 break;
18c9b560 1783 }
da6b5335
FN
1784 tcg_gen_shli_i32(tmp, tmp, 28);
1785 gen_set_nzcv(tmp);
7d1b0095 1786 tcg_temp_free_i32(tmp);
18c9b560
AZ
1787 break;
1788 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1789 if (((insn >> 6) & 3) == 3)
1790 return 1;
18c9b560
AZ
1791 rd = (insn >> 12) & 0xf;
1792 wrd = (insn >> 16) & 0xf;
da6b5335 1793 tmp = load_reg(s, rd);
18c9b560
AZ
1794 switch ((insn >> 6) & 3) {
1795 case 0:
da6b5335 1796 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1797 break;
1798 case 1:
da6b5335 1799 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1800 break;
1801 case 2:
da6b5335 1802 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1803 break;
18c9b560 1804 }
7d1b0095 1805 tcg_temp_free_i32(tmp);
18c9b560
AZ
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1810 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1811 return 1;
da6b5335 1812 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1813 tmp2 = tcg_temp_new_i32();
da6b5335 1814 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1815 switch ((insn >> 22) & 3) {
1816 case 0:
1817 for (i = 0; i < 7; i ++) {
da6b5335
FN
1818 tcg_gen_shli_i32(tmp2, tmp2, 4);
1819 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1820 }
1821 break;
1822 case 1:
1823 for (i = 0; i < 3; i ++) {
da6b5335
FN
1824 tcg_gen_shli_i32(tmp2, tmp2, 8);
1825 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1826 }
1827 break;
1828 case 2:
da6b5335
FN
1829 tcg_gen_shli_i32(tmp2, tmp2, 16);
1830 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1831 break;
18c9b560 1832 }
da6b5335 1833 gen_set_nzcv(tmp);
7d1b0095
PM
1834 tcg_temp_free_i32(tmp2);
1835 tcg_temp_free_i32(tmp);
18c9b560
AZ
1836 break;
1837 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 switch ((insn >> 22) & 3) {
1842 case 0:
e677137d 1843 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1844 break;
1845 case 1:
e677137d 1846 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1847 break;
1848 case 2:
e677137d 1849 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1850 break;
1851 case 3:
1852 return 1;
1853 }
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 break;
1857 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1858 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1859 return 1;
da6b5335 1860 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1861 tmp2 = tcg_temp_new_i32();
da6b5335 1862 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1863 switch ((insn >> 22) & 3) {
1864 case 0:
1865 for (i = 0; i < 7; i ++) {
da6b5335
FN
1866 tcg_gen_shli_i32(tmp2, tmp2, 4);
1867 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1868 }
1869 break;
1870 case 1:
1871 for (i = 0; i < 3; i ++) {
da6b5335
FN
1872 tcg_gen_shli_i32(tmp2, tmp2, 8);
1873 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1874 }
1875 break;
1876 case 2:
da6b5335
FN
1877 tcg_gen_shli_i32(tmp2, tmp2, 16);
1878 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1879 break;
18c9b560 1880 }
da6b5335 1881 gen_set_nzcv(tmp);
7d1b0095
PM
1882 tcg_temp_free_i32(tmp2);
1883 tcg_temp_free_i32(tmp);
18c9b560
AZ
1884 break;
1885 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1886 rd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
da6b5335 1888 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1889 return 1;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1891 tmp = tcg_temp_new_i32();
18c9b560
AZ
1892 switch ((insn >> 22) & 3) {
1893 case 0:
da6b5335 1894 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1895 break;
1896 case 1:
da6b5335 1897 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1898 break;
1899 case 2:
da6b5335 1900 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1901 break;
18c9b560 1902 }
da6b5335 1903 store_reg(s, rd, tmp);
18c9b560
AZ
1904 break;
1905 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1906 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
1911 switch ((insn >> 22) & 3) {
1912 case 0:
1913 if (insn & (1 << 21))
1914 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1915 else
1916 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1917 break;
1918 case 1:
1919 if (insn & (1 << 21))
1920 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1923 break;
1924 case 2:
1925 if (insn & (1 << 21))
1926 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1927 else
1928 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1929 break;
1930 case 3:
1931 return 1;
1932 }
1933 gen_op_iwmmxt_movq_wRn_M0(wrd);
1934 gen_op_iwmmxt_set_mup();
1935 gen_op_iwmmxt_set_cup();
1936 break;
1937 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1938 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1939 wrd = (insn >> 12) & 0xf;
1940 rd0 = (insn >> 16) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_unpacklsb_M0();
1946 else
1947 gen_op_iwmmxt_unpacklub_M0();
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_unpacklsw_M0();
1952 else
1953 gen_op_iwmmxt_unpackluw_M0();
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_unpacklsl_M0();
1958 else
1959 gen_op_iwmmxt_unpacklul_M0();
1960 break;
1961 case 3:
1962 return 1;
1963 }
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1969 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpackhsb_M0();
1977 else
1978 gen_op_iwmmxt_unpackhub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpackhsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackhuw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpackhsl_M0();
1989 else
1990 gen_op_iwmmxt_unpackhul_M0();
1991 break;
1992 case 3:
1993 return 1;
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2000 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
18c9b560
AZ
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2006 tmp = tcg_temp_new_i32();
da6b5335 2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2008 tcg_temp_free_i32(tmp);
18c9b560 2009 return 1;
da6b5335 2010 }
18c9b560 2011 switch ((insn >> 22) & 3) {
18c9b560 2012 case 1:
477955bd 2013 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2014 break;
2015 case 2:
477955bd 2016 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2017 break;
2018 case 3:
477955bd 2019 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2020 break;
2021 }
7d1b0095 2022 tcg_temp_free_i32(tmp);
18c9b560
AZ
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2028 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
18c9b560
AZ
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2034 tmp = tcg_temp_new_i32();
da6b5335 2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2036 tcg_temp_free_i32(tmp);
18c9b560 2037 return 1;
da6b5335 2038 }
18c9b560 2039 switch ((insn >> 22) & 3) {
18c9b560 2040 case 1:
477955bd 2041 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2042 break;
2043 case 2:
477955bd 2044 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2045 break;
2046 case 3:
477955bd 2047 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2048 break;
2049 }
7d1b0095 2050 tcg_temp_free_i32(tmp);
18c9b560
AZ
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2056 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
18c9b560
AZ
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2062 tmp = tcg_temp_new_i32();
da6b5335 2063 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2064 tcg_temp_free_i32(tmp);
18c9b560 2065 return 1;
da6b5335 2066 }
18c9b560 2067 switch ((insn >> 22) & 3) {
18c9b560 2068 case 1:
477955bd 2069 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2070 break;
2071 case 2:
477955bd 2072 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2073 break;
2074 case 3:
477955bd 2075 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 }
7d1b0095 2078 tcg_temp_free_i32(tmp);
18c9b560
AZ
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2084 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2085 if (((insn >> 22) & 3) == 0)
2086 return 1;
18c9b560
AZ
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2090 tmp = tcg_temp_new_i32();
18c9b560 2091 switch ((insn >> 22) & 3) {
18c9b560 2092 case 1:
da6b5335 2093 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2094 tcg_temp_free_i32(tmp);
18c9b560 2095 return 1;
da6b5335 2096 }
477955bd 2097 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2098 break;
2099 case 2:
da6b5335 2100 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2101 tcg_temp_free_i32(tmp);
18c9b560 2102 return 1;
da6b5335 2103 }
477955bd 2104 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2105 break;
2106 case 3:
da6b5335 2107 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2108 tcg_temp_free_i32(tmp);
18c9b560 2109 return 1;
da6b5335 2110 }
477955bd 2111 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2112 break;
2113 }
7d1b0095 2114 tcg_temp_free_i32(tmp);
18c9b560
AZ
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2120 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 rd1 = (insn >> 0) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 switch ((insn >> 22) & 3) {
2126 case 0:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_minub_M0_wRn(rd1);
2131 break;
2132 case 1:
2133 if (insn & (1 << 21))
2134 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2135 else
2136 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2137 break;
2138 case 2:
2139 if (insn & (1 << 21))
2140 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2141 else
2142 gen_op_iwmmxt_minul_M0_wRn(rd1);
2143 break;
2144 case 3:
2145 return 1;
2146 }
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2151 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2182 case 0x402: case 0x502: case 0x602: case 0x702:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2187 tmp = tcg_const_i32((insn >> 20) & 3);
2188 iwmmxt_load_reg(cpu_V1, rd1);
2189 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2190 tcg_temp_free(tmp);
18c9b560
AZ
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 break;
2194 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2195 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2196 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2197 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 20) & 0xf) {
2203 case 0x0:
2204 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2205 break;
2206 case 0x1:
2207 gen_op_iwmmxt_subub_M0_wRn(rd1);
2208 break;
2209 case 0x3:
2210 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2211 break;
2212 case 0x4:
2213 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2214 break;
2215 case 0x5:
2216 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2217 break;
2218 case 0x7:
2219 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2220 break;
2221 case 0x8:
2222 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2223 break;
2224 case 0x9:
2225 gen_op_iwmmxt_subul_M0_wRn(rd1);
2226 break;
2227 case 0xb:
2228 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2229 break;
2230 default:
2231 return 1;
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2236 break;
2237 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2238 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2239 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2240 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2244 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2245 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2246 tcg_temp_free(tmp);
18c9b560
AZ
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2252 case 0x418: case 0x518: case 0x618: case 0x718:
2253 case 0x818: case 0x918: case 0xa18: case 0xb18:
2254 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 rd1 = (insn >> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 20) & 0xf) {
2260 case 0x0:
2261 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2262 break;
2263 case 0x1:
2264 gen_op_iwmmxt_addub_M0_wRn(rd1);
2265 break;
2266 case 0x3:
2267 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2268 break;
2269 case 0x4:
2270 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2271 break;
2272 case 0x5:
2273 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2274 break;
2275 case 0x7:
2276 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2277 break;
2278 case 0x8:
2279 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2280 break;
2281 case 0x9:
2282 gen_op_iwmmxt_addul_M0_wRn(rd1);
2283 break;
2284 case 0xb:
2285 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2286 break;
2287 default:
2288 return 1;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2295 case 0x408: case 0x508: case 0x608: case 0x708:
2296 case 0x808: case 0x908: case 0xa08: case 0xb08:
2297 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2298 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2299 return 1;
18c9b560
AZ
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2304 switch ((insn >> 22) & 3) {
18c9b560
AZ
2305 case 1:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2308 else
2309 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2310 break;
2311 case 2:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_packul_M0_wRn(rd1);
2316 break;
2317 case 3:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2322 break;
2323 }
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
2328 case 0x201: case 0x203: case 0x205: case 0x207:
2329 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2330 case 0x211: case 0x213: case 0x215: case 0x217:
2331 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2332 wrd = (insn >> 5) & 0xf;
2333 rd0 = (insn >> 12) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 if (rd0 == 0xf || rd1 == 0xf)
2336 return 1;
2337 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2338 tmp = load_reg(s, rd0);
2339 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2340 switch ((insn >> 16) & 0xf) {
2341 case 0x0: /* TMIA */
da6b5335 2342 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2343 break;
2344 case 0x8: /* TMIAPH */
da6b5335 2345 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2346 break;
2347 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2348 if (insn & (1 << 16))
da6b5335 2349 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2350 if (insn & (1 << 17))
da6b5335
FN
2351 tcg_gen_shri_i32(tmp2, tmp2, 16);
2352 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2353 break;
2354 default:
7d1b0095
PM
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
18c9b560
AZ
2357 return 1;
2358 }
7d1b0095
PM
2359 tcg_temp_free_i32(tmp2);
2360 tcg_temp_free_i32(tmp);
18c9b560
AZ
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 break;
2364 default:
2365 return 1;
2366 }
2367
2368 return 0;
2369}
2370
a1c7273b 2371/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560
AZ
2372 (ie. an undefined instruction). */
2373static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2374{
2375 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2376 TCGv tmp, tmp2;
18c9b560
AZ
2377
2378 if ((insn & 0x0ff00f10) == 0x0e200010) {
2379 /* Multiply with Internal Accumulate Format */
2380 rd0 = (insn >> 12) & 0xf;
2381 rd1 = insn & 0xf;
2382 acc = (insn >> 5) & 7;
2383
2384 if (acc != 0)
2385 return 1;
2386
3a554c0f
FN
2387 tmp = load_reg(s, rd0);
2388 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2389 switch ((insn >> 16) & 0xf) {
2390 case 0x0: /* MIA */
3a554c0f 2391 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2392 break;
2393 case 0x8: /* MIAPH */
3a554c0f 2394 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2395 break;
2396 case 0xc: /* MIABB */
2397 case 0xd: /* MIABT */
2398 case 0xe: /* MIATB */
2399 case 0xf: /* MIATT */
18c9b560 2400 if (insn & (1 << 16))
3a554c0f 2401 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2402 if (insn & (1 << 17))
3a554c0f
FN
2403 tcg_gen_shri_i32(tmp2, tmp2, 16);
2404 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2405 break;
2406 default:
2407 return 1;
2408 }
7d1b0095
PM
2409 tcg_temp_free_i32(tmp2);
2410 tcg_temp_free_i32(tmp);
18c9b560
AZ
2411
2412 gen_op_iwmmxt_movq_wRn_M0(acc);
2413 return 0;
2414 }
2415
2416 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2417 /* Internal Accumulator Access Format */
2418 rdhi = (insn >> 16) & 0xf;
2419 rdlo = (insn >> 12) & 0xf;
2420 acc = insn & 7;
2421
2422 if (acc != 0)
2423 return 1;
2424
2425 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2426 iwmmxt_load_reg(cpu_V0, acc);
2427 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2428 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2429 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2430 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2431 } else { /* MAR */
3a554c0f
FN
2432 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2433 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2434 }
2435 return 0;
2436 }
2437
2438 return 1;
2439}
2440
c1713132
AZ
2441/* Disassemble system coprocessor instruction. Return nonzero if
2442 instruction is not defined. */
2443static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2444{
b75263d6 2445 TCGv tmp, tmp2;
c1713132
AZ
2446 uint32_t rd = (insn >> 12) & 0xf;
2447 uint32_t cp = (insn >> 8) & 0xf;
2448 if (IS_USER(s)) {
2449 return 1;
2450 }
2451
18c9b560 2452 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2453 if (!env->cp[cp].cp_read)
2454 return 1;
8984bd2e 2455 gen_set_pc_im(s->pc);
7d1b0095 2456 tmp = tcg_temp_new_i32();
b75263d6
JR
2457 tmp2 = tcg_const_i32(insn);
2458 gen_helper_get_cp(tmp, cpu_env, tmp2);
2459 tcg_temp_free(tmp2);
8984bd2e 2460 store_reg(s, rd, tmp);
c1713132
AZ
2461 } else {
2462 if (!env->cp[cp].cp_write)
2463 return 1;
8984bd2e
PB
2464 gen_set_pc_im(s->pc);
2465 tmp = load_reg(s, rd);
b75263d6
JR
2466 tmp2 = tcg_const_i32(insn);
2467 gen_helper_set_cp(cpu_env, tmp2, tmp);
2468 tcg_temp_free(tmp2);
7d1b0095 2469 tcg_temp_free_i32(tmp);
c1713132
AZ
2470 }
2471 return 0;
2472}
2473
74594c9d 2474static int cp15_user_ok(CPUState *env, uint32_t insn)
9ee6e8bb
PB
2475{
2476 int cpn = (insn >> 16) & 0xf;
2477 int cpm = insn & 0xf;
2478 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2479
74594c9d
PM
2480 if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
2481 /* Performance monitor registers fall into three categories:
2482 * (a) always UNDEF in usermode
2483 * (b) UNDEF only if PMUSERENR.EN is 0
2484 * (c) always read OK and UNDEF on write (PMUSERENR only)
2485 */
2486 if ((cpm == 12 && (op < 6)) ||
2487 (cpm == 13 && (op < 3))) {
2488 return env->cp15.c9_pmuserenr;
2489 } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
2490 /* PMUSERENR, read only */
2491 return 1;
2492 }
2493 return 0;
2494 }
2495
9ee6e8bb
PB
2496 if (cpn == 13 && cpm == 0) {
2497 /* TLS register. */
2498 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2499 return 1;
2500 }
9ee6e8bb
PB
2501 return 0;
2502}
2503
3f26c122
RV
2504static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2505{
2506 TCGv tmp;
2507 int cpn = (insn >> 16) & 0xf;
2508 int cpm = insn & 0xf;
2509 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2510
2511 if (!arm_feature(env, ARM_FEATURE_V6K))
2512 return 0;
2513
2514 if (!(cpn == 13 && cpm == 0))
2515 return 0;
2516
2517 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2518 switch (op) {
2519 case 2:
c5883be2 2520 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2521 break;
2522 case 3:
c5883be2 2523 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2524 break;
2525 case 4:
c5883be2 2526 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2527 break;
2528 default:
3f26c122
RV
2529 return 0;
2530 }
2531 store_reg(s, rd, tmp);
2532
2533 } else {
2534 tmp = load_reg(s, rd);
2535 switch (op) {
2536 case 2:
c5883be2 2537 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2538 break;
2539 case 3:
c5883be2 2540 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2541 break;
2542 case 4:
c5883be2 2543 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2544 break;
2545 default:
7d1b0095 2546 tcg_temp_free_i32(tmp);
3f26c122
RV
2547 return 0;
2548 }
3f26c122
RV
2549 }
2550 return 1;
2551}
2552
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined. */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* We special case a number of cp15 instructions which were used
     * for things which are real instructions in ARMv7. This allows
     * them to work in linux-user mode which doesn't provide functional
     * get_cp15/set_cp15 helpers, and is more efficient anyway.
     */
    switch ((insn & 0x0fff0fff)) {
    case 0x0e070f90:
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (IS_USER(s)) {
            return 1;
        }
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    case 0x0e070f58:
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise continue to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
        break;
    case 0x0e070f3d:
        /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* Privileged only; accepting generates no code.  */
            return IS_USER(s) ? 1 : 0;
        }
        break;
    case 0x0e070f95: /* 0,c7,c5,4 : ISB */
    case 0x0e070f9a: /* 0,c7,c10,4: DSB */
    case 0x0e070fba: /* 0,c7,c10,5: DMB */
        /* Barriers in both v6 and v7 */
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* Accepted with no generated code (usable from user mode).  */
            return 0;
        }
        break;
    default:
        break;
    }

    /* Everything else from user mode must pass the cp15 access check.  */
    if (IS_USER(s) && !cp15_user_ok(env, insn)) {
        return 1;
    }

    rd = (insn >> 12) & 0xf;

    /* TLS registers are handled inline; nonzero means the insn was
     * consumed there.  */
    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache. Imitate this behaviour. */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
            (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
2660
/* Shift right by n, or left by -n when n is negative (lets VFP_SREG use
 * one expression for any field position).  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision VFP register number: a 4-bit field at
 * 'bigbit' supplies the top bits, the bit at 'smallbit' the LSB.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision VFP register number into 'reg'.  On VFP3 the
 * 'smallbit' is the high (5th) bit of the register number; pre-VFP3 only
 * 16 D registers exist, and NB: this macro executes "return 1" from the
 * enclosing function (undefined insn) if that bit is set.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field accessors for the D (dest), N and M (source) register positions.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2680
4373f3ce
PB
2681/* Move between integer and VFP cores. */
2682static TCGv gen_vfp_mrs(void)
2683{
7d1b0095 2684 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2685 tcg_gen_mov_i32(tmp, cpu_F0s);
2686 return tmp;
2687}
2688
2689static void gen_vfp_msr(TCGv tmp)
2690{
2691 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2692 tcg_temp_free_i32(tmp);
4373f3ce
PB
2693}
2694
ad69471c
PB
2695static void gen_neon_dup_u8(TCGv var, int shift)
2696{
7d1b0095 2697 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2698 if (shift)
2699 tcg_gen_shri_i32(var, var, shift);
86831435 2700 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2701 tcg_gen_shli_i32(tmp, var, 8);
2702 tcg_gen_or_i32(var, var, tmp);
2703 tcg_gen_shli_i32(tmp, var, 16);
2704 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2705 tcg_temp_free_i32(tmp);
ad69471c
PB
2706}
2707
2708static void gen_neon_dup_low16(TCGv var)
2709{
7d1b0095 2710 TCGv tmp = tcg_temp_new_i32();
86831435 2711 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2712 tcg_gen_shli_i32(tmp, var, 16);
2713 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2714 tcg_temp_free_i32(tmp);
ad69471c
PB
2715}
2716
2717static void gen_neon_dup_high16(TCGv var)
2718{
7d1b0095 2719 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2720 tcg_gen_andi_i32(var, var, 0xffff0000);
2721 tcg_gen_shri_i32(tmp, var, 16);
2722 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2723 tcg_temp_free_i32(tmp);
ad69471c
PB
2724}
2725
8e18cde3
PM
2726static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2727{
2728 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2729 TCGv tmp;
2730 switch (size) {
2731 case 0:
2732 tmp = gen_ld8u(addr, IS_USER(s));
2733 gen_neon_dup_u8(tmp, 0);
2734 break;
2735 case 1:
2736 tmp = gen_ld16u(addr, IS_USER(s));
2737 gen_neon_dup_low16(tmp);
2738 break;
2739 case 2:
2740 tmp = gen_ld32(addr, IS_USER(s));
2741 break;
2742 default: /* Avoid compiler warnings. */
2743 abort();
2744 }
2745 return tmp;
2746}
2747
a1c7273b 2748/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95
FB
2749 (ie. an undefined instruction). */
2750static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2751{
2752 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2753 int dp, veclen;
312eea9f 2754 TCGv addr;
4373f3ce 2755 TCGv tmp;
ad69471c 2756 TCGv tmp2;
b7bcbe95 2757
40f137e1
PB
2758 if (!arm_feature(env, ARM_FEATURE_VFP))
2759 return 1;
2760
5df8bac1 2761 if (!s->vfp_enabled) {
9ee6e8bb 2762 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2763 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2764 return 1;
2765 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2766 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2767 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2768 return 1;
2769 }
b7bcbe95
FB
2770 dp = ((insn & 0xf00) == 0xb00);
2771 switch ((insn >> 24) & 0xf) {
2772 case 0xe:
2773 if (insn & (1 << 4)) {
2774 /* single register transfer */
b7bcbe95
FB
2775 rd = (insn >> 12) & 0xf;
2776 if (dp) {
9ee6e8bb
PB
2777 int size;
2778 int pass;
2779
2780 VFP_DREG_N(rn, insn);
2781 if (insn & 0xf)
b7bcbe95 2782 return 1;
9ee6e8bb
PB
2783 if (insn & 0x00c00060
2784 && !arm_feature(env, ARM_FEATURE_NEON))
2785 return 1;
2786
2787 pass = (insn >> 21) & 1;
2788 if (insn & (1 << 22)) {
2789 size = 0;
2790 offset = ((insn >> 5) & 3) * 8;
2791 } else if (insn & (1 << 5)) {
2792 size = 1;
2793 offset = (insn & (1 << 6)) ? 16 : 0;
2794 } else {
2795 size = 2;
2796 offset = 0;
2797 }
18c9b560 2798 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2799 /* vfp->arm */
ad69471c 2800 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2801 switch (size) {
2802 case 0:
9ee6e8bb 2803 if (offset)
ad69471c 2804 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2805 if (insn & (1 << 23))
ad69471c 2806 gen_uxtb(tmp);
9ee6e8bb 2807 else
ad69471c 2808 gen_sxtb(tmp);
9ee6e8bb
PB
2809 break;
2810 case 1:
9ee6e8bb
PB
2811 if (insn & (1 << 23)) {
2812 if (offset) {
ad69471c 2813 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2814 } else {
ad69471c 2815 gen_uxth(tmp);
9ee6e8bb
PB
2816 }
2817 } else {
2818 if (offset) {
ad69471c 2819 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2820 } else {
ad69471c 2821 gen_sxth(tmp);
9ee6e8bb
PB
2822 }
2823 }
2824 break;
2825 case 2:
9ee6e8bb
PB
2826 break;
2827 }
ad69471c 2828 store_reg(s, rd, tmp);
b7bcbe95
FB
2829 } else {
2830 /* arm->vfp */
ad69471c 2831 tmp = load_reg(s, rd);
9ee6e8bb
PB
2832 if (insn & (1 << 23)) {
2833 /* VDUP */
2834 if (size == 0) {
ad69471c 2835 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2836 } else if (size == 1) {
ad69471c 2837 gen_neon_dup_low16(tmp);
9ee6e8bb 2838 }
cbbccffc 2839 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2840 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2841 tcg_gen_mov_i32(tmp2, tmp);
2842 neon_store_reg(rn, n, tmp2);
2843 }
2844 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2845 } else {
2846 /* VMOV */
2847 switch (size) {
2848 case 0:
ad69471c
PB
2849 tmp2 = neon_load_reg(rn, pass);
2850 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2851 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2852 break;
2853 case 1:
ad69471c
PB
2854 tmp2 = neon_load_reg(rn, pass);
2855 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2856 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2857 break;
2858 case 2:
9ee6e8bb
PB
2859 break;
2860 }
ad69471c 2861 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2862 }
b7bcbe95 2863 }
9ee6e8bb
PB
2864 } else { /* !dp */
2865 if ((insn & 0x6f) != 0x00)
2866 return 1;
2867 rn = VFP_SREG_N(insn);
18c9b560 2868 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2869 /* vfp->arm */
2870 if (insn & (1 << 21)) {
2871 /* system register */
40f137e1 2872 rn >>= 1;
9ee6e8bb 2873
b7bcbe95 2874 switch (rn) {
40f137e1 2875 case ARM_VFP_FPSID:
4373f3ce 2876 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2877 VFP3 restricts all id registers to privileged
2878 accesses. */
2879 if (IS_USER(s)
2880 && arm_feature(env, ARM_FEATURE_VFP3))
2881 return 1;
4373f3ce 2882 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2883 break;
40f137e1 2884 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2885 if (IS_USER(s))
2886 return 1;
4373f3ce 2887 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2888 break;
40f137e1
PB
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2891 /* Not present in VFP3. */
2892 if (IS_USER(s)
2893 || arm_feature(env, ARM_FEATURE_VFP3))
2894 return 1;
4373f3ce 2895 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2896 break;
40f137e1 2897 case ARM_VFP_FPSCR:
601d70b9 2898 if (rd == 15) {
4373f3ce
PB
2899 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2900 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2901 } else {
7d1b0095 2902 tmp = tcg_temp_new_i32();
4373f3ce
PB
2903 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2904 }
b7bcbe95 2905 break;
9ee6e8bb
PB
2906 case ARM_VFP_MVFR0:
2907 case ARM_VFP_MVFR1:
2908 if (IS_USER(s)
2909 || !arm_feature(env, ARM_FEATURE_VFP3))
2910 return 1;
4373f3ce 2911 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2912 break;
b7bcbe95
FB
2913 default:
2914 return 1;
2915 }
2916 } else {
2917 gen_mov_F0_vreg(0, rn);
4373f3ce 2918 tmp = gen_vfp_mrs();
b7bcbe95
FB
2919 }
2920 if (rd == 15) {
b5ff1b31 2921 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2922 gen_set_nzcv(tmp);
7d1b0095 2923 tcg_temp_free_i32(tmp);
4373f3ce
PB
2924 } else {
2925 store_reg(s, rd, tmp);
2926 }
b7bcbe95
FB
2927 } else {
2928 /* arm->vfp */
4373f3ce 2929 tmp = load_reg(s, rd);
b7bcbe95 2930 if (insn & (1 << 21)) {
40f137e1 2931 rn >>= 1;
b7bcbe95
FB
2932 /* system register */
2933 switch (rn) {
40f137e1 2934 case ARM_VFP_FPSID:
9ee6e8bb
PB
2935 case ARM_VFP_MVFR0:
2936 case ARM_VFP_MVFR1:
b7bcbe95
FB
2937 /* Writes are ignored. */
2938 break;
40f137e1 2939 case ARM_VFP_FPSCR:
4373f3ce 2940 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2941 tcg_temp_free_i32(tmp);
b5ff1b31 2942 gen_lookup_tb(s);
b7bcbe95 2943 break;
40f137e1 2944 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2945 if (IS_USER(s))
2946 return 1;
71b3c3de
JR
2947 /* TODO: VFP subarchitecture support.
2948 * For now, keep the EN bit only */
2949 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2950 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2951 gen_lookup_tb(s);
2952 break;
2953 case ARM_VFP_FPINST:
2954 case ARM_VFP_FPINST2:
4373f3ce 2955 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2956 break;
b7bcbe95
FB
2957 default:
2958 return 1;
2959 }
2960 } else {
4373f3ce 2961 gen_vfp_msr(tmp);
b7bcbe95
FB
2962 gen_mov_vreg_F0(0, rn);
2963 }
2964 }
2965 }
2966 } else {
2967 /* data processing */
2968 /* The opcode is in bits 23, 21, 20 and 6. */
2969 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2970 if (dp) {
2971 if (op == 15) {
2972 /* rn is opcode */
2973 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2974 } else {
2975 /* rn is register number */
9ee6e8bb 2976 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2977 }
2978
04595bf6 2979 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2980 /* Integer or single precision destination. */
9ee6e8bb 2981 rd = VFP_SREG_D(insn);
b7bcbe95 2982 } else {
9ee6e8bb 2983 VFP_DREG_D(rd, insn);
b7bcbe95 2984 }
04595bf6
PM
2985 if (op == 15 &&
2986 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2987 /* VCVT from int is always from S reg regardless of dp bit.
2988 * VCVT with immediate frac_bits has same format as SREG_M
2989 */
2990 rm = VFP_SREG_M(insn);
b7bcbe95 2991 } else {
9ee6e8bb 2992 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2993 }
2994 } else {
9ee6e8bb 2995 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2996 if (op == 15 && rn == 15) {
2997 /* Double precision destination. */
9ee6e8bb
PB
2998 VFP_DREG_D(rd, insn);
2999 } else {
3000 rd = VFP_SREG_D(insn);
3001 }
04595bf6
PM
3002 /* NB that we implicitly rely on the encoding for the frac_bits
3003 * in VCVT of fixed to float being the same as that of an SREG_M
3004 */
9ee6e8bb 3005 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3006 }
3007
69d1fc22 3008 veclen = s->vec_len;
b7bcbe95
FB
3009 if (op == 15 && rn > 3)
3010 veclen = 0;
3011
3012 /* Shut up compiler warnings. */
3013 delta_m = 0;
3014 delta_d = 0;
3015 bank_mask = 0;
3b46e624 3016
b7bcbe95
FB
3017 if (veclen > 0) {
3018 if (dp)
3019 bank_mask = 0xc;
3020 else
3021 bank_mask = 0x18;
3022
3023 /* Figure out what type of vector operation this is. */
3024 if ((rd & bank_mask) == 0) {
3025 /* scalar */
3026 veclen = 0;
3027 } else {
3028 if (dp)
69d1fc22 3029 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3030 else
69d1fc22 3031 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3032
3033 if ((rm & bank_mask) == 0) {
3034 /* mixed scalar/vector */
3035 delta_m = 0;
3036 } else {
3037 /* vector */
3038 delta_m = delta_d;
3039 }
3040 }
3041 }
3042
3043 /* Load the initial operands. */
3044 if (op == 15) {
3045 switch (rn) {
3046 case 16:
3047 case 17:
3048 /* Integer source */
3049 gen_mov_F0_vreg(0, rm);
3050 break;
3051 case 8:
3052 case 9:
3053 /* Compare */
3054 gen_mov_F0_vreg(dp, rd);
3055 gen_mov_F1_vreg(dp, rm);
3056 break;
3057 case 10:
3058 case 11:
3059 /* Compare with zero */
3060 gen_mov_F0_vreg(dp, rd);
3061 gen_vfp_F1_ld0(dp);
3062 break;
9ee6e8bb
PB
3063 case 20:
3064 case 21:
3065 case 22:
3066 case 23:
644ad806
PB
3067 case 28:
3068 case 29:
3069 case 30:
3070 case 31:
9ee6e8bb
PB
3071 /* Source and destination the same. */
3072 gen_mov_F0_vreg(dp, rd);
3073 break;
b7bcbe95
FB
3074 default:
3075 /* One source operand. */
3076 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3077 break;
b7bcbe95
FB
3078 }
3079 } else {
3080 /* Two source operands. */
3081 gen_mov_F0_vreg(dp, rn);
3082 gen_mov_F1_vreg(dp, rm);
3083 }
3084
3085 for (;;) {
3086 /* Perform the calculation. */
3087 switch (op) {
605a6aed
PM
3088 case 0: /* VMLA: fd + (fn * fm) */
3089 /* Note that order of inputs to the add matters for NaNs */
3090 gen_vfp_F1_mul(dp);
3091 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3092 gen_vfp_add(dp);
3093 break;
605a6aed 3094 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3095 gen_vfp_mul(dp);
605a6aed
PM
3096 gen_vfp_F1_neg(dp);
3097 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3098 gen_vfp_add(dp);
3099 break;
605a6aed
PM
3100 case 2: /* VNMLS: -fd + (fn * fm) */
3101 /* Note that it isn't valid to replace (-A + B) with (B - A)
3102 * or similar plausible looking simplifications
3103 * because this will give wrong results for NaNs.
3104 */
3105 gen_vfp_F1_mul(dp);
3106 gen_mov_F0_vreg(dp, rd);
3107 gen_vfp_neg(dp);
3108 gen_vfp_add(dp);
b7bcbe95 3109 break;
605a6aed 3110 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3111 gen_vfp_mul(dp);
605a6aed
PM
3112 gen_vfp_F1_neg(dp);
3113 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3114 gen_vfp_neg(dp);
605a6aed 3115 gen_vfp_add(dp);
b7bcbe95
FB
3116 break;
3117 case 4: /* mul: fn * fm */
3118 gen_vfp_mul(dp);
3119 break;
3120 case 5: /* nmul: -(fn * fm) */
3121 gen_vfp_mul(dp);
3122 gen_vfp_neg(dp);
3123 break;
3124 case 6: /* add: fn + fm */
3125 gen_vfp_add(dp);
3126 break;
3127 case 7: /* sub: fn - fm */
3128 gen_vfp_sub(dp);
3129 break;
3130 case 8: /* div: fn / fm */
3131 gen_vfp_div(dp);
3132 break;
9ee6e8bb
PB
3133 case 14: /* fconst */
3134 if (!arm_feature(env, ARM_FEATURE_VFP3))
3135 return 1;
3136
3137 n = (insn << 12) & 0x80000000;
3138 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3139 if (dp) {
3140 if (i & 0x40)
3141 i |= 0x3f80;
3142 else
3143 i |= 0x4000;
3144 n |= i << 16;
4373f3ce 3145 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3146 } else {
3147 if (i & 0x40)
3148 i |= 0x780;
3149 else
3150 i |= 0x800;
3151 n |= i << 19;
5b340b51 3152 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3153 }
9ee6e8bb 3154 break;
b7bcbe95
FB
3155 case 15: /* extension space */
3156 switch (rn) {
3157 case 0: /* cpy */
3158 /* no-op */
3159 break;
3160 case 1: /* abs */
3161 gen_vfp_abs(dp);
3162 break;
3163 case 2: /* neg */
3164 gen_vfp_neg(dp);
3165 break;
3166 case 3: /* sqrt */
3167 gen_vfp_sqrt(dp);
3168 break;
60011498
PB
3169 case 4: /* vcvtb.f32.f16 */
3170 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3171 return 1;
3172 tmp = gen_vfp_mrs();
3173 tcg_gen_ext16u_i32(tmp, tmp);
3174 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3175 tcg_temp_free_i32(tmp);
60011498
PB
3176 break;
3177 case 5: /* vcvtt.f32.f16 */
3178 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3179 return 1;
3180 tmp = gen_vfp_mrs();
3181 tcg_gen_shri_i32(tmp, tmp, 16);
3182 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3183 tcg_temp_free_i32(tmp);
60011498
PB
3184 break;
3185 case 6: /* vcvtb.f16.f32 */
3186 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3187 return 1;
7d1b0095 3188 tmp = tcg_temp_new_i32();
60011498
PB
3189 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3190 gen_mov_F0_vreg(0, rd);
3191 tmp2 = gen_vfp_mrs();
3192 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3193 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3194 tcg_temp_free_i32(tmp2);
60011498
PB
3195 gen_vfp_msr(tmp);
3196 break;
3197 case 7: /* vcvtt.f16.f32 */
3198 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3199 return 1;
7d1b0095 3200 tmp = tcg_temp_new_i32();
60011498
PB
3201 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3202 tcg_gen_shli_i32(tmp, tmp, 16);
3203 gen_mov_F0_vreg(0, rd);
3204 tmp2 = gen_vfp_mrs();
3205 tcg_gen_ext16u_i32(tmp2, tmp2);
3206 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3207 tcg_temp_free_i32(tmp2);
60011498
PB
3208 gen_vfp_msr(tmp);
3209 break;
b7bcbe95
FB
3210 case 8: /* cmp */
3211 gen_vfp_cmp(dp);
3212 break;
3213 case 9: /* cmpe */
3214 gen_vfp_cmpe(dp);
3215 break;
3216 case 10: /* cmpz */
3217 gen_vfp_cmp(dp);
3218 break;
3219 case 11: /* cmpez */
3220 gen_vfp_F1_ld0(dp);
3221 gen_vfp_cmpe(dp);
3222 break;
3223 case 15: /* single<->double conversion */
3224 if (dp)
4373f3ce 3225 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3226 else
4373f3ce 3227 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3228 break;
3229 case 16: /* fuito */
5500b06c 3230 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3231 break;
3232 case 17: /* fsito */
5500b06c 3233 gen_vfp_sito(dp, 0);
b7bcbe95 3234 break;
9ee6e8bb
PB
3235 case 20: /* fshto */
3236 if (!arm_feature(env, ARM_FEATURE_VFP3))
3237 return 1;
5500b06c 3238 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3239 break;
3240 case 21: /* fslto */
3241 if (!arm_feature(env, ARM_FEATURE_VFP3))
3242 return 1;
5500b06c 3243 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3244 break;
3245 case 22: /* fuhto */
3246 if (!arm_feature(env, ARM_FEATURE_VFP3))
3247 return 1;
5500b06c 3248 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3249 break;
3250 case 23: /* fulto */
3251 if (!arm_feature(env, ARM_FEATURE_VFP3))
3252 return 1;
5500b06c 3253 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3254 break;
b7bcbe95 3255 case 24: /* ftoui */
5500b06c 3256 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3257 break;
3258 case 25: /* ftouiz */
5500b06c 3259 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3260 break;
3261 case 26: /* ftosi */
5500b06c 3262 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3263 break;
3264 case 27: /* ftosiz */
5500b06c 3265 gen_vfp_tosiz(dp, 0);
b7bcbe95 3266 break;
9ee6e8bb
PB
3267 case 28: /* ftosh */
3268 if (!arm_feature(env, ARM_FEATURE_VFP3))
3269 return 1;
5500b06c 3270 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3271 break;
3272 case 29: /* ftosl */
3273 if (!arm_feature(env, ARM_FEATURE_VFP3))
3274 return 1;
5500b06c 3275 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3276 break;
3277 case 30: /* ftouh */
3278 if (!arm_feature(env, ARM_FEATURE_VFP3))
3279 return 1;
5500b06c 3280 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3281 break;
3282 case 31: /* ftoul */
3283 if (!arm_feature(env, ARM_FEATURE_VFP3))
3284 return 1;
5500b06c 3285 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3286 break;
b7bcbe95
FB
3287 default: /* undefined */
3288 printf ("rn:%d\n", rn);
3289 return 1;
3290 }
3291 break;
3292 default: /* undefined */
3293 printf ("op:%d\n", op);
3294 return 1;
3295 }
3296
3297 /* Write back the result. */
3298 if (op == 15 && (rn >= 8 && rn <= 11))
3299 ; /* Comparison, do nothing. */
04595bf6
PM
3300 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3301 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3302 gen_mov_vreg_F0(0, rd);
3303 else if (op == 15 && rn == 15)
3304 /* conversion */
3305 gen_mov_vreg_F0(!dp, rd);
3306 else
3307 gen_mov_vreg_F0(dp, rd);
3308
3309 /* break out of the loop if we have finished */
3310 if (veclen == 0)
3311 break;
3312
3313 if (op == 15 && delta_m == 0) {
3314 /* single source one-many */
3315 while (veclen--) {
3316 rd = ((rd + delta_d) & (bank_mask - 1))
3317 | (rd & bank_mask);
3318 gen_mov_vreg_F0(dp, rd);
3319 }
3320 break;
3321 }
3322 /* Setup the next operands. */
3323 veclen--;
3324 rd = ((rd + delta_d) & (bank_mask - 1))
3325 | (rd & bank_mask);
3326
3327 if (op == 15) {
3328 /* One source operand. */
3329 rm = ((rm + delta_m) & (bank_mask - 1))
3330 | (rm & bank_mask);
3331 gen_mov_F0_vreg(dp, rm);
3332 } else {
3333 /* Two source operands. */
3334 rn = ((rn + delta_d) & (bank_mask - 1))
3335 | (rn & bank_mask);
3336 gen_mov_F0_vreg(dp, rn);
3337 if (delta_m) {
3338 rm = ((rm + delta_m) & (bank_mask - 1))
3339 | (rm & bank_mask);
3340 gen_mov_F1_vreg(dp, rm);
3341 }
3342 }
3343 }
3344 }
3345 break;
3346 case 0xc:
3347 case 0xd:
8387da81 3348 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3349 /* two-register transfer */
3350 rn = (insn >> 16) & 0xf;
3351 rd = (insn >> 12) & 0xf;
3352 if (dp) {
9ee6e8bb
PB
3353 VFP_DREG_M(rm, insn);
3354 } else {
3355 rm = VFP_SREG_M(insn);
3356 }
b7bcbe95 3357
18c9b560 3358 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3359 /* vfp->arm */
3360 if (dp) {
4373f3ce
PB
3361 gen_mov_F0_vreg(0, rm * 2);
3362 tmp = gen_vfp_mrs();
3363 store_reg(s, rd, tmp);
3364 gen_mov_F0_vreg(0, rm * 2 + 1);
3365 tmp = gen_vfp_mrs();
3366 store_reg(s, rn, tmp);
b7bcbe95
FB
3367 } else {
3368 gen_mov_F0_vreg(0, rm);
4373f3ce 3369 tmp = gen_vfp_mrs();
8387da81 3370 store_reg(s, rd, tmp);
b7bcbe95 3371 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3372 tmp = gen_vfp_mrs();
8387da81 3373 store_reg(s, rn, tmp);
b7bcbe95
FB
3374 }
3375 } else {
3376 /* arm->vfp */
3377 if (dp) {
4373f3ce
PB
3378 tmp = load_reg(s, rd);
3379 gen_vfp_msr(tmp);
3380 gen_mov_vreg_F0(0, rm * 2);
3381 tmp = load_reg(s, rn);
3382 gen_vfp_msr(tmp);
3383 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3384 } else {
8387da81 3385 tmp = load_reg(s, rd);
4373f3ce 3386 gen_vfp_msr(tmp);
b7bcbe95 3387 gen_mov_vreg_F0(0, rm);
8387da81 3388 tmp = load_reg(s, rn);
4373f3ce 3389 gen_vfp_msr(tmp);
b7bcbe95
FB
3390 gen_mov_vreg_F0(0, rm + 1);
3391 }
3392 }
3393 } else {
3394 /* Load/store */
3395 rn = (insn >> 16) & 0xf;
3396 if (dp)
9ee6e8bb 3397 VFP_DREG_D(rd, insn);
b7bcbe95 3398 else
9ee6e8bb
PB
3399 rd = VFP_SREG_D(insn);
3400 if (s->thumb && rn == 15) {
7d1b0095 3401 addr = tcg_temp_new_i32();
312eea9f 3402 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3403 } else {
312eea9f 3404 addr = load_reg(s, rn);
9ee6e8bb 3405 }
b7bcbe95
FB
3406 if ((insn & 0x01200000) == 0x01000000) {
3407 /* Single load/store */
3408 offset = (insn & 0xff) << 2;
3409 if ((insn & (1 << 23)) == 0)
3410 offset = -offset;
312eea9f 3411 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3412 if (insn & (1 << 20)) {
312eea9f 3413 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3414 gen_mov_vreg_F0(dp, rd);
3415 } else {
3416 gen_mov_F0_vreg(dp, rd);
312eea9f 3417 gen_vfp_st(s, dp, addr);
b7bcbe95 3418 }
7d1b0095 3419 tcg_temp_free_i32(addr);
b7bcbe95
FB
3420 } else {
3421 /* load/store multiple */
3422 if (dp)
3423 n = (insn >> 1) & 0x7f;
3424 else
3425 n = insn & 0xff;
3426
3427 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3428 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3429
3430 if (dp)
3431 offset = 8;
3432 else
3433 offset = 4;
3434 for (i = 0; i < n; i++) {
18c9b560 3435 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3436 /* load */
312eea9f 3437 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3438 gen_mov_vreg_F0(dp, rd + i);
3439 } else {
3440 /* store */
3441 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3442 gen_vfp_st(s, dp, addr);
b7bcbe95 3443 }
312eea9f 3444 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3445 }
3446 if (insn & (1 << 21)) {
3447 /* writeback */
3448 if (insn & (1 << 24))
3449 offset = -offset * n;
3450 else if (dp && (insn & 1))
3451 offset = 4;
3452 else
3453 offset = 0;
3454
3455 if (offset != 0)
312eea9f
FN
3456 tcg_gen_addi_i32(addr, addr, offset);
3457 store_reg(s, rn, addr);
3458 } else {
7d1b0095 3459 tcg_temp_free_i32(addr);
b7bcbe95
FB
3460 }
3461 }
3462 }
3463 break;
3464 default:
3465 /* Should never happen. */
3466 return 1;
3467 }
3468 return 0;
3469}
3470
6e256c93 3471static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3472{
6e256c93
FB
3473 TranslationBlock *tb;
3474
3475 tb = s->tb;
3476 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3477 tcg_gen_goto_tb(n);
8984bd2e 3478 gen_set_pc_im(dest);
4b4a72e5 3479 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3480 } else {
8984bd2e 3481 gen_set_pc_im(dest);
57fec1fe 3482 tcg_gen_exit_tb(0);
6e256c93 3483 }
c53be334
FB
3484}
3485
8aaca4c0
FB
3486static inline void gen_jmp (DisasContext *s, uint32_t dest)
3487{
551bd27f 3488 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3489 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3490 if (s->thumb)
d9ba4830
PB
3491 dest |= 1;
3492 gen_bx_im(s, dest);
8aaca4c0 3493 } else {
6e256c93 3494 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3495 s->is_jmp = DISAS_TB_JUMP;
3496 }
3497}
3498
d9ba4830 3499static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3500{
ee097184 3501 if (x)
d9ba4830 3502 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3503 else
d9ba4830 3504 gen_sxth(t0);
ee097184 3505 if (y)
d9ba4830 3506 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3507 else
d9ba4830
PB
3508 gen_sxth(t1);
3509 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3510}
3511
/* Return the mask of PSR bits set by a MSR instruction.
 * flags is the instruction's 4-bit field mask: each set bit enables one
 * byte of the PSR, LSB first (bit 0 -> bits 7:0, ... bit 3 -> bits 31:24).
 * spsr is nonzero when the target is an SPSR rather than the CPSR.
 * Bits not implemented by the emulated architecture version, execution
 * state bits (for CPSR writes), and privileged bits (in user mode) are
 * removed from the mask.
 */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits. */
    mask &= ~CPSR_RESERVED;
    /* Drop bits introduced by later architecture versions than the
     * one being emulated.
     */
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality */
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits: not writable via MSR to the CPSR.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits. */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
3544
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead.
 * Writes the bits of t0 selected by mask into either the SPSR (when the
 * spsr argument is nonzero) or the CPSR.  Note that inside the body
 * "spsr" in load_cpu_field/store_cpu_field names the CPU state *field*,
 * not this function's int parameter (the macros take a field name token).
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s))
            /* NOTE(review): t0 is not freed on this failure path; callers
             * treat a nonzero return as UNDEF -- confirm temp lifetime.  */
            return 1;

        /* Read-modify-write the SPSR under the given mask.  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR changes can affect translation state (e.g. mode).  */
    gen_lookup_tb(s);
    return 0;
}
3566
2fbac54b
FN
3567/* Returns nonzero if access to the PSR is not permitted. */
3568static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3569{
3570 TCGv tmp;
7d1b0095 3571 tmp = tcg_temp_new_i32();
2fbac54b
FN
3572 tcg_gen_movi_i32(tmp, val);
3573 return gen_set_psr(s, mask, spsr, tmp);
3574}
3575
e9bb4aa9
JR
3576/* Generate an old-style exception return. Marks pc as dead. */
3577static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3578{
d9ba4830 3579 TCGv tmp;
e9bb4aa9 3580 store_reg(s, 15, pc);
d9ba4830
PB
3581 tmp = load_cpu_field(spsr);
3582 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3583 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3584 s->is_jmp = DISAS_UPDATE;
3585}
3586
/* Generate a v6 exception return.  Marks both values as dead.
 * Restores the full CPSR from cpsr, then branches to pc.
 */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    /* Restore CPSR first: mode/banking must be in place before the
     * PC write takes effect at the end of the TB.
     */
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    /* CPU state changed: end the TB and re-evaluate.  */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3595
9ee6e8bb
PB
3596static inline void
3597gen_set_condexec (DisasContext *s)
3598{
3599 if (s->condexec_mask) {
8f01245e 3600 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3601 TCGv tmp = tcg_temp_new_i32();
8f01245e 3602 tcg_gen_movi_i32(tmp, val);
d9ba4830 3603 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3604 }
3605}
3b46e624 3606
/* Emit code to raise exception excp for the instruction that begins
 * "offset" bytes before the current decode position (s->pc), after
 * syncing the IT-block state into the CPU.  Ends the TB.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);  /* point PC at the faulting insn */
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3614
9ee6e8bb
PB
3615static void gen_nop_hint(DisasContext *s, int val)
3616{
3617 switch (val) {
3618 case 3: /* wfi */
8984bd2e 3619 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3620 s->is_jmp = DISAS_WFI;
3621 break;
3622 case 2: /* wfe */
3623 case 4: /* sev */
3624 /* TODO: Implement SEV and WFE. May help SMP performance. */
3625 default: /* nop */
3626 break;
3627 }
3628}
99c475ab 3629
/* Shorthand argument list: destination and first source are cpu_V0,
 * second source is cpu_V1 (common 2-operand-accumulate pattern).
 */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

/* Emit an elementwise NEON add of t1 into t0.
 * size selects the lane width: 0 = 8-bit, 1 = 16-bit, 2 = 32-bit.
 */
static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;  /* full-word add */
    default: abort();  /* caller bug: size 3 is never valid here */
    }
}
3641
dd8fbd78 3642static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3643{
3644 switch (size) {
dd8fbd78
FN
3645 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3646 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3647 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3648 default: return;
3649 }
3650}
3651
/* 32-bit pairwise ops end up the same as the elementwise versions,
 * because a pairwise op on a single 32-bit lane degenerates to the
 * plain binary op -- so alias the pairwise helpers to the basic ones.
 */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
3657
/* Emit the signedness/size-specific variant of NEON helper "name" that
 * also receives cpu_env.  Dispatches on the in-scope locals "size"
 * (element size: 0/1/2) and "u" (unsigned flag), operating in place on
 * the in-scope temps "tmp" (input/output) and "tmp2" (second input).
 * Expands to "return 1" (UNDEF) for the invalid size==3 encodings, so
 * it may only be used inside a function returning int.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.
 * Dispatches on the in-scope locals "size" and "u", operating in place
 * on the in-scope temps "tmp" and "tmp2"; expands to "return 1" (UNDEF)
 * for the invalid size==3 encodings.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3703
/* Load 32 bits from VFP scratch slot "scratch" into a fresh temp.
 * The caller owns (and must free) the returned temp.
 */
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
3710
/* Store var into VFP scratch slot "scratch".  Consumes (frees) var. */
static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
3716
/* Load a NEON scalar operand into a fresh temp, duplicated to fill the
 * 32-bit value.  For size==1 (16-bit scalar) the encoding is:
 * register = reg & 7, pass = reg >> 4, and bit 3 selects the high or
 * low halfword, which is then replicated into both halves.  Otherwise
 * (32-bit scalar) register = reg & 15, pass = reg >> 4.
 * The caller owns (and must free) the returned temp.
 */
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3732
02acedf9 3733static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3734{
02acedf9 3735 TCGv tmp, tmp2;
600b828c 3736 if (!q && size == 2) {
02acedf9
PM
3737 return 1;
3738 }
3739 tmp = tcg_const_i32(rd);
3740 tmp2 = tcg_const_i32(rm);
3741 if (q) {
3742 switch (size) {
3743 case 0:
02da0b2d 3744 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3745 break;
3746 case 1:
02da0b2d 3747 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3748 break;
3749 case 2:
02da0b2d 3750 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3751 break;
3752 default:
3753 abort();
3754 }
3755 } else {
3756 switch (size) {
3757 case 0:
02da0b2d 3758 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3759 break;
3760 case 1:
02da0b2d 3761 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3762 break;
3763 default:
3764 abort();
3765 }
3766 }
3767 tcg_temp_free_i32(tmp);
3768 tcg_temp_free_i32(tmp2);
3769 return 0;
19457615
FN
3770}
3771
d68a6f3a 3772static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3773{
3774 TCGv tmp, tmp2;
600b828c 3775 if (!q && size == 2) {
d68a6f3a
PM
3776 return 1;
3777 }
3778 tmp = tcg_const_i32(rd);
3779 tmp2 = tcg_const_i32(rm);
3780 if (q) {
3781 switch (size) {
3782 case 0:
02da0b2d 3783 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3784 break;
3785 case 1:
02da0b2d 3786 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3787 break;
3788 case 2:
02da0b2d 3789 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3790 break;
3791 default:
3792 abort();
3793 }
3794 } else {
3795 switch (size) {
3796 case 0:
02da0b2d 3797 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3798 break;
3799 case 1:
02da0b2d 3800 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3801 break;
3802 default:
3803 abort();
3804 }
3805 }
3806 tcg_temp_free_i32(tmp);
3807 tcg_temp_free_i32(tmp2);
3808 return 0;
19457615
FN
3809}
3810
19457615
FN
3811static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3812{
3813 TCGv rd, tmp;
3814
7d1b0095
PM
3815 rd = tcg_temp_new_i32();
3816 tmp = tcg_temp_new_i32();
19457615
FN
3817
3818 tcg_gen_shli_i32(rd, t0, 8);
3819 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3820 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3821 tcg_gen_or_i32(rd, rd, tmp);
3822
3823 tcg_gen_shri_i32(t1, t1, 8);
3824 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3825 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3826 tcg_gen_or_i32(t1, t1, tmp);
3827 tcg_gen_mov_i32(t0, rd);
3828
7d1b0095
PM
3829 tcg_temp_free_i32(tmp);
3830 tcg_temp_free_i32(rd);
19457615
FN
3831}
3832
3833static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3834{
3835 TCGv rd, tmp;
3836
7d1b0095
PM
3837 rd = tcg_temp_new_i32();
3838 tmp = tcg_temp_new_i32();
19457615
FN
3839
3840 tcg_gen_shli_i32(rd, t0, 16);
3841 tcg_gen_andi_i32(tmp, t1, 0xffff);
3842 tcg_gen_or_i32(rd, rd, tmp);
3843 tcg_gen_shri_i32(t1, t1, 16);
3844 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3845 tcg_gen_or_i32(t1, t1, tmp);
3846 tcg_gen_mov_i32(t0, rd);
3847
7d1b0095
PM
3848 tcg_temp_free_i32(tmp);
3849 tcg_temp_free_i32(rd);
19457615
FN
3850}
3851
3852
/* Decode table for the NEON "load/store multiple structures" forms,
 * indexed by the instruction's op field (see disas_neon_ls_insn):
 * nregs      - number of D registers transferred,
 * interleave - element interleave factor,
 * spacing    - register-number spacing between structure elements.
 * Read-only, hence const (it was previously mutable by accident).
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3870
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Handles three encodings: load/store multiple structures (bit 23 clear),
   load-single-element-to-all-lanes (bit 23 set, size field == 3), and
   single-element load/store (bit 23 set, size field != 3), plus the
   post-indexed base register writeback common to all three.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;   /* base address register */
    rm = insn & 0xf;           /* index register / writeback mode */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved registers restart from (an offset of) the base
             * address rather than continuing sequentially.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one load/store per D register.  */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Each D register is accessed as two 32-bit passes.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit elements packed into one 32-bit pass.  */
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit elements packed into one 32-bit pass.  */
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;   /* total bytes, for base writeback below */
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;   /* alignment hint bit */
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;   /* index_align field */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the target lane,
                         * preserving the other lanes of the 32-bit pass.  */
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Base register writeback: rm == 13 means post-increment by the
         * transfer size, any other rm post-increments by that register.  */
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4178
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.
 * For each bit: take the bit of t where c is set, of f where c is clear.
 */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);     /* keep t bits selected by c */
    tcg_gen_andc_i32(f, f, c);    /* keep f bits where c is clear */
    tcg_gen_or_i32(dest, t, f);
}
4186
/* Narrow the 64-bit vector src to 32 bits by plain truncation of each
 * element.  size selects the element narrowing: 0 = 16->8, 1 = 32->16,
 * 2 = 64->32.
 */
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}
4196
/* Narrow the 64-bit vector src to 32 bits with signed saturation
 * (signed input, signed result).  size: 0 = 16->8, 1 = 32->16, 2 = 64->32.
 * The helpers take cpu_env to update the saturation (QC) flag.
 */
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
4206
a7812ae4 4207static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4208{
4209 switch (size) {
02da0b2d
PM
4210 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4211 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4212 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4213 default: abort();
4214 }
4215}
4216
af1bbf30
JR
4217static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4218{
4219 switch (size) {
02da0b2d
PM
4220 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4221 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4222 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4223 default: abort();
4224 }
4225}
4226
ad69471c
PB
4227static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4228 int q, int u)
4229{
4230 if (q) {
4231 if (u) {
4232 switch (size) {
4233 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4234 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4235 default: abort();
4236 }
4237 } else {
4238 switch (size) {
4239 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4240 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4241 default: abort();
4242 }
4243 }
4244 } else {
4245 if (u) {
4246 switch (size) {
b408a9b0
CL
4247 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4248 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4249 default: abort();
4250 }
4251 } else {
4252 switch (size) {
4253 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4254 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4255 default: abort();
4256 }
4257 }
4258 }
4259}
4260
a7812ae4 4261static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4262{
4263 if (u) {
4264 switch (size) {
4265 case 0: gen_helper_neon_widen_u8(dest, src); break;
4266 case 1: gen_helper_neon_widen_u16(dest, src); break;
4267 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4268 default: abort();
4269 }
4270 } else {
4271 switch (size) {
4272 case 0: gen_helper_neon_widen_s8(dest, src); break;
4273 case 1: gen_helper_neon_widen_s16(dest, src); break;
4274 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4275 default: abort();
4276 }
4277 }
7d1b0095 4278 tcg_temp_free_i32(src);
ad69471c
PB
4279}
4280
4281static inline void gen_neon_addl(int size)
4282{
4283 switch (size) {
4284 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4285 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4286 case 2: tcg_gen_add_i64(CPU_V001); break;
4287 default: abort();
4288 }
4289}
4290
4291static inline void gen_neon_subl(int size)
4292{
4293 switch (size) {
4294 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4295 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4296 case 2: tcg_gen_sub_i64(CPU_V001); break;
4297 default: abort();
4298 }
4299}
4300
a7812ae4 4301static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4302{
4303 switch (size) {
4304 case 0: gen_helper_neon_negl_u16(var, var); break;
4305 case 1: gen_helper_neon_negl_u32(var, var); break;
4306 case 2: gen_helper_neon_negl_u64(var, var); break;
4307 default: abort();
4308 }
4309}
4310
a7812ae4 4311static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4312{
4313 switch (size) {
02da0b2d
PM
4314 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4315 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4316 default: abort();
4317 }
4318}
4319
a7812ae4 4320static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4321{
a7812ae4 4322 TCGv_i64 tmp;
ad69471c
PB
4323
4324 switch ((size << 1) | u) {
4325 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4326 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4327 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4328 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4329 case 4:
4330 tmp = gen_muls_i64_i32(a, b);
4331 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4332 tcg_temp_free_i64(tmp);
ad69471c
PB
4333 break;
4334 case 5:
4335 tmp = gen_mulu_i64_i32(a, b);
4336 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4337 tcg_temp_free_i64(tmp);
ad69471c
PB
4338 break;
4339 default: abort();
4340 }
c6067f04
CL
4341
4342 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4343 Don't forget to clean them now. */
4344 if (size < 2) {
7d1b0095
PM
4345 tcg_temp_free_i32(a);
4346 tcg_temp_free_i32(b);
c6067f04 4347 }
ad69471c
PB
4348}
4349
c33171c7
PM
4350static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4351{
4352 if (op) {
4353 if (u) {
4354 gen_neon_unarrow_sats(size, dest, src);
4355 } else {
4356 gen_neon_narrow(size, dest, src);
4357 }
4358 } else {
4359 if (u) {
4360 gen_neon_narrow_satu(size, dest, src);
4361 } else {
4362 gen_neon_narrow_sats(size, dest, src);
4363 }
4364 }
4365}
4366
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Per-op bitmap of permitted size values: bit n set means size n is
 * a valid encoding for that op (insns UNDEF otherwise). Ops absent
 * from the table get 0 and therefore always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4434
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

/* Return true if this neon 2reg-misc op is float-to-float.
 * That is VABS.F, VNEG.F, and everything from VRECPE.F upwards
 * (ops 58..63 are all float-to-float or float<->int conversions
 * handled on the float path).
 */
static int neon_2rm_is_float_op(int op)
{
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4541
9ee6e8bb
PB
4542/* Translate a NEON data processing instruction. Return nonzero if the
4543 instruction is invalid.
ad69471c
PB
4544 We process data in a mixture of 32-bit and 64-bit chunks.
4545 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4546
9ee6e8bb
PB
4547static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4548{
4549 int op;
4550 int q;
4551 int rd, rn, rm;
4552 int size;
4553 int shift;
4554 int pass;
4555 int count;
4556 int pairwise;
4557 int u;
ca9a32e4 4558 uint32_t imm, mask;
b75263d6 4559 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4560 TCGv_i64 tmp64;
9ee6e8bb 4561
5df8bac1 4562 if (!s->vfp_enabled)
9ee6e8bb
PB
4563 return 1;
4564 q = (insn & (1 << 6)) != 0;
4565 u = (insn >> 24) & 1;
4566 VFP_DREG_D(rd, insn);
4567 VFP_DREG_N(rn, insn);
4568 VFP_DREG_M(rm, insn);
4569 size = (insn >> 20) & 3;
4570 if ((insn & (1 << 23)) == 0) {
4571 /* Three register same length. */
4572 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4573 /* Catch invalid op and bad size combinations: UNDEF */
4574 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4575 return 1;
4576 }
25f84f79
PM
4577 /* All insns of this form UNDEF for either this condition or the
4578 * superset of cases "Q==1"; we catch the latter later.
4579 */
4580 if (q && ((rd | rn | rm) & 1)) {
4581 return 1;
4582 }
62698be3
PM
4583 if (size == 3 && op != NEON_3R_LOGIC) {
4584 /* 64-bit element instructions. */
9ee6e8bb 4585 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4586 neon_load_reg64(cpu_V0, rn + pass);
4587 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4588 switch (op) {
62698be3 4589 case NEON_3R_VQADD:
9ee6e8bb 4590 if (u) {
02da0b2d
PM
4591 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4592 cpu_V0, cpu_V1);
2c0262af 4593 } else {
02da0b2d
PM
4594 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4595 cpu_V0, cpu_V1);
2c0262af 4596 }
9ee6e8bb 4597 break;
62698be3 4598 case NEON_3R_VQSUB:
9ee6e8bb 4599 if (u) {
02da0b2d
PM
4600 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4601 cpu_V0, cpu_V1);
ad69471c 4602 } else {
02da0b2d
PM
4603 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4604 cpu_V0, cpu_V1);
ad69471c
PB
4605 }
4606 break;
62698be3 4607 case NEON_3R_VSHL:
ad69471c
PB
4608 if (u) {
4609 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4610 } else {
4611 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4612 }
4613 break;
62698be3 4614 case NEON_3R_VQSHL:
ad69471c 4615 if (u) {
02da0b2d
PM
4616 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4617 cpu_V1, cpu_V0);
ad69471c 4618 } else {
02da0b2d
PM
4619 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4620 cpu_V1, cpu_V0);
ad69471c
PB
4621 }
4622 break;
62698be3 4623 case NEON_3R_VRSHL:
ad69471c
PB
4624 if (u) {
4625 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4626 } else {
ad69471c
PB
4627 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4628 }
4629 break;
62698be3 4630 case NEON_3R_VQRSHL:
ad69471c 4631 if (u) {
02da0b2d
PM
4632 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4633 cpu_V1, cpu_V0);
ad69471c 4634 } else {
02da0b2d
PM
4635 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4636 cpu_V1, cpu_V0);
1e8d4eec 4637 }
9ee6e8bb 4638 break;
62698be3 4639 case NEON_3R_VADD_VSUB:
9ee6e8bb 4640 if (u) {
ad69471c 4641 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4642 } else {
ad69471c 4643 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4644 }
4645 break;
4646 default:
4647 abort();
2c0262af 4648 }
ad69471c 4649 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4650 }
9ee6e8bb 4651 return 0;
2c0262af 4652 }
25f84f79 4653 pairwise = 0;
9ee6e8bb 4654 switch (op) {
62698be3
PM
4655 case NEON_3R_VSHL:
4656 case NEON_3R_VQSHL:
4657 case NEON_3R_VRSHL:
4658 case NEON_3R_VQRSHL:
9ee6e8bb 4659 {
ad69471c
PB
4660 int rtmp;
4661 /* Shift instruction operands are reversed. */
4662 rtmp = rn;
9ee6e8bb 4663 rn = rm;
ad69471c 4664 rm = rtmp;
9ee6e8bb 4665 }
2c0262af 4666 break;
25f84f79
PM
4667 case NEON_3R_VPADD:
4668 if (u) {
4669 return 1;
4670 }
4671 /* Fall through */
62698be3
PM
4672 case NEON_3R_VPMAX:
4673 case NEON_3R_VPMIN:
9ee6e8bb 4674 pairwise = 1;
2c0262af 4675 break;
25f84f79
PM
4676 case NEON_3R_FLOAT_ARITH:
4677 pairwise = (u && size < 2); /* if VPADD (float) */
4678 break;
4679 case NEON_3R_FLOAT_MINMAX:
4680 pairwise = u; /* if VPMIN/VPMAX (float) */
4681 break;
4682 case NEON_3R_FLOAT_CMP:
4683 if (!u && size) {
4684 /* no encoding for U=0 C=1x */
4685 return 1;
4686 }
4687 break;
4688 case NEON_3R_FLOAT_ACMP:
4689 if (!u) {
4690 return 1;
4691 }
4692 break;
4693 case NEON_3R_VRECPS_VRSQRTS:
4694 if (u) {
4695 return 1;
4696 }
2c0262af 4697 break;
25f84f79
PM
4698 case NEON_3R_VMUL:
4699 if (u && (size != 0)) {
4700 /* UNDEF on invalid size for polynomial subcase */
4701 return 1;
4702 }
2c0262af 4703 break;
9ee6e8bb 4704 default:
2c0262af 4705 break;
9ee6e8bb 4706 }
dd8fbd78 4707
25f84f79
PM
4708 if (pairwise && q) {
4709 /* All the pairwise insns UNDEF if Q is set */
4710 return 1;
4711 }
4712
9ee6e8bb
PB
4713 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4714
4715 if (pairwise) {
4716 /* Pairwise. */
a5a14945
JR
4717 if (pass < 1) {
4718 tmp = neon_load_reg(rn, 0);
4719 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4720 } else {
a5a14945
JR
4721 tmp = neon_load_reg(rm, 0);
4722 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4723 }
4724 } else {
4725 /* Elementwise. */
dd8fbd78
FN
4726 tmp = neon_load_reg(rn, pass);
4727 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4728 }
4729 switch (op) {
62698be3 4730 case NEON_3R_VHADD:
9ee6e8bb
PB
4731 GEN_NEON_INTEGER_OP(hadd);
4732 break;
62698be3 4733 case NEON_3R_VQADD:
02da0b2d 4734 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4735 break;
62698be3 4736 case NEON_3R_VRHADD:
9ee6e8bb 4737 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4738 break;
62698be3 4739 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4740 switch ((u << 2) | size) {
4741 case 0: /* VAND */
dd8fbd78 4742 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4743 break;
4744 case 1: /* BIC */
f669df27 4745 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4746 break;
4747 case 2: /* VORR */
dd8fbd78 4748 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4749 break;
4750 case 3: /* VORN */
f669df27 4751 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4752 break;
4753 case 4: /* VEOR */
dd8fbd78 4754 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4755 break;
4756 case 5: /* VBSL */
dd8fbd78
FN
4757 tmp3 = neon_load_reg(rd, pass);
4758 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4759 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4760 break;
4761 case 6: /* VBIT */
dd8fbd78
FN
4762 tmp3 = neon_load_reg(rd, pass);
4763 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4764 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4765 break;
4766 case 7: /* VBIF */
dd8fbd78
FN
4767 tmp3 = neon_load_reg(rd, pass);
4768 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4769 tcg_temp_free_i32(tmp3);
9ee6e8bb 4770 break;
2c0262af
FB
4771 }
4772 break;
62698be3 4773 case NEON_3R_VHSUB:
9ee6e8bb
PB
4774 GEN_NEON_INTEGER_OP(hsub);
4775 break;
62698be3 4776 case NEON_3R_VQSUB:
02da0b2d 4777 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4778 break;
62698be3 4779 case NEON_3R_VCGT:
9ee6e8bb
PB
4780 GEN_NEON_INTEGER_OP(cgt);
4781 break;
62698be3 4782 case NEON_3R_VCGE:
9ee6e8bb
PB
4783 GEN_NEON_INTEGER_OP(cge);
4784 break;
62698be3 4785 case NEON_3R_VSHL:
ad69471c 4786 GEN_NEON_INTEGER_OP(shl);
2c0262af 4787 break;
62698be3 4788 case NEON_3R_VQSHL:
02da0b2d 4789 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4790 break;
62698be3 4791 case NEON_3R_VRSHL:
ad69471c 4792 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4793 break;
62698be3 4794 case NEON_3R_VQRSHL:
02da0b2d 4795 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4796 break;
62698be3 4797 case NEON_3R_VMAX:
9ee6e8bb
PB
4798 GEN_NEON_INTEGER_OP(max);
4799 break;
62698be3 4800 case NEON_3R_VMIN:
9ee6e8bb
PB
4801 GEN_NEON_INTEGER_OP(min);
4802 break;
62698be3 4803 case NEON_3R_VABD:
9ee6e8bb
PB
4804 GEN_NEON_INTEGER_OP(abd);
4805 break;
62698be3 4806 case NEON_3R_VABA:
9ee6e8bb 4807 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4808 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4809 tmp2 = neon_load_reg(rd, pass);
4810 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4811 break;
62698be3 4812 case NEON_3R_VADD_VSUB:
9ee6e8bb 4813 if (!u) { /* VADD */
62698be3 4814 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4815 } else { /* VSUB */
4816 switch (size) {
dd8fbd78
FN
4817 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4818 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4819 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4820 default: abort();
9ee6e8bb
PB
4821 }
4822 }
4823 break;
62698be3 4824 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4825 if (!u) { /* VTST */
4826 switch (size) {
dd8fbd78
FN
4827 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4828 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4829 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4830 default: abort();
9ee6e8bb
PB
4831 }
4832 } else { /* VCEQ */
4833 switch (size) {
dd8fbd78
FN
4834 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4835 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4836 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4837 default: abort();
9ee6e8bb
PB
4838 }
4839 }
4840 break;
62698be3 4841 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4842 switch (size) {
dd8fbd78
FN
4843 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4844 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4845 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4846 default: abort();
9ee6e8bb 4847 }
7d1b0095 4848 tcg_temp_free_i32(tmp2);
dd8fbd78 4849 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4850 if (u) { /* VMLS */
dd8fbd78 4851 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4852 } else { /* VMLA */
dd8fbd78 4853 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4854 }
4855 break;
62698be3 4856 case NEON_3R_VMUL:
9ee6e8bb 4857 if (u) { /* polynomial */
dd8fbd78 4858 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4859 } else { /* Integer */
4860 switch (size) {
dd8fbd78
FN
4861 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4862 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4863 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4864 default: abort();
9ee6e8bb
PB
4865 }
4866 }
4867 break;
62698be3 4868 case NEON_3R_VPMAX:
9ee6e8bb
PB
4869 GEN_NEON_INTEGER_OP(pmax);
4870 break;
62698be3 4871 case NEON_3R_VPMIN:
9ee6e8bb
PB
4872 GEN_NEON_INTEGER_OP(pmin);
4873 break;
62698be3 4874 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4875 if (!u) { /* VQDMULH */
4876 switch (size) {
02da0b2d
PM
4877 case 1:
4878 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4879 break;
4880 case 2:
4881 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4882 break;
62698be3 4883 default: abort();
9ee6e8bb 4884 }
62698be3 4885 } else { /* VQRDMULH */
9ee6e8bb 4886 switch (size) {
02da0b2d
PM
4887 case 1:
4888 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4889 break;
4890 case 2:
4891 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4892 break;
62698be3 4893 default: abort();
9ee6e8bb
PB
4894 }
4895 }
4896 break;
62698be3 4897 case NEON_3R_VPADD:
9ee6e8bb 4898 switch (size) {
dd8fbd78
FN
4899 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4900 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4901 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4902 default: abort();
9ee6e8bb
PB
4903 }
4904 break;
62698be3 4905 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4906 {
4907 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4908 switch ((u << 2) | size) {
4909 case 0: /* VADD */
aa47cfdd
PM
4910 case 4: /* VPADD */
4911 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4912 break;
4913 case 2: /* VSUB */
aa47cfdd 4914 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4915 break;
4916 case 6: /* VABD */
aa47cfdd 4917 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4918 break;
4919 default:
62698be3 4920 abort();
9ee6e8bb 4921 }
aa47cfdd 4922 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4923 break;
aa47cfdd 4924 }
62698be3 4925 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4926 {
4927 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4928 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4929 if (!u) {
7d1b0095 4930 tcg_temp_free_i32(tmp2);
dd8fbd78 4931 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4932 if (size == 0) {
aa47cfdd 4933 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4934 } else {
aa47cfdd 4935 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4936 }
4937 }
aa47cfdd 4938 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4939 break;
aa47cfdd 4940 }
62698be3 4941 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4942 {
4943 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4944 if (!u) {
aa47cfdd 4945 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4946 } else {
aa47cfdd
PM
4947 if (size == 0) {
4948 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4949 } else {
4950 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4951 }
b5ff1b31 4952 }
aa47cfdd 4953 tcg_temp_free_ptr(fpstatus);
2c0262af 4954 break;
aa47cfdd 4955 }
62698be3 4956 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4957 {
4958 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4959 if (size == 0) {
4960 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4961 } else {
4962 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4963 }
4964 tcg_temp_free_ptr(fpstatus);
2c0262af 4965 break;
aa47cfdd 4966 }
62698be3 4967 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4968 {
4969 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4970 if (size == 0) {
4971 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4972 } else {
4973 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4974 }
4975 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4976 break;
aa47cfdd 4977 }
62698be3 4978 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4979 if (size == 0)
dd8fbd78 4980 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4981 else
dd8fbd78 4982 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4983 break;
9ee6e8bb
PB
4984 default:
4985 abort();
2c0262af 4986 }
7d1b0095 4987 tcg_temp_free_i32(tmp2);
dd8fbd78 4988
9ee6e8bb
PB
4989 /* Save the result. For elementwise operations we can put it
4990 straight into the destination register. For pairwise operations
4991 we have to be careful to avoid clobbering the source operands. */
4992 if (pairwise && rd == rm) {
dd8fbd78 4993 neon_store_scratch(pass, tmp);
9ee6e8bb 4994 } else {
dd8fbd78 4995 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4996 }
4997
4998 } /* for pass */
4999 if (pairwise && rd == rm) {
5000 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5001 tmp = neon_load_scratch(pass);
5002 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5003 }
5004 }
ad69471c 5005 /* End of 3 register same size operations. */
9ee6e8bb
PB
5006 } else if (insn & (1 << 4)) {
5007 if ((insn & 0x00380080) != 0) {
5008 /* Two registers and shift. */
5009 op = (insn >> 8) & 0xf;
5010 if (insn & (1 << 7)) {
cc13115b
PM
5011 /* 64-bit shift. */
5012 if (op > 7) {
5013 return 1;
5014 }
9ee6e8bb
PB
5015 size = 3;
5016 } else {
5017 size = 2;
5018 while ((insn & (1 << (size + 19))) == 0)
5019 size--;
5020 }
5021 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5022 /* To avoid excessive dumplication of ops we implement shift
5023 by immediate using the variable shift operations. */
5024 if (op < 8) {
5025 /* Shift by immediate:
5026 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5027 if (q && ((rd | rm) & 1)) {
5028 return 1;
5029 }
5030 if (!u && (op == 4 || op == 6)) {
5031 return 1;
5032 }
9ee6e8bb
PB
5033 /* Right shifts are encoded as N - shift, where N is the
5034 element size in bits. */
5035 if (op <= 4)
5036 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5037 if (size == 3) {
5038 count = q + 1;
5039 } else {
5040 count = q ? 4: 2;
5041 }
5042 switch (size) {
5043 case 0:
5044 imm = (uint8_t) shift;
5045 imm |= imm << 8;
5046 imm |= imm << 16;
5047 break;
5048 case 1:
5049 imm = (uint16_t) shift;
5050 imm |= imm << 16;
5051 break;
5052 case 2:
5053 case 3:
5054 imm = shift;
5055 break;
5056 default:
5057 abort();
5058 }
5059
5060 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5061 if (size == 3) {
5062 neon_load_reg64(cpu_V0, rm + pass);
5063 tcg_gen_movi_i64(cpu_V1, imm);
5064 switch (op) {
5065 case 0: /* VSHR */
5066 case 1: /* VSRA */
5067 if (u)
5068 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5069 else
ad69471c 5070 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5071 break;
ad69471c
PB
5072 case 2: /* VRSHR */
5073 case 3: /* VRSRA */
5074 if (u)
5075 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5076 else
ad69471c 5077 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5078 break;
ad69471c 5079 case 4: /* VSRI */
ad69471c
PB
5080 case 5: /* VSHL, VSLI */
5081 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5082 break;
0322b26e 5083 case 6: /* VQSHLU */
02da0b2d
PM
5084 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5085 cpu_V0, cpu_V1);
ad69471c 5086 break;
0322b26e
PM
5087 case 7: /* VQSHL */
5088 if (u) {
02da0b2d 5089 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5090 cpu_V0, cpu_V1);
5091 } else {
02da0b2d 5092 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5093 cpu_V0, cpu_V1);
5094 }
9ee6e8bb 5095 break;
9ee6e8bb 5096 }
ad69471c
PB
5097 if (op == 1 || op == 3) {
5098 /* Accumulate. */
5371cb81 5099 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5100 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5101 } else if (op == 4 || (op == 5 && u)) {
5102 /* Insert */
923e6509
CL
5103 neon_load_reg64(cpu_V1, rd + pass);
5104 uint64_t mask;
5105 if (shift < -63 || shift > 63) {
5106 mask = 0;
5107 } else {
5108 if (op == 4) {
5109 mask = 0xffffffffffffffffull >> -shift;
5110 } else {
5111 mask = 0xffffffffffffffffull << shift;
5112 }
5113 }
5114 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5115 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5116 }
5117 neon_store_reg64(cpu_V0, rd + pass);
5118 } else { /* size < 3 */
5119 /* Operands in T0 and T1. */
dd8fbd78 5120 tmp = neon_load_reg(rm, pass);
7d1b0095 5121 tmp2 = tcg_temp_new_i32();
dd8fbd78 5122 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5123 switch (op) {
5124 case 0: /* VSHR */
5125 case 1: /* VSRA */
5126 GEN_NEON_INTEGER_OP(shl);
5127 break;
5128 case 2: /* VRSHR */
5129 case 3: /* VRSRA */
5130 GEN_NEON_INTEGER_OP(rshl);
5131 break;
5132 case 4: /* VSRI */
ad69471c
PB
5133 case 5: /* VSHL, VSLI */
5134 switch (size) {
dd8fbd78
FN
5135 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5136 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5137 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5138 default: abort();
ad69471c
PB
5139 }
5140 break;
0322b26e 5141 case 6: /* VQSHLU */
ad69471c 5142 switch (size) {
0322b26e 5143 case 0:
02da0b2d
PM
5144 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5145 tmp, tmp2);
0322b26e
PM
5146 break;
5147 case 1:
02da0b2d
PM
5148 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5149 tmp, tmp2);
0322b26e
PM
5150 break;
5151 case 2:
02da0b2d
PM
5152 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5153 tmp, tmp2);
0322b26e
PM
5154 break;
5155 default:
cc13115b 5156 abort();
ad69471c
PB
5157 }
5158 break;
0322b26e 5159 case 7: /* VQSHL */
02da0b2d 5160 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5161 break;
ad69471c 5162 }
7d1b0095 5163 tcg_temp_free_i32(tmp2);
ad69471c
PB
5164
5165 if (op == 1 || op == 3) {
5166 /* Accumulate. */
dd8fbd78 5167 tmp2 = neon_load_reg(rd, pass);
5371cb81 5168 gen_neon_add(size, tmp, tmp2);
7d1b0095 5169 tcg_temp_free_i32(tmp2);
ad69471c
PB
5170 } else if (op == 4 || (op == 5 && u)) {
5171 /* Insert */
5172 switch (size) {
5173 case 0:
5174 if (op == 4)
ca9a32e4 5175 mask = 0xff >> -shift;
ad69471c 5176 else
ca9a32e4
JR
5177 mask = (uint8_t)(0xff << shift);
5178 mask |= mask << 8;
5179 mask |= mask << 16;
ad69471c
PB
5180 break;
5181 case 1:
5182 if (op == 4)
ca9a32e4 5183 mask = 0xffff >> -shift;
ad69471c 5184 else
ca9a32e4
JR
5185 mask = (uint16_t)(0xffff << shift);
5186 mask |= mask << 16;
ad69471c
PB
5187 break;
5188 case 2:
ca9a32e4
JR
5189 if (shift < -31 || shift > 31) {
5190 mask = 0;
5191 } else {
5192 if (op == 4)
5193 mask = 0xffffffffu >> -shift;
5194 else
5195 mask = 0xffffffffu << shift;
5196 }
ad69471c
PB
5197 break;
5198 default:
5199 abort();
5200 }
dd8fbd78 5201 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5202 tcg_gen_andi_i32(tmp, tmp, mask);
5203 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5204 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5205 tcg_temp_free_i32(tmp2);
ad69471c 5206 }
dd8fbd78 5207 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5208 }
5209 } /* for pass */
5210 } else if (op < 10) {
ad69471c 5211 /* Shift by immediate and narrow:
9ee6e8bb 5212 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5213 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5214 if (rm & 1) {
5215 return 1;
5216 }
9ee6e8bb
PB
5217 shift = shift - (1 << (size + 3));
5218 size++;
92cdfaeb 5219 if (size == 3) {
a7812ae4 5220 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5221 neon_load_reg64(cpu_V0, rm);
5222 neon_load_reg64(cpu_V1, rm + 1);
5223 for (pass = 0; pass < 2; pass++) {
5224 TCGv_i64 in;
5225 if (pass == 0) {
5226 in = cpu_V0;
5227 } else {
5228 in = cpu_V1;
5229 }
ad69471c 5230 if (q) {
0b36f4cd 5231 if (input_unsigned) {
92cdfaeb 5232 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5233 } else {
92cdfaeb 5234 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5235 }
ad69471c 5236 } else {
0b36f4cd 5237 if (input_unsigned) {
92cdfaeb 5238 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5239 } else {
92cdfaeb 5240 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5241 }
ad69471c 5242 }
7d1b0095 5243 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5244 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5245 neon_store_reg(rd, pass, tmp);
5246 } /* for pass */
5247 tcg_temp_free_i64(tmp64);
5248 } else {
5249 if (size == 1) {
5250 imm = (uint16_t)shift;
5251 imm |= imm << 16;
2c0262af 5252 } else {
92cdfaeb
PM
5253 /* size == 2 */
5254 imm = (uint32_t)shift;
5255 }
5256 tmp2 = tcg_const_i32(imm);
5257 tmp4 = neon_load_reg(rm + 1, 0);
5258 tmp5 = neon_load_reg(rm + 1, 1);
5259 for (pass = 0; pass < 2; pass++) {
5260 if (pass == 0) {
5261 tmp = neon_load_reg(rm, 0);
5262 } else {
5263 tmp = tmp4;
5264 }
0b36f4cd
CL
5265 gen_neon_shift_narrow(size, tmp, tmp2, q,
5266 input_unsigned);
92cdfaeb
PM
5267 if (pass == 0) {
5268 tmp3 = neon_load_reg(rm, 1);
5269 } else {
5270 tmp3 = tmp5;
5271 }
0b36f4cd
CL
5272 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5273 input_unsigned);
36aa55dc 5274 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5275 tcg_temp_free_i32(tmp);
5276 tcg_temp_free_i32(tmp3);
5277 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5278 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5279 neon_store_reg(rd, pass, tmp);
5280 } /* for pass */
c6067f04 5281 tcg_temp_free_i32(tmp2);
b75263d6 5282 }
9ee6e8bb 5283 } else if (op == 10) {
cc13115b
PM
5284 /* VSHLL, VMOVL */
5285 if (q || (rd & 1)) {
9ee6e8bb 5286 return 1;
cc13115b 5287 }
ad69471c
PB
5288 tmp = neon_load_reg(rm, 0);
5289 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5290 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5291 if (pass == 1)
5292 tmp = tmp2;
5293
5294 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5295
9ee6e8bb
PB
5296 if (shift != 0) {
5297 /* The shift is less than the width of the source
ad69471c
PB
5298 type, so we can just shift the whole register. */
5299 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5300 /* Widen the result of shift: we need to clear
5301 * the potential overflow bits resulting from
5302 * left bits of the narrow input appearing as
5303 * right bits of left the neighbour narrow
5304 * input. */
ad69471c
PB
5305 if (size < 2 || !u) {
5306 uint64_t imm64;
5307 if (size == 0) {
5308 imm = (0xffu >> (8 - shift));
5309 imm |= imm << 16;
acdf01ef 5310 } else if (size == 1) {
ad69471c 5311 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5312 } else {
5313 /* size == 2 */
5314 imm = 0xffffffff >> (32 - shift);
5315 }
5316 if (size < 2) {
5317 imm64 = imm | (((uint64_t)imm) << 32);
5318 } else {
5319 imm64 = imm;
9ee6e8bb 5320 }
acdf01ef 5321 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5322 }
5323 }
ad69471c 5324 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5325 }
f73534a5 5326 } else if (op >= 14) {
9ee6e8bb 5327 /* VCVT fixed-point. */
cc13115b
PM
5328 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5329 return 1;
5330 }
f73534a5
PM
5331 /* We have already masked out the must-be-1 top bit of imm6,
5332 * hence this 32-shift where the ARM ARM has 64-imm6.
5333 */
5334 shift = 32 - shift;
9ee6e8bb 5335 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5336 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5337 if (!(op & 1)) {
9ee6e8bb 5338 if (u)
5500b06c 5339 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5340 else
5500b06c 5341 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5342 } else {
5343 if (u)
5500b06c 5344 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5345 else
5500b06c 5346 gen_vfp_tosl(0, shift, 1);
2c0262af 5347 }
4373f3ce 5348 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5349 }
5350 } else {
9ee6e8bb
PB
5351 return 1;
5352 }
5353 } else { /* (insn & 0x00380080) == 0 */
5354 int invert;
7d80fee5
PM
5355 if (q && (rd & 1)) {
5356 return 1;
5357 }
9ee6e8bb
PB
5358
5359 op = (insn >> 8) & 0xf;
5360 /* One register and immediate. */
5361 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5362 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5363 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5364 * We choose to not special-case this and will behave as if a
5365 * valid constant encoding of 0 had been given.
5366 */
9ee6e8bb
PB
5367 switch (op) {
5368 case 0: case 1:
5369 /* no-op */
5370 break;
5371 case 2: case 3:
5372 imm <<= 8;
5373 break;
5374 case 4: case 5:
5375 imm <<= 16;
5376 break;
5377 case 6: case 7:
5378 imm <<= 24;
5379 break;
5380 case 8: case 9:
5381 imm |= imm << 16;
5382 break;
5383 case 10: case 11:
5384 imm = (imm << 8) | (imm << 24);
5385 break;
5386 case 12:
8e31209e 5387 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5388 break;
5389 case 13:
5390 imm = (imm << 16) | 0xffff;
5391 break;
5392 case 14:
5393 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5394 if (invert)
5395 imm = ~imm;
5396 break;
5397 case 15:
7d80fee5
PM
5398 if (invert) {
5399 return 1;
5400 }
9ee6e8bb
PB
5401 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5402 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5403 break;
5404 }
5405 if (invert)
5406 imm = ~imm;
5407
9ee6e8bb
PB
5408 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5409 if (op & 1 && op < 12) {
ad69471c 5410 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5411 if (invert) {
5412 /* The immediate value has already been inverted, so
5413 BIC becomes AND. */
ad69471c 5414 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5415 } else {
ad69471c 5416 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5417 }
9ee6e8bb 5418 } else {
ad69471c 5419 /* VMOV, VMVN. */
7d1b0095 5420 tmp = tcg_temp_new_i32();
9ee6e8bb 5421 if (op == 14 && invert) {
a5a14945 5422 int n;
ad69471c
PB
5423 uint32_t val;
5424 val = 0;
9ee6e8bb
PB
5425 for (n = 0; n < 4; n++) {
5426 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5427 val |= 0xff << (n * 8);
9ee6e8bb 5428 }
ad69471c
PB
5429 tcg_gen_movi_i32(tmp, val);
5430 } else {
5431 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5432 }
9ee6e8bb 5433 }
ad69471c 5434 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5435 }
5436 }
e4b3861d 5437 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5438 if (size != 3) {
5439 op = (insn >> 8) & 0xf;
5440 if ((insn & (1 << 6)) == 0) {
5441 /* Three registers of different lengths. */
5442 int src1_wide;
5443 int src2_wide;
5444 int prewiden;
695272dc
PM
5445 /* undefreq: bit 0 : UNDEF if size != 0
5446 * bit 1 : UNDEF if size == 0
5447 * bit 2 : UNDEF if U == 1
5448 * Note that [1:0] set implies 'always UNDEF'
5449 */
5450 int undefreq;
5451 /* prewiden, src1_wide, src2_wide, undefreq */
5452 static const int neon_3reg_wide[16][4] = {
5453 {1, 0, 0, 0}, /* VADDL */
5454 {1, 1, 0, 0}, /* VADDW */
5455 {1, 0, 0, 0}, /* VSUBL */
5456 {1, 1, 0, 0}, /* VSUBW */
5457 {0, 1, 1, 0}, /* VADDHN */
5458 {0, 0, 0, 0}, /* VABAL */
5459 {0, 1, 1, 0}, /* VSUBHN */
5460 {0, 0, 0, 0}, /* VABDL */
5461 {0, 0, 0, 0}, /* VMLAL */
5462 {0, 0, 0, 6}, /* VQDMLAL */
5463 {0, 0, 0, 0}, /* VMLSL */
5464 {0, 0, 0, 6}, /* VQDMLSL */
5465 {0, 0, 0, 0}, /* Integer VMULL */
5466 {0, 0, 0, 2}, /* VQDMULL */
5467 {0, 0, 0, 5}, /* Polynomial VMULL */
5468 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5469 };
5470
5471 prewiden = neon_3reg_wide[op][0];
5472 src1_wide = neon_3reg_wide[op][1];
5473 src2_wide = neon_3reg_wide[op][2];
695272dc 5474 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5475
695272dc
PM
5476 if (((undefreq & 1) && (size != 0)) ||
5477 ((undefreq & 2) && (size == 0)) ||
5478 ((undefreq & 4) && u)) {
5479 return 1;
5480 }
5481 if ((src1_wide && (rn & 1)) ||
5482 (src2_wide && (rm & 1)) ||
5483 (!src2_wide && (rd & 1))) {
ad69471c 5484 return 1;
695272dc 5485 }
ad69471c 5486
9ee6e8bb
PB
5487 /* Avoid overlapping operands. Wide source operands are
5488 always aligned so will never overlap with wide
5489 destinations in problematic ways. */
8f8e3aa4 5490 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5491 tmp = neon_load_reg(rm, 1);
5492 neon_store_scratch(2, tmp);
8f8e3aa4 5493 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5494 tmp = neon_load_reg(rn, 1);
5495 neon_store_scratch(2, tmp);
9ee6e8bb 5496 }
a50f5b91 5497 TCGV_UNUSED(tmp3);
9ee6e8bb 5498 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5499 if (src1_wide) {
5500 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5501 TCGV_UNUSED(tmp);
9ee6e8bb 5502 } else {
ad69471c 5503 if (pass == 1 && rd == rn) {
dd8fbd78 5504 tmp = neon_load_scratch(2);
9ee6e8bb 5505 } else {
ad69471c
PB
5506 tmp = neon_load_reg(rn, pass);
5507 }
5508 if (prewiden) {
5509 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5510 }
5511 }
ad69471c
PB
5512 if (src2_wide) {
5513 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5514 TCGV_UNUSED(tmp2);
9ee6e8bb 5515 } else {
ad69471c 5516 if (pass == 1 && rd == rm) {
dd8fbd78 5517 tmp2 = neon_load_scratch(2);
9ee6e8bb 5518 } else {
ad69471c
PB
5519 tmp2 = neon_load_reg(rm, pass);
5520 }
5521 if (prewiden) {
5522 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5523 }
9ee6e8bb
PB
5524 }
5525 switch (op) {
5526 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5527 gen_neon_addl(size);
9ee6e8bb 5528 break;
79b0e534 5529 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5530 gen_neon_subl(size);
9ee6e8bb
PB
5531 break;
5532 case 5: case 7: /* VABAL, VABDL */
5533 switch ((size << 1) | u) {
ad69471c
PB
5534 case 0:
5535 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5536 break;
5537 case 1:
5538 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5539 break;
5540 case 2:
5541 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5542 break;
5543 case 3:
5544 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5545 break;
5546 case 4:
5547 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5548 break;
5549 case 5:
5550 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5551 break;
9ee6e8bb
PB
5552 default: abort();
5553 }
7d1b0095
PM
5554 tcg_temp_free_i32(tmp2);
5555 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5556 break;
5557 case 8: case 9: case 10: case 11: case 12: case 13:
5558 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5559 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5560 break;
5561 case 14: /* Polynomial VMULL */
e5ca24cb 5562 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5563 tcg_temp_free_i32(tmp2);
5564 tcg_temp_free_i32(tmp);
e5ca24cb 5565 break;
695272dc
PM
5566 default: /* 15 is RESERVED: caught earlier */
5567 abort();
9ee6e8bb 5568 }
ebcd88ce
PM
5569 if (op == 13) {
5570 /* VQDMULL */
5571 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5572 neon_store_reg64(cpu_V0, rd + pass);
5573 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5574 /* Accumulate. */
ebcd88ce 5575 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5576 switch (op) {
4dc064e6
PM
5577 case 10: /* VMLSL */
5578 gen_neon_negl(cpu_V0, size);
5579 /* Fall through */
5580 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5581 gen_neon_addl(size);
9ee6e8bb
PB
5582 break;
5583 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5584 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5585 if (op == 11) {
5586 gen_neon_negl(cpu_V0, size);
5587 }
ad69471c
PB
5588 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5589 break;
9ee6e8bb
PB
5590 default:
5591 abort();
5592 }
ad69471c 5593 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5594 } else if (op == 4 || op == 6) {
5595 /* Narrowing operation. */
7d1b0095 5596 tmp = tcg_temp_new_i32();
79b0e534 5597 if (!u) {
9ee6e8bb 5598 switch (size) {
ad69471c
PB
5599 case 0:
5600 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5601 break;
5602 case 1:
5603 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5604 break;
5605 case 2:
5606 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5607 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5608 break;
9ee6e8bb
PB
5609 default: abort();
5610 }
5611 } else {
5612 switch (size) {
ad69471c
PB
5613 case 0:
5614 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5615 break;
5616 case 1:
5617 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5618 break;
5619 case 2:
5620 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5621 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5622 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5623 break;
9ee6e8bb
PB
5624 default: abort();
5625 }
5626 }
ad69471c
PB
5627 if (pass == 0) {
5628 tmp3 = tmp;
5629 } else {
5630 neon_store_reg(rd, 0, tmp3);
5631 neon_store_reg(rd, 1, tmp);
5632 }
9ee6e8bb
PB
5633 } else {
5634 /* Write back the result. */
ad69471c 5635 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5636 }
5637 }
5638 } else {
3e3326df
PM
5639 /* Two registers and a scalar. NB that for ops of this form
5640 * the ARM ARM labels bit 24 as Q, but it is in our variable
5641 * 'u', not 'q'.
5642 */
5643 if (size == 0) {
5644 return 1;
5645 }
9ee6e8bb 5646 switch (op) {
9ee6e8bb 5647 case 1: /* Float VMLA scalar */
9ee6e8bb 5648 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5649 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5650 if (size == 1) {
5651 return 1;
5652 }
5653 /* fall through */
5654 case 0: /* Integer VMLA scalar */
5655 case 4: /* Integer VMLS scalar */
5656 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5657 case 12: /* VQDMULH scalar */
5658 case 13: /* VQRDMULH scalar */
3e3326df
PM
5659 if (u && ((rd | rn) & 1)) {
5660 return 1;
5661 }
dd8fbd78
FN
5662 tmp = neon_get_scalar(size, rm);
5663 neon_store_scratch(0, tmp);
9ee6e8bb 5664 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5665 tmp = neon_load_scratch(0);
5666 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5667 if (op == 12) {
5668 if (size == 1) {
02da0b2d 5669 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5670 } else {
02da0b2d 5671 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5672 }
5673 } else if (op == 13) {
5674 if (size == 1) {
02da0b2d 5675 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5676 } else {
02da0b2d 5677 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5678 }
5679 } else if (op & 1) {
aa47cfdd
PM
5680 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5681 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5682 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5683 } else {
5684 switch (size) {
dd8fbd78
FN
5685 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5686 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5687 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5688 default: abort();
9ee6e8bb
PB
5689 }
5690 }
7d1b0095 5691 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5692 if (op < 8) {
5693 /* Accumulate. */
dd8fbd78 5694 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5695 switch (op) {
5696 case 0:
dd8fbd78 5697 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5698 break;
5699 case 1:
aa47cfdd
PM
5700 {
5701 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5702 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5703 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5704 break;
aa47cfdd 5705 }
9ee6e8bb 5706 case 4:
dd8fbd78 5707 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5708 break;
5709 case 5:
aa47cfdd
PM
5710 {
5711 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5712 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5713 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5714 break;
aa47cfdd 5715 }
9ee6e8bb
PB
5716 default:
5717 abort();
5718 }
7d1b0095 5719 tcg_temp_free_i32(tmp2);
9ee6e8bb 5720 }
dd8fbd78 5721 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5722 }
5723 break;
9ee6e8bb 5724 case 3: /* VQDMLAL scalar */
9ee6e8bb 5725 case 7: /* VQDMLSL scalar */
9ee6e8bb 5726 case 11: /* VQDMULL scalar */
3e3326df 5727 if (u == 1) {
ad69471c 5728 return 1;
3e3326df
PM
5729 }
5730 /* fall through */
5731 case 2: /* VMLAL sclar */
5732 case 6: /* VMLSL scalar */
5733 case 10: /* VMULL scalar */
5734 if (rd & 1) {
5735 return 1;
5736 }
dd8fbd78 5737 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5738 /* We need a copy of tmp2 because gen_neon_mull
5739 * deletes it during pass 0. */
7d1b0095 5740 tmp4 = tcg_temp_new_i32();
c6067f04 5741 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5742 tmp3 = neon_load_reg(rn, 1);
ad69471c 5743
9ee6e8bb 5744 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5745 if (pass == 0) {
5746 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5747 } else {
dd8fbd78 5748 tmp = tmp3;
c6067f04 5749 tmp2 = tmp4;
9ee6e8bb 5750 }
ad69471c 5751 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5752 if (op != 11) {
5753 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5754 }
9ee6e8bb 5755 switch (op) {
4dc064e6
PM
5756 case 6:
5757 gen_neon_negl(cpu_V0, size);
5758 /* Fall through */
5759 case 2:
ad69471c 5760 gen_neon_addl(size);
9ee6e8bb
PB
5761 break;
5762 case 3: case 7:
ad69471c 5763 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5764 if (op == 7) {
5765 gen_neon_negl(cpu_V0, size);
5766 }
ad69471c 5767 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5768 break;
5769 case 10:
5770 /* no-op */
5771 break;
5772 case 11:
ad69471c 5773 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5774 break;
5775 default:
5776 abort();
5777 }
ad69471c 5778 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5779 }
dd8fbd78 5780
dd8fbd78 5781
9ee6e8bb
PB
5782 break;
5783 default: /* 14 and 15 are RESERVED */
5784 return 1;
5785 }
5786 }
5787 } else { /* size == 3 */
5788 if (!u) {
5789 /* Extract. */
9ee6e8bb 5790 imm = (insn >> 8) & 0xf;
ad69471c
PB
5791
5792 if (imm > 7 && !q)
5793 return 1;
5794
52579ea1
PM
5795 if (q && ((rd | rn | rm) & 1)) {
5796 return 1;
5797 }
5798
ad69471c
PB
5799 if (imm == 0) {
5800 neon_load_reg64(cpu_V0, rn);
5801 if (q) {
5802 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5803 }
ad69471c
PB
5804 } else if (imm == 8) {
5805 neon_load_reg64(cpu_V0, rn + 1);
5806 if (q) {
5807 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5808 }
ad69471c 5809 } else if (q) {
a7812ae4 5810 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5811 if (imm < 8) {
5812 neon_load_reg64(cpu_V0, rn);
a7812ae4 5813 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5814 } else {
5815 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5816 neon_load_reg64(tmp64, rm);
ad69471c
PB
5817 }
5818 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5819 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5820 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5821 if (imm < 8) {
5822 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5823 } else {
ad69471c
PB
5824 neon_load_reg64(cpu_V1, rm + 1);
5825 imm -= 8;
9ee6e8bb 5826 }
ad69471c 5827 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5828 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5829 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5830 tcg_temp_free_i64(tmp64);
ad69471c 5831 } else {
a7812ae4 5832 /* BUGFIX */
ad69471c 5833 neon_load_reg64(cpu_V0, rn);
a7812ae4 5834 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5835 neon_load_reg64(cpu_V1, rm);
a7812ae4 5836 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5837 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5838 }
5839 neon_store_reg64(cpu_V0, rd);
5840 if (q) {
5841 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5842 }
5843 } else if ((insn & (1 << 11)) == 0) {
5844 /* Two register misc. */
5845 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5846 size = (insn >> 18) & 3;
600b828c
PM
5847 /* UNDEF for unknown op values and bad op-size combinations */
5848 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5849 return 1;
5850 }
fc2a9b37
PM
5851 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5852 q && ((rm | rd) & 1)) {
5853 return 1;
5854 }
9ee6e8bb 5855 switch (op) {
600b828c 5856 case NEON_2RM_VREV64:
9ee6e8bb 5857 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5858 tmp = neon_load_reg(rm, pass * 2);
5859 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5860 switch (size) {
dd8fbd78
FN
5861 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5862 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5863 case 2: /* no-op */ break;
5864 default: abort();
5865 }
dd8fbd78 5866 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5867 if (size == 2) {
dd8fbd78 5868 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5869 } else {
9ee6e8bb 5870 switch (size) {
dd8fbd78
FN
5871 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5872 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5873 default: abort();
5874 }
dd8fbd78 5875 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5876 }
5877 }
5878 break;
600b828c
PM
5879 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5880 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5881 for (pass = 0; pass < q + 1; pass++) {
5882 tmp = neon_load_reg(rm, pass * 2);
5883 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5884 tmp = neon_load_reg(rm, pass * 2 + 1);
5885 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5886 switch (size) {
5887 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5888 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5889 case 2: tcg_gen_add_i64(CPU_V001); break;
5890 default: abort();
5891 }
600b828c 5892 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5893 /* Accumulate. */
ad69471c
PB
5894 neon_load_reg64(cpu_V1, rd + pass);
5895 gen_neon_addl(size);
9ee6e8bb 5896 }
ad69471c 5897 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5898 }
5899 break;
600b828c 5900 case NEON_2RM_VTRN:
9ee6e8bb 5901 if (size == 2) {
a5a14945 5902 int n;
9ee6e8bb 5903 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5904 tmp = neon_load_reg(rm, n);
5905 tmp2 = neon_load_reg(rd, n + 1);
5906 neon_store_reg(rm, n, tmp2);
5907 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5908 }
5909 } else {
5910 goto elementwise;
5911 }
5912 break;
600b828c 5913 case NEON_2RM_VUZP:
02acedf9 5914 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5915 return 1;
9ee6e8bb
PB
5916 }
5917 break;
600b828c 5918 case NEON_2RM_VZIP:
d68a6f3a 5919 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5920 return 1;
9ee6e8bb
PB
5921 }
5922 break;
600b828c
PM
5923 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5924 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5925 if (rm & 1) {
5926 return 1;
5927 }
a50f5b91 5928 TCGV_UNUSED(tmp2);
9ee6e8bb 5929 for (pass = 0; pass < 2; pass++) {
ad69471c 5930 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5931 tmp = tcg_temp_new_i32();
600b828c
PM
5932 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5933 tmp, cpu_V0);
ad69471c
PB
5934 if (pass == 0) {
5935 tmp2 = tmp;
5936 } else {
5937 neon_store_reg(rd, 0, tmp2);
5938 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5939 }
9ee6e8bb
PB
5940 }
5941 break;
600b828c 5942 case NEON_2RM_VSHLL:
fc2a9b37 5943 if (q || (rd & 1)) {
9ee6e8bb 5944 return 1;
600b828c 5945 }
ad69471c
PB
5946 tmp = neon_load_reg(rm, 0);
5947 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5948 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5949 if (pass == 1)
5950 tmp = tmp2;
5951 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5952 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5953 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5954 }
5955 break;
600b828c 5956 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5957 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5958 q || (rm & 1)) {
5959 return 1;
5960 }
7d1b0095
PM
5961 tmp = tcg_temp_new_i32();
5962 tmp2 = tcg_temp_new_i32();
60011498 5963 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5964 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5965 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5966 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5967 tcg_gen_shli_i32(tmp2, tmp2, 16);
5968 tcg_gen_or_i32(tmp2, tmp2, tmp);
5969 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5970 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5971 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5972 neon_store_reg(rd, 0, tmp2);
7d1b0095 5973 tmp2 = tcg_temp_new_i32();
2d981da7 5974 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5975 tcg_gen_shli_i32(tmp2, tmp2, 16);
5976 tcg_gen_or_i32(tmp2, tmp2, tmp);
5977 neon_store_reg(rd, 1, tmp2);
7d1b0095 5978 tcg_temp_free_i32(tmp);
60011498 5979 break;
600b828c 5980 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5981 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5982 q || (rd & 1)) {
5983 return 1;
5984 }
7d1b0095 5985 tmp3 = tcg_temp_new_i32();
60011498
PB
5986 tmp = neon_load_reg(rm, 0);
5987 tmp2 = neon_load_reg(rm, 1);
5988 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5989 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5990 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5991 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5992 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5993 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5994 tcg_temp_free_i32(tmp);
60011498 5995 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5996 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5997 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5998 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5999 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6000 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6001 tcg_temp_free_i32(tmp2);
6002 tcg_temp_free_i32(tmp3);
60011498 6003 break;
9ee6e8bb
PB
6004 default:
6005 elementwise:
6006 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6007 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6008 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6009 neon_reg_offset(rm, pass));
dd8fbd78 6010 TCGV_UNUSED(tmp);
9ee6e8bb 6011 } else {
dd8fbd78 6012 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6013 }
6014 switch (op) {
600b828c 6015 case NEON_2RM_VREV32:
9ee6e8bb 6016 switch (size) {
dd8fbd78
FN
6017 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6018 case 1: gen_swap_half(tmp); break;
600b828c 6019 default: abort();
9ee6e8bb
PB
6020 }
6021 break;
600b828c 6022 case NEON_2RM_VREV16:
dd8fbd78 6023 gen_rev16(tmp);
9ee6e8bb 6024 break;
600b828c 6025 case NEON_2RM_VCLS:
9ee6e8bb 6026 switch (size) {
dd8fbd78
FN
6027 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6028 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6029 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6030 default: abort();
9ee6e8bb
PB
6031 }
6032 break;
600b828c 6033 case NEON_2RM_VCLZ:
9ee6e8bb 6034 switch (size) {
dd8fbd78
FN
6035 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6036 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6037 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6038 default: abort();
9ee6e8bb
PB
6039 }
6040 break;
600b828c 6041 case NEON_2RM_VCNT:
dd8fbd78 6042 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6043 break;
600b828c 6044 case NEON_2RM_VMVN:
dd8fbd78 6045 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6046 break;
600b828c 6047 case NEON_2RM_VQABS:
9ee6e8bb 6048 switch (size) {
02da0b2d
PM
6049 case 0:
6050 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6051 break;
6052 case 1:
6053 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6054 break;
6055 case 2:
6056 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6057 break;
600b828c 6058 default: abort();
9ee6e8bb
PB
6059 }
6060 break;
600b828c 6061 case NEON_2RM_VQNEG:
9ee6e8bb 6062 switch (size) {
02da0b2d
PM
6063 case 0:
6064 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6065 break;
6066 case 1:
6067 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6068 break;
6069 case 2:
6070 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6071 break;
600b828c 6072 default: abort();
9ee6e8bb
PB
6073 }
6074 break;
600b828c 6075 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6076 tmp2 = tcg_const_i32(0);
9ee6e8bb 6077 switch(size) {
dd8fbd78
FN
6078 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6079 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6080 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6081 default: abort();
9ee6e8bb 6082 }
dd8fbd78 6083 tcg_temp_free(tmp2);
600b828c 6084 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6085 tcg_gen_not_i32(tmp, tmp);
600b828c 6086 }
9ee6e8bb 6087 break;
600b828c 6088 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6089 tmp2 = tcg_const_i32(0);
9ee6e8bb 6090 switch(size) {
dd8fbd78
FN
6091 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6092 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6093 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6094 default: abort();
9ee6e8bb 6095 }
dd8fbd78 6096 tcg_temp_free(tmp2);
600b828c 6097 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6098 tcg_gen_not_i32(tmp, tmp);
600b828c 6099 }
9ee6e8bb 6100 break;
600b828c 6101 case NEON_2RM_VCEQ0:
dd8fbd78 6102 tmp2 = tcg_const_i32(0);
9ee6e8bb 6103 switch(size) {
dd8fbd78
FN
6104 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6105 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6106 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6107 default: abort();
9ee6e8bb 6108 }
dd8fbd78 6109 tcg_temp_free(tmp2);
9ee6e8bb 6110 break;
600b828c 6111 case NEON_2RM_VABS:
9ee6e8bb 6112 switch(size) {
dd8fbd78
FN
6113 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6114 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6115 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6116 default: abort();
9ee6e8bb
PB
6117 }
6118 break;
600b828c 6119 case NEON_2RM_VNEG:
dd8fbd78
FN
6120 tmp2 = tcg_const_i32(0);
6121 gen_neon_rsb(size, tmp, tmp2);
6122 tcg_temp_free(tmp2);
9ee6e8bb 6123 break;
600b828c 6124 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6125 {
6126 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6127 tmp2 = tcg_const_i32(0);
aa47cfdd 6128 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6129 tcg_temp_free(tmp2);
aa47cfdd 6130 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6131 break;
aa47cfdd 6132 }
600b828c 6133 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6134 {
6135 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6136 tmp2 = tcg_const_i32(0);
aa47cfdd 6137 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6138 tcg_temp_free(tmp2);
aa47cfdd 6139 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6140 break;
aa47cfdd 6141 }
600b828c 6142 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6143 {
6144 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6145 tmp2 = tcg_const_i32(0);
aa47cfdd 6146 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6147 tcg_temp_free(tmp2);
aa47cfdd 6148 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6149 break;
aa47cfdd 6150 }
600b828c 6151 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6152 {
6153 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6154 tmp2 = tcg_const_i32(0);
aa47cfdd 6155 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6156 tcg_temp_free(tmp2);
aa47cfdd 6157 tcg_temp_free_ptr(fpstatus);
0e326109 6158 break;
aa47cfdd 6159 }
600b828c 6160 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6161 {
6162 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6163 tmp2 = tcg_const_i32(0);
aa47cfdd 6164 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6165 tcg_temp_free(tmp2);
aa47cfdd 6166 tcg_temp_free_ptr(fpstatus);
0e326109 6167 break;
aa47cfdd 6168 }
600b828c 6169 case NEON_2RM_VABS_F:
4373f3ce 6170 gen_vfp_abs(0);
9ee6e8bb 6171 break;
600b828c 6172 case NEON_2RM_VNEG_F:
4373f3ce 6173 gen_vfp_neg(0);
9ee6e8bb 6174 break;
600b828c 6175 case NEON_2RM_VSWP:
dd8fbd78
FN
6176 tmp2 = neon_load_reg(rd, pass);
6177 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6178 break;
600b828c 6179 case NEON_2RM_VTRN:
dd8fbd78 6180 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6181 switch (size) {
dd8fbd78
FN
6182 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6183 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6184 default: abort();
9ee6e8bb 6185 }
dd8fbd78 6186 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6187 break;
600b828c 6188 case NEON_2RM_VRECPE:
dd8fbd78 6189 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6190 break;
600b828c 6191 case NEON_2RM_VRSQRTE:
dd8fbd78 6192 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6193 break;
600b828c 6194 case NEON_2RM_VRECPE_F:
4373f3ce 6195 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6196 break;
600b828c 6197 case NEON_2RM_VRSQRTE_F:
4373f3ce 6198 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6199 break;
600b828c 6200 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6201 gen_vfp_sito(0, 1);
9ee6e8bb 6202 break;
600b828c 6203 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6204 gen_vfp_uito(0, 1);
9ee6e8bb 6205 break;
600b828c 6206 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6207 gen_vfp_tosiz(0, 1);
9ee6e8bb 6208 break;
600b828c 6209 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6210 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6211 break;
6212 default:
600b828c
PM
6213 /* Reserved op values were caught by the
6214 * neon_2rm_sizes[] check earlier.
6215 */
6216 abort();
9ee6e8bb 6217 }
600b828c 6218 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6219 tcg_gen_st_f32(cpu_F0s, cpu_env,
6220 neon_reg_offset(rd, pass));
9ee6e8bb 6221 } else {
dd8fbd78 6222 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6223 }
6224 }
6225 break;
6226 }
6227 } else if ((insn & (1 << 10)) == 0) {
6228 /* VTBL, VTBX. */
56907d77
PM
6229 int n = ((insn >> 8) & 3) + 1;
6230 if ((rn + n) > 32) {
6231 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6232 * helper function running off the end of the register file.
6233 */
6234 return 1;
6235 }
6236 n <<= 3;
9ee6e8bb 6237 if (insn & (1 << 6)) {
8f8e3aa4 6238 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6239 } else {
7d1b0095 6240 tmp = tcg_temp_new_i32();
8f8e3aa4 6241 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6242 }
8f8e3aa4 6243 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6244 tmp4 = tcg_const_i32(rn);
6245 tmp5 = tcg_const_i32(n);
6246 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6247 tcg_temp_free_i32(tmp);
9ee6e8bb 6248 if (insn & (1 << 6)) {
8f8e3aa4 6249 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6250 } else {
7d1b0095 6251 tmp = tcg_temp_new_i32();
8f8e3aa4 6252 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6253 }
8f8e3aa4 6254 tmp3 = neon_load_reg(rm, 1);
b75263d6 6255 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6256 tcg_temp_free_i32(tmp5);
6257 tcg_temp_free_i32(tmp4);
8f8e3aa4 6258 neon_store_reg(rd, 0, tmp2);
3018f259 6259 neon_store_reg(rd, 1, tmp3);
7d1b0095 6260 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6261 } else if ((insn & 0x380) == 0) {
6262 /* VDUP */
133da6aa
JR
6263 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6264 return 1;
6265 }
9ee6e8bb 6266 if (insn & (1 << 19)) {
dd8fbd78 6267 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6268 } else {
dd8fbd78 6269 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6270 }
6271 if (insn & (1 << 16)) {
dd8fbd78 6272 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6273 } else if (insn & (1 << 17)) {
6274 if ((insn >> 18) & 1)
dd8fbd78 6275 gen_neon_dup_high16(tmp);
9ee6e8bb 6276 else
dd8fbd78 6277 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6278 }
6279 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6280 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6281 tcg_gen_mov_i32(tmp2, tmp);
6282 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6283 }
7d1b0095 6284 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6285 } else {
6286 return 1;
6287 }
6288 }
6289 }
6290 return 0;
6291}
6292
fe1479c3
PB
6293static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6294{
6295 int crn = (insn >> 16) & 0xf;
6296 int crm = insn & 0xf;
6297 int op1 = (insn >> 21) & 7;
6298 int op2 = (insn >> 5) & 7;
6299 int rt = (insn >> 12) & 0xf;
6300 TCGv tmp;
6301
ca27c052
PM
6302 /* Minimal set of debug registers, since we don't support debug */
6303 if (op1 == 0 && crn == 0 && op2 == 0) {
6304 switch (crm) {
6305 case 0:
6306 /* DBGDIDR: just RAZ. In particular this means the
6307 * "debug architecture version" bits will read as
6308 * a reserved value, which should cause Linux to
6309 * not try to use the debug hardware.
6310 */
6311 tmp = tcg_const_i32(0);
6312 store_reg(s, rt, tmp);
6313 return 0;
6314 case 1:
6315 case 2:
6316 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6317 * don't implement memory mapped debug components
6318 */
6319 if (ENABLE_ARCH_7) {
6320 tmp = tcg_const_i32(0);
6321 store_reg(s, rt, tmp);
6322 return 0;
6323 }
6324 break;
6325 default:
6326 break;
6327 }
6328 }
6329
fe1479c3
PB
6330 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6331 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6332 /* TEECR */
6333 if (IS_USER(s))
6334 return 1;
6335 tmp = load_cpu_field(teecr);
6336 store_reg(s, rt, tmp);
6337 return 0;
6338 }
6339 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6340 /* TEEHBR */
6341 if (IS_USER(s) && (env->teecr & 1))
6342 return 1;
6343 tmp = load_cpu_field(teehbr);
6344 store_reg(s, rt, tmp);
6345 return 0;
6346 }
6347 }
6348 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6349 op1, crn, crm, op2);
6350 return 1;
6351}
6352
6353static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6354{
6355 int crn = (insn >> 16) & 0xf;
6356 int crm = insn & 0xf;
6357 int op1 = (insn >> 21) & 7;
6358 int op2 = (insn >> 5) & 7;
6359 int rt = (insn >> 12) & 0xf;
6360 TCGv tmp;
6361
6362 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6363 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6364 /* TEECR */
6365 if (IS_USER(s))
6366 return 1;
6367 tmp = load_reg(s, rt);
6368 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6369 tcg_temp_free_i32(tmp);
fe1479c3
PB
6370 return 0;
6371 }
6372 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6373 /* TEEHBR */
6374 if (IS_USER(s) && (env->teecr & 1))
6375 return 1;
6376 tmp = load_reg(s, rt);
6377 store_cpu_field(tmp, teehbr);
6378 return 0;
6379 }
6380 }
6381 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6382 op1, crn, crm, op2);
6383 return 1;
6384}
6385
9ee6e8bb
PB
6386static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6387{
6388 int cpnum;
6389
6390 cpnum = (insn >> 8) & 0xf;
6391 if (arm_feature(env, ARM_FEATURE_XSCALE)
6392 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6393 return 1;
6394
6395 switch (cpnum) {
6396 case 0:
6397 case 1:
6398 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6399 return disas_iwmmxt_insn(env, s, insn);
6400 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6401 return disas_dsp_insn(env, s, insn);
6402 }
6403 return 1;
6404 case 10:
6405 case 11:
6406 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6407 case 14:
6408 /* Coprocessors 7-15 are architecturally reserved by ARM.
6409 Unfortunately Intel decided to ignore this. */
6410 if (arm_feature(env, ARM_FEATURE_XSCALE))
6411 goto board;
6412 if (insn & (1 << 20))
6413 return disas_cp14_read(env, s, insn);
6414 else
6415 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6416 case 15:
6417 return disas_cp15_insn (env, s, insn);
6418 default:
fe1479c3 6419 board:
9ee6e8bb
PB
6420 /* Unknown coprocessor. See if the board has hooked it. */
6421 return disas_cp_insn (env, s, insn);
6422 }
6423}
6424
5e3f878a
PB
6425
6426/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6427static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6428{
6429 TCGv tmp;
7d1b0095 6430 tmp = tcg_temp_new_i32();
5e3f878a
PB
6431 tcg_gen_trunc_i64_i32(tmp, val);
6432 store_reg(s, rlow, tmp);
7d1b0095 6433 tmp = tcg_temp_new_i32();
5e3f878a
PB
6434 tcg_gen_shri_i64(val, val, 32);
6435 tcg_gen_trunc_i64_i32(tmp, val);
6436 store_reg(s, rhigh, tmp);
6437}
6438
6439/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6440static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6441{
a7812ae4 6442 TCGv_i64 tmp;
5e3f878a
PB
6443 TCGv tmp2;
6444
36aa55dc 6445 /* Load value and extend to 64 bits. */
a7812ae4 6446 tmp = tcg_temp_new_i64();
5e3f878a
PB
6447 tmp2 = load_reg(s, rlow);
6448 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6449 tcg_temp_free_i32(tmp2);
5e3f878a 6450 tcg_gen_add_i64(val, val, tmp);
b75263d6 6451 tcg_temp_free_i64(tmp);
5e3f878a
PB
6452}
6453
6454/* load and add a 64-bit value from a register pair. */
a7812ae4 6455static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6456{
a7812ae4 6457 TCGv_i64 tmp;
36aa55dc
PB
6458 TCGv tmpl;
6459 TCGv tmph;
5e3f878a
PB
6460
6461 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6462 tmpl = load_reg(s, rlow);
6463 tmph = load_reg(s, rhigh);
a7812ae4 6464 tmp = tcg_temp_new_i64();
36aa55dc 6465 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6466 tcg_temp_free_i32(tmpl);
6467 tcg_temp_free_i32(tmph);
5e3f878a 6468 tcg_gen_add_i64(val, val, tmp);
b75263d6 6469 tcg_temp_free_i64(tmp);
5e3f878a
PB
6470}
6471
6472/* Set N and Z flags from a 64-bit value. */
a7812ae4 6473static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6474{
7d1b0095 6475 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6476 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6477 gen_logic_CC(tmp);
7d1b0095 6478 tcg_temp_free_i32(tmp);
5e3f878a
PB
6479}
6480
426f5abc
PB
6481/* Load/Store exclusive instructions are implemented by remembering
6482 the value/address loaded, and seeing if these are the same
6483 when the store is performed. This should be is sufficient to implement
6484 the architecturally mandated semantics, and avoids having to monitor
6485 regular stores.
6486
6487 In system emulation mode only one CPU will be running at once, so
6488 this sequence is effectively atomic. In user emulation mode we
6489 throw an exception and handle the atomic operation elsewhere. */
6490static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6491 TCGv addr, int size)
6492{
6493 TCGv tmp;
6494
6495 switch (size) {
6496 case 0:
6497 tmp = gen_ld8u(addr, IS_USER(s));
6498 break;
6499 case 1:
6500 tmp = gen_ld16u(addr, IS_USER(s));
6501 break;
6502 case 2:
6503 case 3:
6504 tmp = gen_ld32(addr, IS_USER(s));
6505 break;
6506 default:
6507 abort();
6508 }
6509 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6510 store_reg(s, rt, tmp);
6511 if (size == 3) {
7d1b0095 6512 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6513 tcg_gen_addi_i32(tmp2, addr, 4);
6514 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6515 tcg_temp_free_i32(tmp2);
426f5abc
PB
6516 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6517 store_reg(s, rt2, tmp);
6518 }
6519 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6520}
6521
6522static void gen_clrex(DisasContext *s)
6523{
6524 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6525}
6526
6527#ifdef CONFIG_USER_ONLY
6528static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6529 TCGv addr, int size)
6530{
6531 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6532 tcg_gen_movi_i32(cpu_exclusive_info,
6533 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6534 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6535}
6536#else
6537static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6538 TCGv addr, int size)
6539{
6540 TCGv tmp;
6541 int done_label;
6542 int fail_label;
6543
6544 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6545 [addr] = {Rt};
6546 {Rd} = 0;
6547 } else {
6548 {Rd} = 1;
6549 } */
6550 fail_label = gen_new_label();
6551 done_label = gen_new_label();
6552 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6553 switch (size) {
6554 case 0:
6555 tmp = gen_ld8u(addr, IS_USER(s));
6556 break;
6557 case 1:
6558 tmp = gen_ld16u(addr, IS_USER(s));
6559 break;
6560 case 2:
6561 case 3:
6562 tmp = gen_ld32(addr, IS_USER(s));
6563 break;
6564 default:
6565 abort();
6566 }
6567 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6568 tcg_temp_free_i32(tmp);
426f5abc 6569 if (size == 3) {
7d1b0095 6570 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6571 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6572 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6573 tcg_temp_free_i32(tmp2);
426f5abc 6574 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6575 tcg_temp_free_i32(tmp);
426f5abc
PB
6576 }
6577 tmp = load_reg(s, rt);
6578 switch (size) {
6579 case 0:
6580 gen_st8(tmp, addr, IS_USER(s));
6581 break;
6582 case 1:
6583 gen_st16(tmp, addr, IS_USER(s));
6584 break;
6585 case 2:
6586 case 3:
6587 gen_st32(tmp, addr, IS_USER(s));
6588 break;
6589 default:
6590 abort();
6591 }
6592 if (size == 3) {
6593 tcg_gen_addi_i32(addr, addr, 4);
6594 tmp = load_reg(s, rt2);
6595 gen_st32(tmp, addr, IS_USER(s));
6596 }
6597 tcg_gen_movi_i32(cpu_R[rd], 0);
6598 tcg_gen_br(done_label);
6599 gen_set_label(fail_label);
6600 tcg_gen_movi_i32(cpu_R[rd], 1);
6601 gen_set_label(done_label);
6602 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6603}
6604#endif
6605
9ee6e8bb
PB
6606static void disas_arm_insn(CPUState * env, DisasContext *s)
6607{
6608 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6609 TCGv tmp;
3670669c 6610 TCGv tmp2;
6ddbc6e4 6611 TCGv tmp3;
b0109805 6612 TCGv addr;
a7812ae4 6613 TCGv_i64 tmp64;
9ee6e8bb
PB
6614
6615 insn = ldl_code(s->pc);
6616 s->pc += 4;
6617
6618 /* M variants do not implement ARM mode. */
6619 if (IS_M(env))
6620 goto illegal_op;
6621 cond = insn >> 28;
6622 if (cond == 0xf){
be5e7a76
DES
6623 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6624 * choose to UNDEF. In ARMv5 and above the space is used
6625 * for miscellaneous unconditional instructions.
6626 */
6627 ARCH(5);
6628
9ee6e8bb
PB
6629 /* Unconditional instructions. */
6630 if (((insn >> 25) & 7) == 1) {
6631 /* NEON Data processing. */
6632 if (!arm_feature(env, ARM_FEATURE_NEON))
6633 goto illegal_op;
6634
6635 if (disas_neon_data_insn(env, s, insn))
6636 goto illegal_op;
6637 return;
6638 }
6639 if ((insn & 0x0f100000) == 0x04000000) {
6640 /* NEON load/store. */
6641 if (!arm_feature(env, ARM_FEATURE_NEON))
6642 goto illegal_op;
6643
6644 if (disas_neon_ls_insn(env, s, insn))
6645 goto illegal_op;
6646 return;
6647 }
3d185e5d
PM
6648 if (((insn & 0x0f30f000) == 0x0510f000) ||
6649 ((insn & 0x0f30f010) == 0x0710f000)) {
6650 if ((insn & (1 << 22)) == 0) {
6651 /* PLDW; v7MP */
6652 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6653 goto illegal_op;
6654 }
6655 }
6656 /* Otherwise PLD; v5TE+ */
be5e7a76 6657 ARCH(5TE);
3d185e5d
PM
6658 return;
6659 }
6660 if (((insn & 0x0f70f000) == 0x0450f000) ||
6661 ((insn & 0x0f70f010) == 0x0650f000)) {
6662 ARCH(7);
6663 return; /* PLI; V7 */
6664 }
6665 if (((insn & 0x0f700000) == 0x04100000) ||
6666 ((insn & 0x0f700010) == 0x06100000)) {
6667 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6668 goto illegal_op;
6669 }
6670 return; /* v7MP: Unallocated memory hint: must NOP */
6671 }
6672
6673 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6674 ARCH(6);
6675 /* setend */
6676 if (insn & (1 << 9)) {
6677 /* BE8 mode not implemented. */
6678 goto illegal_op;
6679 }
6680 return;
6681 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6682 switch ((insn >> 4) & 0xf) {
6683 case 1: /* clrex */
6684 ARCH(6K);
426f5abc 6685 gen_clrex(s);
9ee6e8bb
PB
6686 return;
6687 case 4: /* dsb */
6688 case 5: /* dmb */
6689 case 6: /* isb */
6690 ARCH(7);
6691 /* We don't emulate caches so these are a no-op. */
6692 return;
6693 default:
6694 goto illegal_op;
6695 }
6696 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6697 /* srs */
c67b6b71 6698 int32_t offset;
9ee6e8bb
PB
6699 if (IS_USER(s))
6700 goto illegal_op;
6701 ARCH(6);
6702 op1 = (insn & 0x1f);
7d1b0095 6703 addr = tcg_temp_new_i32();
39ea3d4e
PM
6704 tmp = tcg_const_i32(op1);
6705 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6706 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6707 i = (insn >> 23) & 3;
6708 switch (i) {
6709 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6710 case 1: offset = 0; break; /* IA */
6711 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6712 case 3: offset = 4; break; /* IB */
6713 default: abort();
6714 }
6715 if (offset)
b0109805
PB
6716 tcg_gen_addi_i32(addr, addr, offset);
6717 tmp = load_reg(s, 14);
6718 gen_st32(tmp, addr, 0);
c67b6b71 6719 tmp = load_cpu_field(spsr);
b0109805
PB
6720 tcg_gen_addi_i32(addr, addr, 4);
6721 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6722 if (insn & (1 << 21)) {
6723 /* Base writeback. */
6724 switch (i) {
6725 case 0: offset = -8; break;
c67b6b71
FN
6726 case 1: offset = 4; break;
6727 case 2: offset = -4; break;
9ee6e8bb
PB
6728 case 3: offset = 0; break;
6729 default: abort();
6730 }
6731 if (offset)
c67b6b71 6732 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6733 tmp = tcg_const_i32(op1);
6734 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6735 tcg_temp_free_i32(tmp);
7d1b0095 6736 tcg_temp_free_i32(addr);
b0109805 6737 } else {
7d1b0095 6738 tcg_temp_free_i32(addr);
9ee6e8bb 6739 }
a990f58f 6740 return;
ea825eee 6741 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6742 /* rfe */
c67b6b71 6743 int32_t offset;
9ee6e8bb
PB
6744 if (IS_USER(s))
6745 goto illegal_op;
6746 ARCH(6);
6747 rn = (insn >> 16) & 0xf;
b0109805 6748 addr = load_reg(s, rn);
9ee6e8bb
PB
6749 i = (insn >> 23) & 3;
6750 switch (i) {
b0109805 6751 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6752 case 1: offset = 0; break; /* IA */
6753 case 2: offset = -8; break; /* DB */
b0109805 6754 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6755 default: abort();
6756 }
6757 if (offset)
b0109805
PB
6758 tcg_gen_addi_i32(addr, addr, offset);
6759 /* Load PC into tmp and CPSR into tmp2. */
6760 tmp = gen_ld32(addr, 0);
6761 tcg_gen_addi_i32(addr, addr, 4);
6762 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6763 if (insn & (1 << 21)) {
6764 /* Base writeback. */
6765 switch (i) {
b0109805 6766 case 0: offset = -8; break;
c67b6b71
FN
6767 case 1: offset = 4; break;
6768 case 2: offset = -4; break;
b0109805 6769 case 3: offset = 0; break;
9ee6e8bb
PB
6770 default: abort();
6771 }
6772 if (offset)
b0109805
PB
6773 tcg_gen_addi_i32(addr, addr, offset);
6774 store_reg(s, rn, addr);
6775 } else {
7d1b0095 6776 tcg_temp_free_i32(addr);
9ee6e8bb 6777 }
b0109805 6778 gen_rfe(s, tmp, tmp2);
c67b6b71 6779 return;
9ee6e8bb
PB
6780 } else if ((insn & 0x0e000000) == 0x0a000000) {
6781 /* branch link and change to thumb (blx <offset>) */
6782 int32_t offset;
6783
6784 val = (uint32_t)s->pc;
7d1b0095 6785 tmp = tcg_temp_new_i32();
d9ba4830
PB
6786 tcg_gen_movi_i32(tmp, val);
6787 store_reg(s, 14, tmp);
9ee6e8bb
PB
6788 /* Sign-extend the 24-bit offset */
6789 offset = (((int32_t)insn) << 8) >> 8;
6790 /* offset * 4 + bit24 * 2 + (thumb bit) */
6791 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6792 /* pipeline offset */
6793 val += 4;
be5e7a76 6794 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6795 gen_bx_im(s, val);
9ee6e8bb
PB
6796 return;
6797 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6798 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6799 /* iWMMXt register transfer. */
6800 if (env->cp15.c15_cpar & (1 << 1))
6801 if (!disas_iwmmxt_insn(env, s, insn))
6802 return;
6803 }
6804 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6805 /* Coprocessor double register transfer. */
be5e7a76 6806 ARCH(5TE);
9ee6e8bb
PB
6807 } else if ((insn & 0x0f000010) == 0x0e000010) {
6808 /* Additional coprocessor register transfer. */
7997d92f 6809 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6810 uint32_t mask;
6811 uint32_t val;
6812 /* cps (privileged) */
6813 if (IS_USER(s))
6814 return;
6815 mask = val = 0;
6816 if (insn & (1 << 19)) {
6817 if (insn & (1 << 8))
6818 mask |= CPSR_A;
6819 if (insn & (1 << 7))
6820 mask |= CPSR_I;
6821 if (insn & (1 << 6))
6822 mask |= CPSR_F;
6823 if (insn & (1 << 18))
6824 val |= mask;
6825 }
7997d92f 6826 if (insn & (1 << 17)) {
9ee6e8bb
PB
6827 mask |= CPSR_M;
6828 val |= (insn & 0x1f);
6829 }
6830 if (mask) {
2fbac54b 6831 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6832 }
6833 return;
6834 }
6835 goto illegal_op;
6836 }
6837 if (cond != 0xe) {
6838 /* if not always execute, we generate a conditional jump to
6839 next instruction */
6840 s->condlabel = gen_new_label();
d9ba4830 6841 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6842 s->condjmp = 1;
6843 }
6844 if ((insn & 0x0f900000) == 0x03000000) {
6845 if ((insn & (1 << 21)) == 0) {
6846 ARCH(6T2);
6847 rd = (insn >> 12) & 0xf;
6848 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6849 if ((insn & (1 << 22)) == 0) {
6850 /* MOVW */
7d1b0095 6851 tmp = tcg_temp_new_i32();
5e3f878a 6852 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6853 } else {
6854 /* MOVT */
5e3f878a 6855 tmp = load_reg(s, rd);
86831435 6856 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6857 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6858 }
5e3f878a 6859 store_reg(s, rd, tmp);
9ee6e8bb
PB
6860 } else {
6861 if (((insn >> 12) & 0xf) != 0xf)
6862 goto illegal_op;
6863 if (((insn >> 16) & 0xf) == 0) {
6864 gen_nop_hint(s, insn & 0xff);
6865 } else {
6866 /* CPSR = immediate */
6867 val = insn & 0xff;
6868 shift = ((insn >> 8) & 0xf) * 2;
6869 if (shift)
6870 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6871 i = ((insn & (1 << 22)) != 0);
2fbac54b 6872 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6873 goto illegal_op;
6874 }
6875 }
6876 } else if ((insn & 0x0f900000) == 0x01000000
6877 && (insn & 0x00000090) != 0x00000090) {
6878 /* miscellaneous instructions */
6879 op1 = (insn >> 21) & 3;
6880 sh = (insn >> 4) & 0xf;
6881 rm = insn & 0xf;
6882 switch (sh) {
6883 case 0x0: /* move program status register */
6884 if (op1 & 1) {
6885 /* PSR = reg */
2fbac54b 6886 tmp = load_reg(s, rm);
9ee6e8bb 6887 i = ((op1 & 2) != 0);
2fbac54b 6888 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6889 goto illegal_op;
6890 } else {
6891 /* reg = PSR */
6892 rd = (insn >> 12) & 0xf;
6893 if (op1 & 2) {
6894 if (IS_USER(s))
6895 goto illegal_op;
d9ba4830 6896 tmp = load_cpu_field(spsr);
9ee6e8bb 6897 } else {
7d1b0095 6898 tmp = tcg_temp_new_i32();
d9ba4830 6899 gen_helper_cpsr_read(tmp);
9ee6e8bb 6900 }
d9ba4830 6901 store_reg(s, rd, tmp);
9ee6e8bb
PB
6902 }
6903 break;
6904 case 0x1:
6905 if (op1 == 1) {
6906 /* branch/exchange thumb (bx). */
be5e7a76 6907 ARCH(4T);
d9ba4830
PB
6908 tmp = load_reg(s, rm);
6909 gen_bx(s, tmp);
9ee6e8bb
PB
6910 } else if (op1 == 3) {
6911 /* clz */
be5e7a76 6912 ARCH(5);
9ee6e8bb 6913 rd = (insn >> 12) & 0xf;
1497c961
PB
6914 tmp = load_reg(s, rm);
6915 gen_helper_clz(tmp, tmp);
6916 store_reg(s, rd, tmp);
9ee6e8bb
PB
6917 } else {
6918 goto illegal_op;
6919 }
6920 break;
6921 case 0x2:
6922 if (op1 == 1) {
6923 ARCH(5J); /* bxj */
6924 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6925 tmp = load_reg(s, rm);
6926 gen_bx(s, tmp);
9ee6e8bb
PB
6927 } else {
6928 goto illegal_op;
6929 }
6930 break;
6931 case 0x3:
6932 if (op1 != 1)
6933 goto illegal_op;
6934
be5e7a76 6935 ARCH(5);
9ee6e8bb 6936 /* branch link/exchange thumb (blx) */
d9ba4830 6937 tmp = load_reg(s, rm);
7d1b0095 6938 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6939 tcg_gen_movi_i32(tmp2, s->pc);
6940 store_reg(s, 14, tmp2);
6941 gen_bx(s, tmp);
9ee6e8bb
PB
6942 break;
6943 case 0x5: /* saturating add/subtract */
be5e7a76 6944 ARCH(5TE);
9ee6e8bb
PB
6945 rd = (insn >> 12) & 0xf;
6946 rn = (insn >> 16) & 0xf;
b40d0353 6947 tmp = load_reg(s, rm);
5e3f878a 6948 tmp2 = load_reg(s, rn);
9ee6e8bb 6949 if (op1 & 2)
5e3f878a 6950 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6951 if (op1 & 1)
5e3f878a 6952 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6953 else
5e3f878a 6954 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6955 tcg_temp_free_i32(tmp2);
5e3f878a 6956 store_reg(s, rd, tmp);
9ee6e8bb 6957 break;
49e14940
AL
6958 case 7:
6959 /* SMC instruction (op1 == 3)
6960 and undefined instructions (op1 == 0 || op1 == 2)
6961 will trap */
6962 if (op1 != 1) {
6963 goto illegal_op;
6964 }
6965 /* bkpt */
be5e7a76 6966 ARCH(5);
bc4a0de0 6967 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6968 break;
6969 case 0x8: /* signed multiply */
6970 case 0xa:
6971 case 0xc:
6972 case 0xe:
be5e7a76 6973 ARCH(5TE);
9ee6e8bb
PB
6974 rs = (insn >> 8) & 0xf;
6975 rn = (insn >> 12) & 0xf;
6976 rd = (insn >> 16) & 0xf;
6977 if (op1 == 1) {
6978 /* (32 * 16) >> 16 */
5e3f878a
PB
6979 tmp = load_reg(s, rm);
6980 tmp2 = load_reg(s, rs);
9ee6e8bb 6981 if (sh & 4)
5e3f878a 6982 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6983 else
5e3f878a 6984 gen_sxth(tmp2);
a7812ae4
PB
6985 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6986 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6987 tmp = tcg_temp_new_i32();
a7812ae4 6988 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6989 tcg_temp_free_i64(tmp64);
9ee6e8bb 6990 if ((sh & 2) == 0) {
5e3f878a
PB
6991 tmp2 = load_reg(s, rn);
6992 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6993 tcg_temp_free_i32(tmp2);
9ee6e8bb 6994 }
5e3f878a 6995 store_reg(s, rd, tmp);
9ee6e8bb
PB
6996 } else {
6997 /* 16 * 16 */
5e3f878a
PB
6998 tmp = load_reg(s, rm);
6999 tmp2 = load_reg(s, rs);
7000 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7001 tcg_temp_free_i32(tmp2);
9ee6e8bb 7002 if (op1 == 2) {
a7812ae4
PB
7003 tmp64 = tcg_temp_new_i64();
7004 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7005 tcg_temp_free_i32(tmp);
a7812ae4
PB
7006 gen_addq(s, tmp64, rn, rd);
7007 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7008 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7009 } else {
7010 if (op1 == 0) {
5e3f878a
PB
7011 tmp2 = load_reg(s, rn);
7012 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7013 tcg_temp_free_i32(tmp2);
9ee6e8bb 7014 }
5e3f878a 7015 store_reg(s, rd, tmp);
9ee6e8bb
PB
7016 }
7017 }
7018 break;
7019 default:
7020 goto illegal_op;
7021 }
7022 } else if (((insn & 0x0e000000) == 0 &&
7023 (insn & 0x00000090) != 0x90) ||
7024 ((insn & 0x0e000000) == (1 << 25))) {
7025 int set_cc, logic_cc, shiftop;
7026
7027 op1 = (insn >> 21) & 0xf;
7028 set_cc = (insn >> 20) & 1;
7029 logic_cc = table_logic_cc[op1] & set_cc;
7030
7031 /* data processing instruction */
7032 if (insn & (1 << 25)) {
7033 /* immediate operand */
7034 val = insn & 0xff;
7035 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7036 if (shift) {
9ee6e8bb 7037 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7038 }
7d1b0095 7039 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7040 tcg_gen_movi_i32(tmp2, val);
7041 if (logic_cc && shift) {
7042 gen_set_CF_bit31(tmp2);
7043 }
9ee6e8bb
PB
7044 } else {
7045 /* register */
7046 rm = (insn) & 0xf;
e9bb4aa9 7047 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7048 shiftop = (insn >> 5) & 3;
7049 if (!(insn & (1 << 4))) {
7050 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7051 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7052 } else {
7053 rs = (insn >> 8) & 0xf;
8984bd2e 7054 tmp = load_reg(s, rs);
e9bb4aa9 7055 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7056 }
7057 }
7058 if (op1 != 0x0f && op1 != 0x0d) {
7059 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7060 tmp = load_reg(s, rn);
7061 } else {
7062 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7063 }
7064 rd = (insn >> 12) & 0xf;
7065 switch(op1) {
7066 case 0x00:
e9bb4aa9
JR
7067 tcg_gen_and_i32(tmp, tmp, tmp2);
7068 if (logic_cc) {
7069 gen_logic_CC(tmp);
7070 }
21aeb343 7071 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7072 break;
7073 case 0x01:
e9bb4aa9
JR
7074 tcg_gen_xor_i32(tmp, tmp, tmp2);
7075 if (logic_cc) {
7076 gen_logic_CC(tmp);
7077 }
21aeb343 7078 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7079 break;
7080 case 0x02:
7081 if (set_cc && rd == 15) {
7082 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7083 if (IS_USER(s)) {
9ee6e8bb 7084 goto illegal_op;
e9bb4aa9
JR
7085 }
7086 gen_helper_sub_cc(tmp, tmp, tmp2);
7087 gen_exception_return(s, tmp);
9ee6e8bb 7088 } else {
e9bb4aa9
JR
7089 if (set_cc) {
7090 gen_helper_sub_cc(tmp, tmp, tmp2);
7091 } else {
7092 tcg_gen_sub_i32(tmp, tmp, tmp2);
7093 }
21aeb343 7094 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7095 }
7096 break;
7097 case 0x03:
e9bb4aa9
JR
7098 if (set_cc) {
7099 gen_helper_sub_cc(tmp, tmp2, tmp);
7100 } else {
7101 tcg_gen_sub_i32(tmp, tmp2, tmp);
7102 }
21aeb343 7103 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7104 break;
7105 case 0x04:
e9bb4aa9
JR
7106 if (set_cc) {
7107 gen_helper_add_cc(tmp, tmp, tmp2);
7108 } else {
7109 tcg_gen_add_i32(tmp, tmp, tmp2);
7110 }
21aeb343 7111 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7112 break;
7113 case 0x05:
e9bb4aa9
JR
7114 if (set_cc) {
7115 gen_helper_adc_cc(tmp, tmp, tmp2);
7116 } else {
7117 gen_add_carry(tmp, tmp, tmp2);
7118 }
21aeb343 7119 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7120 break;
7121 case 0x06:
e9bb4aa9
JR
7122 if (set_cc) {
7123 gen_helper_sbc_cc(tmp, tmp, tmp2);
7124 } else {
7125 gen_sub_carry(tmp, tmp, tmp2);
7126 }
21aeb343 7127 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7128 break;
7129 case 0x07:
e9bb4aa9
JR
7130 if (set_cc) {
7131 gen_helper_sbc_cc(tmp, tmp2, tmp);
7132 } else {
7133 gen_sub_carry(tmp, tmp2, tmp);
7134 }
21aeb343 7135 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7136 break;
7137 case 0x08:
7138 if (set_cc) {
e9bb4aa9
JR
7139 tcg_gen_and_i32(tmp, tmp, tmp2);
7140 gen_logic_CC(tmp);
9ee6e8bb 7141 }
7d1b0095 7142 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7143 break;
7144 case 0x09:
7145 if (set_cc) {
e9bb4aa9
JR
7146 tcg_gen_xor_i32(tmp, tmp, tmp2);
7147 gen_logic_CC(tmp);
9ee6e8bb 7148 }
7d1b0095 7149 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7150 break;
7151 case 0x0a:
7152 if (set_cc) {
e9bb4aa9 7153 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7154 }
7d1b0095 7155 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7156 break;
7157 case 0x0b:
7158 if (set_cc) {
e9bb4aa9 7159 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7160 }
7d1b0095 7161 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7162 break;
7163 case 0x0c:
e9bb4aa9
JR
7164 tcg_gen_or_i32(tmp, tmp, tmp2);
7165 if (logic_cc) {
7166 gen_logic_CC(tmp);
7167 }
21aeb343 7168 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7169 break;
7170 case 0x0d:
7171 if (logic_cc && rd == 15) {
7172 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7173 if (IS_USER(s)) {
9ee6e8bb 7174 goto illegal_op;
e9bb4aa9
JR
7175 }
7176 gen_exception_return(s, tmp2);
9ee6e8bb 7177 } else {
e9bb4aa9
JR
7178 if (logic_cc) {
7179 gen_logic_CC(tmp2);
7180 }
21aeb343 7181 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7182 }
7183 break;
7184 case 0x0e:
f669df27 7185 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7186 if (logic_cc) {
7187 gen_logic_CC(tmp);
7188 }
21aeb343 7189 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7190 break;
7191 default:
7192 case 0x0f:
e9bb4aa9
JR
7193 tcg_gen_not_i32(tmp2, tmp2);
7194 if (logic_cc) {
7195 gen_logic_CC(tmp2);
7196 }
21aeb343 7197 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7198 break;
7199 }
e9bb4aa9 7200 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7201 tcg_temp_free_i32(tmp2);
e9bb4aa9 7202 }
9ee6e8bb
PB
7203 } else {
7204 /* other instructions */
7205 op1 = (insn >> 24) & 0xf;
7206 switch(op1) {
7207 case 0x0:
7208 case 0x1:
7209 /* multiplies, extra load/stores */
7210 sh = (insn >> 5) & 3;
7211 if (sh == 0) {
7212 if (op1 == 0x0) {
7213 rd = (insn >> 16) & 0xf;
7214 rn = (insn >> 12) & 0xf;
7215 rs = (insn >> 8) & 0xf;
7216 rm = (insn) & 0xf;
7217 op1 = (insn >> 20) & 0xf;
7218 switch (op1) {
7219 case 0: case 1: case 2: case 3: case 6:
7220 /* 32 bit mul */
5e3f878a
PB
7221 tmp = load_reg(s, rs);
7222 tmp2 = load_reg(s, rm);
7223 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7224 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7225 if (insn & (1 << 22)) {
7226 /* Subtract (mls) */
7227 ARCH(6T2);
5e3f878a
PB
7228 tmp2 = load_reg(s, rn);
7229 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7230 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7231 } else if (insn & (1 << 21)) {
7232 /* Add */
5e3f878a
PB
7233 tmp2 = load_reg(s, rn);
7234 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7235 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7236 }
7237 if (insn & (1 << 20))
5e3f878a
PB
7238 gen_logic_CC(tmp);
7239 store_reg(s, rd, tmp);
9ee6e8bb 7240 break;
8aac08b1
AJ
7241 case 4:
7242 /* 64 bit mul double accumulate (UMAAL) */
7243 ARCH(6);
7244 tmp = load_reg(s, rs);
7245 tmp2 = load_reg(s, rm);
7246 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7247 gen_addq_lo(s, tmp64, rn);
7248 gen_addq_lo(s, tmp64, rd);
7249 gen_storeq_reg(s, rn, rd, tmp64);
7250 tcg_temp_free_i64(tmp64);
7251 break;
7252 case 8: case 9: case 10: case 11:
7253 case 12: case 13: case 14: case 15:
7254 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7255 tmp = load_reg(s, rs);
7256 tmp2 = load_reg(s, rm);
8aac08b1 7257 if (insn & (1 << 22)) {
a7812ae4 7258 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7259 } else {
a7812ae4 7260 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7261 }
7262 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7263 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7264 }
8aac08b1 7265 if (insn & (1 << 20)) {
a7812ae4 7266 gen_logicq_cc(tmp64);
8aac08b1 7267 }
a7812ae4 7268 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7269 tcg_temp_free_i64(tmp64);
9ee6e8bb 7270 break;
8aac08b1
AJ
7271 default:
7272 goto illegal_op;
9ee6e8bb
PB
7273 }
7274 } else {
7275 rn = (insn >> 16) & 0xf;
7276 rd = (insn >> 12) & 0xf;
7277 if (insn & (1 << 23)) {
7278 /* load/store exclusive */
86753403
PB
7279 op1 = (insn >> 21) & 0x3;
7280 if (op1)
a47f43d2 7281 ARCH(6K);
86753403
PB
7282 else
7283 ARCH(6);
3174f8e9 7284 addr = tcg_temp_local_new_i32();
98a46317 7285 load_reg_var(s, addr, rn);
9ee6e8bb 7286 if (insn & (1 << 20)) {
86753403
PB
7287 switch (op1) {
7288 case 0: /* ldrex */
426f5abc 7289 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7290 break;
7291 case 1: /* ldrexd */
426f5abc 7292 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7293 break;
7294 case 2: /* ldrexb */
426f5abc 7295 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7296 break;
7297 case 3: /* ldrexh */
426f5abc 7298 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7299 break;
7300 default:
7301 abort();
7302 }
9ee6e8bb
PB
7303 } else {
7304 rm = insn & 0xf;
86753403
PB
7305 switch (op1) {
7306 case 0: /* strex */
426f5abc 7307 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7308 break;
7309 case 1: /* strexd */
502e64fe 7310 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7311 break;
7312 case 2: /* strexb */
426f5abc 7313 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7314 break;
7315 case 3: /* strexh */
426f5abc 7316 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7317 break;
7318 default:
7319 abort();
7320 }
9ee6e8bb 7321 }
3174f8e9 7322 tcg_temp_free(addr);
9ee6e8bb
PB
7323 } else {
7324 /* SWP instruction */
7325 rm = (insn) & 0xf;
7326
8984bd2e
PB
7327 /* ??? This is not really atomic. However we know
7328 we never have multiple CPUs running in parallel,
7329 so it is good enough. */
7330 addr = load_reg(s, rn);
7331 tmp = load_reg(s, rm);
9ee6e8bb 7332 if (insn & (1 << 22)) {
8984bd2e
PB
7333 tmp2 = gen_ld8u(addr, IS_USER(s));
7334 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7335 } else {
8984bd2e
PB
7336 tmp2 = gen_ld32(addr, IS_USER(s));
7337 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7338 }
7d1b0095 7339 tcg_temp_free_i32(addr);
8984bd2e 7340 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7341 }
7342 }
7343 } else {
7344 int address_offset;
7345 int load;
7346 /* Misc load/store */
7347 rn = (insn >> 16) & 0xf;
7348 rd = (insn >> 12) & 0xf;
b0109805 7349 addr = load_reg(s, rn);
9ee6e8bb 7350 if (insn & (1 << 24))
b0109805 7351 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7352 address_offset = 0;
7353 if (insn & (1 << 20)) {
7354 /* load */
7355 switch(sh) {
7356 case 1:
b0109805 7357 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7358 break;
7359 case 2:
b0109805 7360 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7361 break;
7362 default:
7363 case 3:
b0109805 7364 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7365 break;
7366 }
7367 load = 1;
7368 } else if (sh & 2) {
be5e7a76 7369 ARCH(5TE);
9ee6e8bb
PB
7370 /* doubleword */
7371 if (sh & 1) {
7372 /* store */
b0109805
PB
7373 tmp = load_reg(s, rd);
7374 gen_st32(tmp, addr, IS_USER(s));
7375 tcg_gen_addi_i32(addr, addr, 4);
7376 tmp = load_reg(s, rd + 1);
7377 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7378 load = 0;
7379 } else {
7380 /* load */
b0109805
PB
7381 tmp = gen_ld32(addr, IS_USER(s));
7382 store_reg(s, rd, tmp);
7383 tcg_gen_addi_i32(addr, addr, 4);
7384 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7385 rd++;
7386 load = 1;
7387 }
7388 address_offset = -4;
7389 } else {
7390 /* store */
b0109805
PB
7391 tmp = load_reg(s, rd);
7392 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7393 load = 0;
7394 }
7395 /* Perform base writeback before the loaded value to
7396 ensure correct behavior with overlapping index registers.
7397 ldrd with base writeback is is undefined if the
7398 destination and index registers overlap. */
7399 if (!(insn & (1 << 24))) {
b0109805
PB
7400 gen_add_datah_offset(s, insn, address_offset, addr);
7401 store_reg(s, rn, addr);
9ee6e8bb
PB
7402 } else if (insn & (1 << 21)) {
7403 if (address_offset)
b0109805
PB
7404 tcg_gen_addi_i32(addr, addr, address_offset);
7405 store_reg(s, rn, addr);
7406 } else {
7d1b0095 7407 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7408 }
7409 if (load) {
7410 /* Complete the load. */
b0109805 7411 store_reg(s, rd, tmp);
9ee6e8bb
PB
7412 }
7413 }
7414 break;
7415 case 0x4:
7416 case 0x5:
7417 goto do_ldst;
7418 case 0x6:
7419 case 0x7:
7420 if (insn & (1 << 4)) {
7421 ARCH(6);
7422 /* Armv6 Media instructions. */
7423 rm = insn & 0xf;
7424 rn = (insn >> 16) & 0xf;
2c0262af 7425 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7426 rs = (insn >> 8) & 0xf;
7427 switch ((insn >> 23) & 3) {
7428 case 0: /* Parallel add/subtract. */
7429 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7430 tmp = load_reg(s, rn);
7431 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7432 sh = (insn >> 5) & 7;
7433 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7434 goto illegal_op;
6ddbc6e4 7435 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7436 tcg_temp_free_i32(tmp2);
6ddbc6e4 7437 store_reg(s, rd, tmp);
9ee6e8bb
PB
7438 break;
7439 case 1:
7440 if ((insn & 0x00700020) == 0) {
6c95676b 7441 /* Halfword pack. */
3670669c
PB
7442 tmp = load_reg(s, rn);
7443 tmp2 = load_reg(s, rm);
9ee6e8bb 7444 shift = (insn >> 7) & 0x1f;
3670669c
PB
7445 if (insn & (1 << 6)) {
7446 /* pkhtb */
22478e79
AZ
7447 if (shift == 0)
7448 shift = 31;
7449 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7450 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7451 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7452 } else {
7453 /* pkhbt */
22478e79
AZ
7454 if (shift)
7455 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7456 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7457 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7458 }
7459 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7460 tcg_temp_free_i32(tmp2);
3670669c 7461 store_reg(s, rd, tmp);
9ee6e8bb
PB
7462 } else if ((insn & 0x00200020) == 0x00200000) {
7463 /* [us]sat */
6ddbc6e4 7464 tmp = load_reg(s, rm);
9ee6e8bb
PB
7465 shift = (insn >> 7) & 0x1f;
7466 if (insn & (1 << 6)) {
7467 if (shift == 0)
7468 shift = 31;
6ddbc6e4 7469 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7470 } else {
6ddbc6e4 7471 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7472 }
7473 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7474 tmp2 = tcg_const_i32(sh);
7475 if (insn & (1 << 22))
7476 gen_helper_usat(tmp, tmp, tmp2);
7477 else
7478 gen_helper_ssat(tmp, tmp, tmp2);
7479 tcg_temp_free_i32(tmp2);
6ddbc6e4 7480 store_reg(s, rd, tmp);
9ee6e8bb
PB
7481 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7482 /* [us]sat16 */
6ddbc6e4 7483 tmp = load_reg(s, rm);
9ee6e8bb 7484 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7485 tmp2 = tcg_const_i32(sh);
7486 if (insn & (1 << 22))
7487 gen_helper_usat16(tmp, tmp, tmp2);
7488 else
7489 gen_helper_ssat16(tmp, tmp, tmp2);
7490 tcg_temp_free_i32(tmp2);
6ddbc6e4 7491 store_reg(s, rd, tmp);
9ee6e8bb
PB
7492 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7493 /* Select bytes. */
6ddbc6e4
PB
7494 tmp = load_reg(s, rn);
7495 tmp2 = load_reg(s, rm);
7d1b0095 7496 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7497 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7498 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7499 tcg_temp_free_i32(tmp3);
7500 tcg_temp_free_i32(tmp2);
6ddbc6e4 7501 store_reg(s, rd, tmp);
9ee6e8bb 7502 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7503 tmp = load_reg(s, rm);
9ee6e8bb 7504 shift = (insn >> 10) & 3;
1301f322 7505 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7506 rotate, a shift is sufficient. */
7507 if (shift != 0)
f669df27 7508 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7509 op1 = (insn >> 20) & 7;
7510 switch (op1) {
5e3f878a
PB
7511 case 0: gen_sxtb16(tmp); break;
7512 case 2: gen_sxtb(tmp); break;
7513 case 3: gen_sxth(tmp); break;
7514 case 4: gen_uxtb16(tmp); break;
7515 case 6: gen_uxtb(tmp); break;
7516 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7517 default: goto illegal_op;
7518 }
7519 if (rn != 15) {
5e3f878a 7520 tmp2 = load_reg(s, rn);
9ee6e8bb 7521 if ((op1 & 3) == 0) {
5e3f878a 7522 gen_add16(tmp, tmp2);
9ee6e8bb 7523 } else {
5e3f878a 7524 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7525 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7526 }
7527 }
6c95676b 7528 store_reg(s, rd, tmp);
9ee6e8bb
PB
7529 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7530 /* rev */
b0109805 7531 tmp = load_reg(s, rm);
9ee6e8bb
PB
7532 if (insn & (1 << 22)) {
7533 if (insn & (1 << 7)) {
b0109805 7534 gen_revsh(tmp);
9ee6e8bb
PB
7535 } else {
7536 ARCH(6T2);
b0109805 7537 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7538 }
7539 } else {
7540 if (insn & (1 << 7))
b0109805 7541 gen_rev16(tmp);
9ee6e8bb 7542 else
66896cb8 7543 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7544 }
b0109805 7545 store_reg(s, rd, tmp);
9ee6e8bb
PB
7546 } else {
7547 goto illegal_op;
7548 }
7549 break;
7550 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7551 tmp = load_reg(s, rm);
7552 tmp2 = load_reg(s, rs);
9ee6e8bb 7553 if (insn & (1 << 20)) {
838fa72d
AJ
7554 /* Signed multiply most significant [accumulate].
7555 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7556 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7557
955a7dd5 7558 if (rd != 15) {
838fa72d 7559 tmp = load_reg(s, rd);
9ee6e8bb 7560 if (insn & (1 << 6)) {
838fa72d 7561 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7562 } else {
838fa72d 7563 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7564 }
7565 }
838fa72d
AJ
7566 if (insn & (1 << 5)) {
7567 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7568 }
7569 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7570 tmp = tcg_temp_new_i32();
838fa72d
AJ
7571 tcg_gen_trunc_i64_i32(tmp, tmp64);
7572 tcg_temp_free_i64(tmp64);
955a7dd5 7573 store_reg(s, rn, tmp);
9ee6e8bb
PB
7574 } else {
7575 if (insn & (1 << 5))
5e3f878a
PB
7576 gen_swap_half(tmp2);
7577 gen_smul_dual(tmp, tmp2);
5e3f878a 7578 if (insn & (1 << 6)) {
e1d177b9 7579 /* This subtraction cannot overflow. */
5e3f878a
PB
7580 tcg_gen_sub_i32(tmp, tmp, tmp2);
7581 } else {
e1d177b9
PM
7582 /* This addition cannot overflow 32 bits;
7583 * however it may overflow considered as a signed
7584 * operation, in which case we must set the Q flag.
7585 */
7586 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7587 }
7d1b0095 7588 tcg_temp_free_i32(tmp2);
9ee6e8bb 7589 if (insn & (1 << 22)) {
5e3f878a 7590 /* smlald, smlsld */
a7812ae4
PB
7591 tmp64 = tcg_temp_new_i64();
7592 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7593 tcg_temp_free_i32(tmp);
a7812ae4
PB
7594 gen_addq(s, tmp64, rd, rn);
7595 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7596 tcg_temp_free_i64(tmp64);
9ee6e8bb 7597 } else {
5e3f878a 7598 /* smuad, smusd, smlad, smlsd */
22478e79 7599 if (rd != 15)
9ee6e8bb 7600 {
22478e79 7601 tmp2 = load_reg(s, rd);
5e3f878a 7602 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7603 tcg_temp_free_i32(tmp2);
9ee6e8bb 7604 }
22478e79 7605 store_reg(s, rn, tmp);
9ee6e8bb
PB
7606 }
7607 }
7608 break;
7609 case 3:
7610 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7611 switch (op1) {
7612 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7613 ARCH(6);
7614 tmp = load_reg(s, rm);
7615 tmp2 = load_reg(s, rs);
7616 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7617 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7618 if (rd != 15) {
7619 tmp2 = load_reg(s, rd);
6ddbc6e4 7620 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7621 tcg_temp_free_i32(tmp2);
9ee6e8bb 7622 }
ded9d295 7623 store_reg(s, rn, tmp);
9ee6e8bb
PB
7624 break;
7625 case 0x20: case 0x24: case 0x28: case 0x2c:
7626 /* Bitfield insert/clear. */
7627 ARCH(6T2);
7628 shift = (insn >> 7) & 0x1f;
7629 i = (insn >> 16) & 0x1f;
7630 i = i + 1 - shift;
7631 if (rm == 15) {
7d1b0095 7632 tmp = tcg_temp_new_i32();
5e3f878a 7633 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7634 } else {
5e3f878a 7635 tmp = load_reg(s, rm);
9ee6e8bb
PB
7636 }
7637 if (i != 32) {
5e3f878a 7638 tmp2 = load_reg(s, rd);
8f8e3aa4 7639 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7640 tcg_temp_free_i32(tmp2);
9ee6e8bb 7641 }
5e3f878a 7642 store_reg(s, rd, tmp);
9ee6e8bb
PB
7643 break;
7644 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7645 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7646 ARCH(6T2);
5e3f878a 7647 tmp = load_reg(s, rm);
9ee6e8bb
PB
7648 shift = (insn >> 7) & 0x1f;
7649 i = ((insn >> 16) & 0x1f) + 1;
7650 if (shift + i > 32)
7651 goto illegal_op;
7652 if (i < 32) {
7653 if (op1 & 0x20) {
5e3f878a 7654 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7655 } else {
5e3f878a 7656 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7657 }
7658 }
5e3f878a 7659 store_reg(s, rd, tmp);
9ee6e8bb
PB
7660 break;
7661 default:
7662 goto illegal_op;
7663 }
7664 break;
7665 }
7666 break;
7667 }
7668 do_ldst:
7669 /* Check for undefined extension instructions
7670 * per the ARM Bible IE:
7671 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7672 */
7673 sh = (0xf << 20) | (0xf << 4);
7674 if (op1 == 0x7 && ((insn & sh) == sh))
7675 {
7676 goto illegal_op;
7677 }
7678 /* load/store byte/word */
7679 rn = (insn >> 16) & 0xf;
7680 rd = (insn >> 12) & 0xf;
b0109805 7681 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7682 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7683 if (insn & (1 << 24))
b0109805 7684 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7685 if (insn & (1 << 20)) {
7686 /* load */
9ee6e8bb 7687 if (insn & (1 << 22)) {
b0109805 7688 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7689 } else {
b0109805 7690 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7691 }
9ee6e8bb
PB
7692 } else {
7693 /* store */
b0109805 7694 tmp = load_reg(s, rd);
9ee6e8bb 7695 if (insn & (1 << 22))
b0109805 7696 gen_st8(tmp, tmp2, i);
9ee6e8bb 7697 else
b0109805 7698 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7699 }
7700 if (!(insn & (1 << 24))) {
b0109805
PB
7701 gen_add_data_offset(s, insn, tmp2);
7702 store_reg(s, rn, tmp2);
7703 } else if (insn & (1 << 21)) {
7704 store_reg(s, rn, tmp2);
7705 } else {
7d1b0095 7706 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7707 }
7708 if (insn & (1 << 20)) {
7709 /* Complete the load. */
be5e7a76 7710 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7711 }
7712 break;
7713 case 0x08:
7714 case 0x09:
7715 {
7716 int j, n, user, loaded_base;
b0109805 7717 TCGv loaded_var;
9ee6e8bb
PB
7718 /* load/store multiple words */
7719 /* XXX: store correct base if write back */
7720 user = 0;
7721 if (insn & (1 << 22)) {
7722 if (IS_USER(s))
7723 goto illegal_op; /* only usable in supervisor mode */
7724
7725 if ((insn & (1 << 15)) == 0)
7726 user = 1;
7727 }
7728 rn = (insn >> 16) & 0xf;
b0109805 7729 addr = load_reg(s, rn);
9ee6e8bb
PB
7730
7731 /* compute total size */
7732 loaded_base = 0;
a50f5b91 7733 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7734 n = 0;
7735 for(i=0;i<16;i++) {
7736 if (insn & (1 << i))
7737 n++;
7738 }
7739 /* XXX: test invalid n == 0 case ? */
7740 if (insn & (1 << 23)) {
7741 if (insn & (1 << 24)) {
7742 /* pre increment */
b0109805 7743 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7744 } else {
7745 /* post increment */
7746 }
7747 } else {
7748 if (insn & (1 << 24)) {
7749 /* pre decrement */
b0109805 7750 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7751 } else {
7752 /* post decrement */
7753 if (n != 1)
b0109805 7754 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7755 }
7756 }
7757 j = 0;
7758 for(i=0;i<16;i++) {
7759 if (insn & (1 << i)) {
7760 if (insn & (1 << 20)) {
7761 /* load */
b0109805 7762 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7763 if (user) {
b75263d6
JR
7764 tmp2 = tcg_const_i32(i);
7765 gen_helper_set_user_reg(tmp2, tmp);
7766 tcg_temp_free_i32(tmp2);
7d1b0095 7767 tcg_temp_free_i32(tmp);
9ee6e8bb 7768 } else if (i == rn) {
b0109805 7769 loaded_var = tmp;
9ee6e8bb
PB
7770 loaded_base = 1;
7771 } else {
be5e7a76 7772 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7773 }
7774 } else {
7775 /* store */
7776 if (i == 15) {
7777 /* special case: r15 = PC + 8 */
7778 val = (long)s->pc + 4;
7d1b0095 7779 tmp = tcg_temp_new_i32();
b0109805 7780 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7781 } else if (user) {
7d1b0095 7782 tmp = tcg_temp_new_i32();
b75263d6
JR
7783 tmp2 = tcg_const_i32(i);
7784 gen_helper_get_user_reg(tmp, tmp2);
7785 tcg_temp_free_i32(tmp2);
9ee6e8bb 7786 } else {
b0109805 7787 tmp = load_reg(s, i);
9ee6e8bb 7788 }
b0109805 7789 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7790 }
7791 j++;
7792 /* no need to add after the last transfer */
7793 if (j != n)
b0109805 7794 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7795 }
7796 }
7797 if (insn & (1 << 21)) {
7798 /* write back */
7799 if (insn & (1 << 23)) {
7800 if (insn & (1 << 24)) {
7801 /* pre increment */
7802 } else {
7803 /* post increment */
b0109805 7804 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7805 }
7806 } else {
7807 if (insn & (1 << 24)) {
7808 /* pre decrement */
7809 if (n != 1)
b0109805 7810 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7811 } else {
7812 /* post decrement */
b0109805 7813 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7814 }
7815 }
b0109805
PB
7816 store_reg(s, rn, addr);
7817 } else {
7d1b0095 7818 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7819 }
7820 if (loaded_base) {
b0109805 7821 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7822 }
7823 if ((insn & (1 << 22)) && !user) {
7824 /* Restore CPSR from SPSR. */
d9ba4830
PB
7825 tmp = load_cpu_field(spsr);
7826 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7827 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7828 s->is_jmp = DISAS_UPDATE;
7829 }
7830 }
7831 break;
7832 case 0xa:
7833 case 0xb:
7834 {
7835 int32_t offset;
7836
7837 /* branch (and link) */
7838 val = (int32_t)s->pc;
7839 if (insn & (1 << 24)) {
7d1b0095 7840 tmp = tcg_temp_new_i32();
5e3f878a
PB
7841 tcg_gen_movi_i32(tmp, val);
7842 store_reg(s, 14, tmp);
9ee6e8bb
PB
7843 }
7844 offset = (((int32_t)insn << 8) >> 8);
7845 val += (offset << 2) + 4;
7846 gen_jmp(s, val);
7847 }
7848 break;
7849 case 0xc:
7850 case 0xd:
7851 case 0xe:
7852 /* Coprocessor. */
7853 if (disas_coproc_insn(env, s, insn))
7854 goto illegal_op;
7855 break;
7856 case 0xf:
7857 /* swi */
5e3f878a 7858 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7859 s->is_jmp = DISAS_SWI;
7860 break;
7861 default:
7862 illegal_op:
bc4a0de0 7863 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7864 break;
7865 }
7866 }
7867}
7868
/* Predicate: nonzero iff OP is one of the Thumb-2 logical data-processing
 * opcodes (AND/BIC/ORR/ORN/EOR and their flag-setting forms), i.e. any
 * opcode value below 8.  Arithmetic ops (add/adc/sbc/sub/rsb) return 0.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
7875
7876/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7877 then set condition code flags based on the result of the operation.
7878 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7879 to the high bit of T1.
7880 Returns zero if the opcode is valid. */
7881
7882static int
396e467c 7883gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7884{
7885 int logic_cc;
7886
7887 logic_cc = 0;
7888 switch (op) {
7889 case 0: /* and */
396e467c 7890 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7891 logic_cc = conds;
7892 break;
7893 case 1: /* bic */
f669df27 7894 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7895 logic_cc = conds;
7896 break;
7897 case 2: /* orr */
396e467c 7898 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7899 logic_cc = conds;
7900 break;
7901 case 3: /* orn */
29501f1b 7902 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7903 logic_cc = conds;
7904 break;
7905 case 4: /* eor */
396e467c 7906 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7907 logic_cc = conds;
7908 break;
7909 case 8: /* add */
7910 if (conds)
396e467c 7911 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7912 else
396e467c 7913 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7914 break;
7915 case 10: /* adc */
7916 if (conds)
396e467c 7917 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7918 else
396e467c 7919 gen_adc(t0, t1);
9ee6e8bb
PB
7920 break;
7921 case 11: /* sbc */
7922 if (conds)
396e467c 7923 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7924 else
396e467c 7925 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7926 break;
7927 case 13: /* sub */
7928 if (conds)
396e467c 7929 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7930 else
396e467c 7931 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7932 break;
7933 case 14: /* rsb */
7934 if (conds)
396e467c 7935 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7936 else
396e467c 7937 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7938 break;
7939 default: /* 5, 6, 7, 9, 12, 15. */
7940 return 1;
7941 }
7942 if (logic_cc) {
396e467c 7943 gen_logic_CC(t0);
9ee6e8bb 7944 if (shifter_out)
396e467c 7945 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7946 }
7947 return 0;
7948}
7949
7950/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7951 is not legal. */
7952static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7953{
b0109805 7954 uint32_t insn, imm, shift, offset;
9ee6e8bb 7955 uint32_t rd, rn, rm, rs;
b26eefb6 7956 TCGv tmp;
6ddbc6e4
PB
7957 TCGv tmp2;
7958 TCGv tmp3;
b0109805 7959 TCGv addr;
a7812ae4 7960 TCGv_i64 tmp64;
9ee6e8bb
PB
7961 int op;
7962 int shiftop;
7963 int conds;
7964 int logic_cc;
7965
7966 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7967 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7968 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7969 16-bit instructions to get correct prefetch abort behavior. */
7970 insn = insn_hw1;
7971 if ((insn & (1 << 12)) == 0) {
be5e7a76 7972 ARCH(5);
9ee6e8bb
PB
7973 /* Second half of blx. */
7974 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7975 tmp = load_reg(s, 14);
7976 tcg_gen_addi_i32(tmp, tmp, offset);
7977 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7978
7d1b0095 7979 tmp2 = tcg_temp_new_i32();
b0109805 7980 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7981 store_reg(s, 14, tmp2);
7982 gen_bx(s, tmp);
9ee6e8bb
PB
7983 return 0;
7984 }
7985 if (insn & (1 << 11)) {
7986 /* Second half of bl. */
7987 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7988 tmp = load_reg(s, 14);
6a0d8a1d 7989 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7990
7d1b0095 7991 tmp2 = tcg_temp_new_i32();
b0109805 7992 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7993 store_reg(s, 14, tmp2);
7994 gen_bx(s, tmp);
9ee6e8bb
PB
7995 return 0;
7996 }
7997 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7998 /* Instruction spans a page boundary. Implement it as two
7999 16-bit instructions in case the second half causes an
8000 prefetch abort. */
8001 offset = ((int32_t)insn << 21) >> 9;
396e467c 8002 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8003 return 0;
8004 }
8005 /* Fall through to 32-bit decode. */
8006 }
8007
8008 insn = lduw_code(s->pc);
8009 s->pc += 2;
8010 insn |= (uint32_t)insn_hw1 << 16;
8011
8012 if ((insn & 0xf800e800) != 0xf000e800) {
8013 ARCH(6T2);
8014 }
8015
8016 rn = (insn >> 16) & 0xf;
8017 rs = (insn >> 12) & 0xf;
8018 rd = (insn >> 8) & 0xf;
8019 rm = insn & 0xf;
8020 switch ((insn >> 25) & 0xf) {
8021 case 0: case 1: case 2: case 3:
8022 /* 16-bit instructions. Should never happen. */
8023 abort();
8024 case 4:
8025 if (insn & (1 << 22)) {
8026 /* Other load/store, table branch. */
8027 if (insn & 0x01200000) {
8028 /* Load/store doubleword. */
8029 if (rn == 15) {
7d1b0095 8030 addr = tcg_temp_new_i32();
b0109805 8031 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8032 } else {
b0109805 8033 addr = load_reg(s, rn);
9ee6e8bb
PB
8034 }
8035 offset = (insn & 0xff) * 4;
8036 if ((insn & (1 << 23)) == 0)
8037 offset = -offset;
8038 if (insn & (1 << 24)) {
b0109805 8039 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8040 offset = 0;
8041 }
8042 if (insn & (1 << 20)) {
8043 /* ldrd */
b0109805
PB
8044 tmp = gen_ld32(addr, IS_USER(s));
8045 store_reg(s, rs, tmp);
8046 tcg_gen_addi_i32(addr, addr, 4);
8047 tmp = gen_ld32(addr, IS_USER(s));
8048 store_reg(s, rd, tmp);
9ee6e8bb
PB
8049 } else {
8050 /* strd */
b0109805
PB
8051 tmp = load_reg(s, rs);
8052 gen_st32(tmp, addr, IS_USER(s));
8053 tcg_gen_addi_i32(addr, addr, 4);
8054 tmp = load_reg(s, rd);
8055 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8056 }
8057 if (insn & (1 << 21)) {
8058 /* Base writeback. */
8059 if (rn == 15)
8060 goto illegal_op;
b0109805
PB
8061 tcg_gen_addi_i32(addr, addr, offset - 4);
8062 store_reg(s, rn, addr);
8063 } else {
7d1b0095 8064 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8065 }
8066 } else if ((insn & (1 << 23)) == 0) {
8067 /* Load/store exclusive word. */
3174f8e9 8068 addr = tcg_temp_local_new();
98a46317 8069 load_reg_var(s, addr, rn);
426f5abc 8070 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8071 if (insn & (1 << 20)) {
426f5abc 8072 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8073 } else {
426f5abc 8074 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8075 }
3174f8e9 8076 tcg_temp_free(addr);
9ee6e8bb
PB
8077 } else if ((insn & (1 << 6)) == 0) {
8078 /* Table Branch. */
8079 if (rn == 15) {
7d1b0095 8080 addr = tcg_temp_new_i32();
b0109805 8081 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8082 } else {
b0109805 8083 addr = load_reg(s, rn);
9ee6e8bb 8084 }
b26eefb6 8085 tmp = load_reg(s, rm);
b0109805 8086 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8087 if (insn & (1 << 4)) {
8088 /* tbh */
b0109805 8089 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8090 tcg_temp_free_i32(tmp);
b0109805 8091 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8092 } else { /* tbb */
7d1b0095 8093 tcg_temp_free_i32(tmp);
b0109805 8094 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8095 }
7d1b0095 8096 tcg_temp_free_i32(addr);
b0109805
PB
8097 tcg_gen_shli_i32(tmp, tmp, 1);
8098 tcg_gen_addi_i32(tmp, tmp, s->pc);
8099 store_reg(s, 15, tmp);
9ee6e8bb
PB
8100 } else {
8101 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8102 ARCH(7);
9ee6e8bb 8103 op = (insn >> 4) & 0x3;
426f5abc
PB
8104 if (op == 2) {
8105 goto illegal_op;
8106 }
3174f8e9 8107 addr = tcg_temp_local_new();
98a46317 8108 load_reg_var(s, addr, rn);
9ee6e8bb 8109 if (insn & (1 << 20)) {
426f5abc 8110 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8111 } else {
426f5abc 8112 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8113 }
3174f8e9 8114 tcg_temp_free(addr);
9ee6e8bb
PB
8115 }
8116 } else {
8117 /* Load/store multiple, RFE, SRS. */
8118 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8119 /* Not available in user mode. */
b0109805 8120 if (IS_USER(s))
9ee6e8bb
PB
8121 goto illegal_op;
8122 if (insn & (1 << 20)) {
8123 /* rfe */
b0109805
PB
8124 addr = load_reg(s, rn);
8125 if ((insn & (1 << 24)) == 0)
8126 tcg_gen_addi_i32(addr, addr, -8);
8127 /* Load PC into tmp and CPSR into tmp2. */
8128 tmp = gen_ld32(addr, 0);
8129 tcg_gen_addi_i32(addr, addr, 4);
8130 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8131 if (insn & (1 << 21)) {
8132 /* Base writeback. */
b0109805
PB
8133 if (insn & (1 << 24)) {
8134 tcg_gen_addi_i32(addr, addr, 4);
8135 } else {
8136 tcg_gen_addi_i32(addr, addr, -4);
8137 }
8138 store_reg(s, rn, addr);
8139 } else {
7d1b0095 8140 tcg_temp_free_i32(addr);
9ee6e8bb 8141 }
b0109805 8142 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8143 } else {
8144 /* srs */
8145 op = (insn & 0x1f);
7d1b0095 8146 addr = tcg_temp_new_i32();
39ea3d4e
PM
8147 tmp = tcg_const_i32(op);
8148 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8149 tcg_temp_free_i32(tmp);
9ee6e8bb 8150 if ((insn & (1 << 24)) == 0) {
b0109805 8151 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8152 }
b0109805
PB
8153 tmp = load_reg(s, 14);
8154 gen_st32(tmp, addr, 0);
8155 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8156 tmp = tcg_temp_new_i32();
b0109805
PB
8157 gen_helper_cpsr_read(tmp);
8158 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8159 if (insn & (1 << 21)) {
8160 if ((insn & (1 << 24)) == 0) {
b0109805 8161 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8162 } else {
b0109805 8163 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8164 }
39ea3d4e
PM
8165 tmp = tcg_const_i32(op);
8166 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8167 tcg_temp_free_i32(tmp);
b0109805 8168 } else {
7d1b0095 8169 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8170 }
8171 }
8172 } else {
5856d44e
YO
8173 int i, loaded_base = 0;
8174 TCGv loaded_var;
9ee6e8bb 8175 /* Load/store multiple. */
b0109805 8176 addr = load_reg(s, rn);
9ee6e8bb
PB
8177 offset = 0;
8178 for (i = 0; i < 16; i++) {
8179 if (insn & (1 << i))
8180 offset += 4;
8181 }
8182 if (insn & (1 << 24)) {
b0109805 8183 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8184 }
8185
5856d44e 8186 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8187 for (i = 0; i < 16; i++) {
8188 if ((insn & (1 << i)) == 0)
8189 continue;
8190 if (insn & (1 << 20)) {
8191 /* Load. */
b0109805 8192 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8193 if (i == 15) {
b0109805 8194 gen_bx(s, tmp);
5856d44e
YO
8195 } else if (i == rn) {
8196 loaded_var = tmp;
8197 loaded_base = 1;
9ee6e8bb 8198 } else {
b0109805 8199 store_reg(s, i, tmp);
9ee6e8bb
PB
8200 }
8201 } else {
8202 /* Store. */
b0109805
PB
8203 tmp = load_reg(s, i);
8204 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8205 }
b0109805 8206 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8207 }
5856d44e
YO
8208 if (loaded_base) {
8209 store_reg(s, rn, loaded_var);
8210 }
9ee6e8bb
PB
8211 if (insn & (1 << 21)) {
8212 /* Base register writeback. */
8213 if (insn & (1 << 24)) {
b0109805 8214 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8215 }
8216 /* Fault if writeback register is in register list. */
8217 if (insn & (1 << rn))
8218 goto illegal_op;
b0109805
PB
8219 store_reg(s, rn, addr);
8220 } else {
7d1b0095 8221 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8222 }
8223 }
8224 }
8225 break;
2af9ab77
JB
8226 case 5:
8227
9ee6e8bb 8228 op = (insn >> 21) & 0xf;
2af9ab77
JB
8229 if (op == 6) {
8230 /* Halfword pack. */
8231 tmp = load_reg(s, rn);
8232 tmp2 = load_reg(s, rm);
8233 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8234 if (insn & (1 << 5)) {
8235 /* pkhtb */
8236 if (shift == 0)
8237 shift = 31;
8238 tcg_gen_sari_i32(tmp2, tmp2, shift);
8239 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8240 tcg_gen_ext16u_i32(tmp2, tmp2);
8241 } else {
8242 /* pkhbt */
8243 if (shift)
8244 tcg_gen_shli_i32(tmp2, tmp2, shift);
8245 tcg_gen_ext16u_i32(tmp, tmp);
8246 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8247 }
8248 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8249 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8250 store_reg(s, rd, tmp);
8251 } else {
2af9ab77
JB
8252 /* Data processing register constant shift. */
8253 if (rn == 15) {
7d1b0095 8254 tmp = tcg_temp_new_i32();
2af9ab77
JB
8255 tcg_gen_movi_i32(tmp, 0);
8256 } else {
8257 tmp = load_reg(s, rn);
8258 }
8259 tmp2 = load_reg(s, rm);
8260
8261 shiftop = (insn >> 4) & 3;
8262 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8263 conds = (insn & (1 << 20)) != 0;
8264 logic_cc = (conds && thumb2_logic_op(op));
8265 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8266 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8267 goto illegal_op;
7d1b0095 8268 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8269 if (rd != 15) {
8270 store_reg(s, rd, tmp);
8271 } else {
7d1b0095 8272 tcg_temp_free_i32(tmp);
2af9ab77 8273 }
3174f8e9 8274 }
9ee6e8bb
PB
8275 break;
8276 case 13: /* Misc data processing. */
8277 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8278 if (op < 4 && (insn & 0xf000) != 0xf000)
8279 goto illegal_op;
8280 switch (op) {
8281 case 0: /* Register controlled shift. */
8984bd2e
PB
8282 tmp = load_reg(s, rn);
8283 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8284 if ((insn & 0x70) != 0)
8285 goto illegal_op;
8286 op = (insn >> 21) & 3;
8984bd2e
PB
8287 logic_cc = (insn & (1 << 20)) != 0;
8288 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8289 if (logic_cc)
8290 gen_logic_CC(tmp);
21aeb343 8291 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8292 break;
8293 case 1: /* Sign/zero extend. */
5e3f878a 8294 tmp = load_reg(s, rm);
9ee6e8bb 8295 shift = (insn >> 4) & 3;
1301f322 8296 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8297 rotate, a shift is sufficient. */
8298 if (shift != 0)
f669df27 8299 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8300 op = (insn >> 20) & 7;
8301 switch (op) {
5e3f878a
PB
8302 case 0: gen_sxth(tmp); break;
8303 case 1: gen_uxth(tmp); break;
8304 case 2: gen_sxtb16(tmp); break;
8305 case 3: gen_uxtb16(tmp); break;
8306 case 4: gen_sxtb(tmp); break;
8307 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8308 default: goto illegal_op;
8309 }
8310 if (rn != 15) {
5e3f878a 8311 tmp2 = load_reg(s, rn);
9ee6e8bb 8312 if ((op >> 1) == 1) {
5e3f878a 8313 gen_add16(tmp, tmp2);
9ee6e8bb 8314 } else {
5e3f878a 8315 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8316 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8317 }
8318 }
5e3f878a 8319 store_reg(s, rd, tmp);
9ee6e8bb
PB
8320 break;
8321 case 2: /* SIMD add/subtract. */
8322 op = (insn >> 20) & 7;
8323 shift = (insn >> 4) & 7;
8324 if ((op & 3) == 3 || (shift & 3) == 3)
8325 goto illegal_op;
6ddbc6e4
PB
8326 tmp = load_reg(s, rn);
8327 tmp2 = load_reg(s, rm);
8328 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8329 tcg_temp_free_i32(tmp2);
6ddbc6e4 8330 store_reg(s, rd, tmp);
9ee6e8bb
PB
8331 break;
8332 case 3: /* Other data processing. */
8333 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8334 if (op < 4) {
8335 /* Saturating add/subtract. */
d9ba4830
PB
8336 tmp = load_reg(s, rn);
8337 tmp2 = load_reg(s, rm);
9ee6e8bb 8338 if (op & 1)
4809c612
JB
8339 gen_helper_double_saturate(tmp, tmp);
8340 if (op & 2)
d9ba4830 8341 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8342 else
d9ba4830 8343 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8344 tcg_temp_free_i32(tmp2);
9ee6e8bb 8345 } else {
d9ba4830 8346 tmp = load_reg(s, rn);
9ee6e8bb
PB
8347 switch (op) {
8348 case 0x0a: /* rbit */
d9ba4830 8349 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8350 break;
8351 case 0x08: /* rev */
66896cb8 8352 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8353 break;
8354 case 0x09: /* rev16 */
d9ba4830 8355 gen_rev16(tmp);
9ee6e8bb
PB
8356 break;
8357 case 0x0b: /* revsh */
d9ba4830 8358 gen_revsh(tmp);
9ee6e8bb
PB
8359 break;
8360 case 0x10: /* sel */
d9ba4830 8361 tmp2 = load_reg(s, rm);
7d1b0095 8362 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8363 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8364 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8365 tcg_temp_free_i32(tmp3);
8366 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8367 break;
8368 case 0x18: /* clz */
d9ba4830 8369 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8370 break;
8371 default:
8372 goto illegal_op;
8373 }
8374 }
d9ba4830 8375 store_reg(s, rd, tmp);
9ee6e8bb
PB
8376 break;
8377 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8378 op = (insn >> 4) & 0xf;
d9ba4830
PB
8379 tmp = load_reg(s, rn);
8380 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8381 switch ((insn >> 20) & 7) {
8382 case 0: /* 32 x 32 -> 32 */
d9ba4830 8383 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8384 tcg_temp_free_i32(tmp2);
9ee6e8bb 8385 if (rs != 15) {
d9ba4830 8386 tmp2 = load_reg(s, rs);
9ee6e8bb 8387 if (op)
d9ba4830 8388 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8389 else
d9ba4830 8390 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8391 tcg_temp_free_i32(tmp2);
9ee6e8bb 8392 }
9ee6e8bb
PB
8393 break;
8394 case 1: /* 16 x 16 -> 32 */
d9ba4830 8395 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8396 tcg_temp_free_i32(tmp2);
9ee6e8bb 8397 if (rs != 15) {
d9ba4830
PB
8398 tmp2 = load_reg(s, rs);
8399 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8400 tcg_temp_free_i32(tmp2);
9ee6e8bb 8401 }
9ee6e8bb
PB
8402 break;
8403 case 2: /* Dual multiply add. */
8404 case 4: /* Dual multiply subtract. */
8405 if (op)
d9ba4830
PB
8406 gen_swap_half(tmp2);
8407 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8408 if (insn & (1 << 22)) {
e1d177b9 8409 /* This subtraction cannot overflow. */
d9ba4830 8410 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8411 } else {
e1d177b9
PM
8412 /* This addition cannot overflow 32 bits;
8413 * however it may overflow considered as a signed
8414 * operation, in which case we must set the Q flag.
8415 */
8416 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8417 }
7d1b0095 8418 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8419 if (rs != 15)
8420 {
d9ba4830
PB
8421 tmp2 = load_reg(s, rs);
8422 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8423 tcg_temp_free_i32(tmp2);
9ee6e8bb 8424 }
9ee6e8bb
PB
8425 break;
8426 case 3: /* 32 * 16 -> 32msb */
8427 if (op)
d9ba4830 8428 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8429 else
d9ba4830 8430 gen_sxth(tmp2);
a7812ae4
PB
8431 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8432 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8433 tmp = tcg_temp_new_i32();
a7812ae4 8434 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8435 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8436 if (rs != 15)
8437 {
d9ba4830
PB
8438 tmp2 = load_reg(s, rs);
8439 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8440 tcg_temp_free_i32(tmp2);
9ee6e8bb 8441 }
9ee6e8bb 8442 break;
838fa72d
AJ
8443 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8444 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8445 if (rs != 15) {
838fa72d
AJ
8446 tmp = load_reg(s, rs);
8447 if (insn & (1 << 20)) {
8448 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8449 } else {
838fa72d 8450 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8451 }
2c0262af 8452 }
838fa72d
AJ
8453 if (insn & (1 << 4)) {
8454 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8455 }
8456 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8457 tmp = tcg_temp_new_i32();
838fa72d
AJ
8458 tcg_gen_trunc_i64_i32(tmp, tmp64);
8459 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8460 break;
8461 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8462 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8463 tcg_temp_free_i32(tmp2);
9ee6e8bb 8464 if (rs != 15) {
d9ba4830
PB
8465 tmp2 = load_reg(s, rs);
8466 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8467 tcg_temp_free_i32(tmp2);
5fd46862 8468 }
9ee6e8bb 8469 break;
2c0262af 8470 }
d9ba4830 8471 store_reg(s, rd, tmp);
2c0262af 8472 break;
9ee6e8bb
PB
8473 case 6: case 7: /* 64-bit multiply, Divide. */
8474 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8475 tmp = load_reg(s, rn);
8476 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8477 if ((op & 0x50) == 0x10) {
8478 /* sdiv, udiv */
8479 if (!arm_feature(env, ARM_FEATURE_DIV))
8480 goto illegal_op;
8481 if (op & 0x20)
5e3f878a 8482 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8483 else
5e3f878a 8484 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8485 tcg_temp_free_i32(tmp2);
5e3f878a 8486 store_reg(s, rd, tmp);
9ee6e8bb
PB
8487 } else if ((op & 0xe) == 0xc) {
8488 /* Dual multiply accumulate long. */
8489 if (op & 1)
5e3f878a
PB
8490 gen_swap_half(tmp2);
8491 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8492 if (op & 0x10) {
5e3f878a 8493 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8494 } else {
5e3f878a 8495 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8496 }
7d1b0095 8497 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8498 /* BUGFIX */
8499 tmp64 = tcg_temp_new_i64();
8500 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8501 tcg_temp_free_i32(tmp);
a7812ae4
PB
8502 gen_addq(s, tmp64, rs, rd);
8503 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8504 tcg_temp_free_i64(tmp64);
2c0262af 8505 } else {
9ee6e8bb
PB
8506 if (op & 0x20) {
8507 /* Unsigned 64-bit multiply */
a7812ae4 8508 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8509 } else {
9ee6e8bb
PB
8510 if (op & 8) {
8511 /* smlalxy */
5e3f878a 8512 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8513 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8514 tmp64 = tcg_temp_new_i64();
8515 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8516 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8517 } else {
8518 /* Signed 64-bit multiply */
a7812ae4 8519 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8520 }
b5ff1b31 8521 }
9ee6e8bb
PB
8522 if (op & 4) {
8523 /* umaal */
a7812ae4
PB
8524 gen_addq_lo(s, tmp64, rs);
8525 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8526 } else if (op & 0x40) {
8527 /* 64-bit accumulate. */
a7812ae4 8528 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8529 }
a7812ae4 8530 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8531 tcg_temp_free_i64(tmp64);
5fd46862 8532 }
2c0262af 8533 break;
9ee6e8bb
PB
8534 }
8535 break;
8536 case 6: case 7: case 14: case 15:
8537 /* Coprocessor. */
8538 if (((insn >> 24) & 3) == 3) {
8539 /* Translate into the equivalent ARM encoding. */
f06053e3 8540 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8541 if (disas_neon_data_insn(env, s, insn))
8542 goto illegal_op;
8543 } else {
8544 if (insn & (1 << 28))
8545 goto illegal_op;
8546 if (disas_coproc_insn (env, s, insn))
8547 goto illegal_op;
8548 }
8549 break;
8550 case 8: case 9: case 10: case 11:
8551 if (insn & (1 << 15)) {
8552 /* Branches, misc control. */
8553 if (insn & 0x5000) {
8554 /* Unconditional branch. */
8555 /* signextend(hw1[10:0]) -> offset[:12]. */
8556 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8557 /* hw1[10:0] -> offset[11:1]. */
8558 offset |= (insn & 0x7ff) << 1;
8559 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8560 offset[24:22] already have the same value because of the
8561 sign extension above. */
8562 offset ^= ((~insn) & (1 << 13)) << 10;
8563 offset ^= ((~insn) & (1 << 11)) << 11;
8564
9ee6e8bb
PB
8565 if (insn & (1 << 14)) {
8566 /* Branch and link. */
3174f8e9 8567 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8568 }
3b46e624 8569
b0109805 8570 offset += s->pc;
9ee6e8bb
PB
8571 if (insn & (1 << 12)) {
8572 /* b/bl */
b0109805 8573 gen_jmp(s, offset);
9ee6e8bb
PB
8574 } else {
8575 /* blx */
b0109805 8576 offset &= ~(uint32_t)2;
be5e7a76 8577 /* thumb2 bx, no need to check */
b0109805 8578 gen_bx_im(s, offset);
2c0262af 8579 }
9ee6e8bb
PB
8580 } else if (((insn >> 23) & 7) == 7) {
8581 /* Misc control */
8582 if (insn & (1 << 13))
8583 goto illegal_op;
8584
8585 if (insn & (1 << 26)) {
8586 /* Secure monitor call (v6Z) */
8587 goto illegal_op; /* not implemented. */
2c0262af 8588 } else {
9ee6e8bb
PB
8589 op = (insn >> 20) & 7;
8590 switch (op) {
8591 case 0: /* msr cpsr. */
8592 if (IS_M(env)) {
8984bd2e
PB
8593 tmp = load_reg(s, rn);
8594 addr = tcg_const_i32(insn & 0xff);
8595 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8596 tcg_temp_free_i32(addr);
7d1b0095 8597 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8598 gen_lookup_tb(s);
8599 break;
8600 }
8601 /* fall through */
8602 case 1: /* msr spsr. */
8603 if (IS_M(env))
8604 goto illegal_op;
2fbac54b
FN
8605 tmp = load_reg(s, rn);
8606 if (gen_set_psr(s,
9ee6e8bb 8607 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8608 op == 1, tmp))
9ee6e8bb
PB
8609 goto illegal_op;
8610 break;
8611 case 2: /* cps, nop-hint. */
8612 if (((insn >> 8) & 7) == 0) {
8613 gen_nop_hint(s, insn & 0xff);
8614 }
8615 /* Implemented as NOP in user mode. */
8616 if (IS_USER(s))
8617 break;
8618 offset = 0;
8619 imm = 0;
8620 if (insn & (1 << 10)) {
8621 if (insn & (1 << 7))
8622 offset |= CPSR_A;
8623 if (insn & (1 << 6))
8624 offset |= CPSR_I;
8625 if (insn & (1 << 5))
8626 offset |= CPSR_F;
8627 if (insn & (1 << 9))
8628 imm = CPSR_A | CPSR_I | CPSR_F;
8629 }
8630 if (insn & (1 << 8)) {
8631 offset |= 0x1f;
8632 imm |= (insn & 0x1f);
8633 }
8634 if (offset) {
2fbac54b 8635 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8636 }
8637 break;
8638 case 3: /* Special control operations. */
426f5abc 8639 ARCH(7);
9ee6e8bb
PB
8640 op = (insn >> 4) & 0xf;
8641 switch (op) {
8642 case 2: /* clrex */
426f5abc 8643 gen_clrex(s);
9ee6e8bb
PB
8644 break;
8645 case 4: /* dsb */
8646 case 5: /* dmb */
8647 case 6: /* isb */
8648 /* These execute as NOPs. */
9ee6e8bb
PB
8649 break;
8650 default:
8651 goto illegal_op;
8652 }
8653 break;
8654 case 4: /* bxj */
8655 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8656 tmp = load_reg(s, rn);
8657 gen_bx(s, tmp);
9ee6e8bb
PB
8658 break;
8659 case 5: /* Exception return. */
b8b45b68
RV
8660 if (IS_USER(s)) {
8661 goto illegal_op;
8662 }
8663 if (rn != 14 || rd != 15) {
8664 goto illegal_op;
8665 }
8666 tmp = load_reg(s, rn);
8667 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8668 gen_exception_return(s, tmp);
8669 break;
9ee6e8bb 8670 case 6: /* mrs cpsr. */
7d1b0095 8671 tmp = tcg_temp_new_i32();
9ee6e8bb 8672 if (IS_M(env)) {
8984bd2e
PB
8673 addr = tcg_const_i32(insn & 0xff);
8674 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8675 tcg_temp_free_i32(addr);
9ee6e8bb 8676 } else {
8984bd2e 8677 gen_helper_cpsr_read(tmp);
9ee6e8bb 8678 }
8984bd2e 8679 store_reg(s, rd, tmp);
9ee6e8bb
PB
8680 break;
8681 case 7: /* mrs spsr. */
8682 /* Not accessible in user mode. */
8683 if (IS_USER(s) || IS_M(env))
8684 goto illegal_op;
d9ba4830
PB
8685 tmp = load_cpu_field(spsr);
8686 store_reg(s, rd, tmp);
9ee6e8bb 8687 break;
2c0262af
FB
8688 }
8689 }
9ee6e8bb
PB
8690 } else {
8691 /* Conditional branch. */
8692 op = (insn >> 22) & 0xf;
8693 /* Generate a conditional jump to next instruction. */
8694 s->condlabel = gen_new_label();
d9ba4830 8695 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8696 s->condjmp = 1;
8697
8698 /* offset[11:1] = insn[10:0] */
8699 offset = (insn & 0x7ff) << 1;
8700 /* offset[17:12] = insn[21:16]. */
8701 offset |= (insn & 0x003f0000) >> 4;
8702 /* offset[31:20] = insn[26]. */
8703 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8704 /* offset[18] = insn[13]. */
8705 offset |= (insn & (1 << 13)) << 5;
8706 /* offset[19] = insn[11]. */
8707 offset |= (insn & (1 << 11)) << 8;
8708
8709 /* jump to the offset */
b0109805 8710 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8711 }
8712 } else {
8713 /* Data processing immediate. */
8714 if (insn & (1 << 25)) {
8715 if (insn & (1 << 24)) {
8716 if (insn & (1 << 20))
8717 goto illegal_op;
8718 /* Bitfield/Saturate. */
8719 op = (insn >> 21) & 7;
8720 imm = insn & 0x1f;
8721 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8722 if (rn == 15) {
7d1b0095 8723 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8724 tcg_gen_movi_i32(tmp, 0);
8725 } else {
8726 tmp = load_reg(s, rn);
8727 }
9ee6e8bb
PB
8728 switch (op) {
8729 case 2: /* Signed bitfield extract. */
8730 imm++;
8731 if (shift + imm > 32)
8732 goto illegal_op;
8733 if (imm < 32)
6ddbc6e4 8734 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8735 break;
8736 case 6: /* Unsigned bitfield extract. */
8737 imm++;
8738 if (shift + imm > 32)
8739 goto illegal_op;
8740 if (imm < 32)
6ddbc6e4 8741 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8742 break;
8743 case 3: /* Bitfield insert/clear. */
8744 if (imm < shift)
8745 goto illegal_op;
8746 imm = imm + 1 - shift;
8747 if (imm != 32) {
6ddbc6e4 8748 tmp2 = load_reg(s, rd);
8f8e3aa4 8749 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8750 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8751 }
8752 break;
8753 case 7:
8754 goto illegal_op;
8755 default: /* Saturate. */
9ee6e8bb
PB
8756 if (shift) {
8757 if (op & 1)
6ddbc6e4 8758 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8759 else
6ddbc6e4 8760 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8761 }
6ddbc6e4 8762 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8763 if (op & 4) {
8764 /* Unsigned. */
9ee6e8bb 8765 if ((op & 1) && shift == 0)
6ddbc6e4 8766 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8767 else
6ddbc6e4 8768 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8769 } else {
9ee6e8bb 8770 /* Signed. */
9ee6e8bb 8771 if ((op & 1) && shift == 0)
6ddbc6e4 8772 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8773 else
6ddbc6e4 8774 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8775 }
b75263d6 8776 tcg_temp_free_i32(tmp2);
9ee6e8bb 8777 break;
2c0262af 8778 }
6ddbc6e4 8779 store_reg(s, rd, tmp);
9ee6e8bb
PB
8780 } else {
8781 imm = ((insn & 0x04000000) >> 15)
8782 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8783 if (insn & (1 << 22)) {
8784 /* 16-bit immediate. */
8785 imm |= (insn >> 4) & 0xf000;
8786 if (insn & (1 << 23)) {
8787 /* movt */
5e3f878a 8788 tmp = load_reg(s, rd);
86831435 8789 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8790 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8791 } else {
9ee6e8bb 8792 /* movw */
7d1b0095 8793 tmp = tcg_temp_new_i32();
5e3f878a 8794 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8795 }
8796 } else {
9ee6e8bb
PB
8797 /* Add/sub 12-bit immediate. */
8798 if (rn == 15) {
b0109805 8799 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8800 if (insn & (1 << 23))
b0109805 8801 offset -= imm;
9ee6e8bb 8802 else
b0109805 8803 offset += imm;
7d1b0095 8804 tmp = tcg_temp_new_i32();
5e3f878a 8805 tcg_gen_movi_i32(tmp, offset);
2c0262af 8806 } else {
5e3f878a 8807 tmp = load_reg(s, rn);
9ee6e8bb 8808 if (insn & (1 << 23))
5e3f878a 8809 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8810 else
5e3f878a 8811 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8812 }
9ee6e8bb 8813 }
5e3f878a 8814 store_reg(s, rd, tmp);
191abaa2 8815 }
9ee6e8bb
PB
8816 } else {
8817 int shifter_out = 0;
8818 /* modified 12-bit immediate. */
8819 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8820 imm = (insn & 0xff);
8821 switch (shift) {
8822 case 0: /* XY */
8823 /* Nothing to do. */
8824 break;
8825 case 1: /* 00XY00XY */
8826 imm |= imm << 16;
8827 break;
8828 case 2: /* XY00XY00 */
8829 imm |= imm << 16;
8830 imm <<= 8;
8831 break;
8832 case 3: /* XYXYXYXY */
8833 imm |= imm << 16;
8834 imm |= imm << 8;
8835 break;
8836 default: /* Rotated constant. */
8837 shift = (shift << 1) | (imm >> 7);
8838 imm |= 0x80;
8839 imm = imm << (32 - shift);
8840 shifter_out = 1;
8841 break;
b5ff1b31 8842 }
7d1b0095 8843 tmp2 = tcg_temp_new_i32();
3174f8e9 8844 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8845 rn = (insn >> 16) & 0xf;
3174f8e9 8846 if (rn == 15) {
7d1b0095 8847 tmp = tcg_temp_new_i32();
3174f8e9
FN
8848 tcg_gen_movi_i32(tmp, 0);
8849 } else {
8850 tmp = load_reg(s, rn);
8851 }
9ee6e8bb
PB
8852 op = (insn >> 21) & 0xf;
8853 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8854 shifter_out, tmp, tmp2))
9ee6e8bb 8855 goto illegal_op;
7d1b0095 8856 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8857 rd = (insn >> 8) & 0xf;
8858 if (rd != 15) {
3174f8e9
FN
8859 store_reg(s, rd, tmp);
8860 } else {
7d1b0095 8861 tcg_temp_free_i32(tmp);
2c0262af 8862 }
2c0262af 8863 }
9ee6e8bb
PB
8864 }
8865 break;
8866 case 12: /* Load/store single data item. */
8867 {
8868 int postinc = 0;
8869 int writeback = 0;
b0109805 8870 int user;
9ee6e8bb
PB
8871 if ((insn & 0x01100000) == 0x01000000) {
8872 if (disas_neon_ls_insn(env, s, insn))
c1713132 8873 goto illegal_op;
9ee6e8bb
PB
8874 break;
8875 }
a2fdc890
PM
8876 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8877 if (rs == 15) {
8878 if (!(insn & (1 << 20))) {
8879 goto illegal_op;
8880 }
8881 if (op != 2) {
8882 /* Byte or halfword load space with dest == r15 : memory hints.
8883 * Catch them early so we don't emit pointless addressing code.
8884 * This space is a mix of:
8885 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8886 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8887 * cores)
8888 * unallocated hints, which must be treated as NOPs
8889 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8890 * which is easiest for the decoding logic
8891 * Some space which must UNDEF
8892 */
8893 int op1 = (insn >> 23) & 3;
8894 int op2 = (insn >> 6) & 0x3f;
8895 if (op & 2) {
8896 goto illegal_op;
8897 }
8898 if (rn == 15) {
8899 /* UNPREDICTABLE or unallocated hint */
8900 return 0;
8901 }
8902 if (op1 & 1) {
8903 return 0; /* PLD* or unallocated hint */
8904 }
8905 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8906 return 0; /* PLD* or unallocated hint */
8907 }
8908 /* UNDEF space, or an UNPREDICTABLE */
8909 return 1;
8910 }
8911 }
b0109805 8912 user = IS_USER(s);
9ee6e8bb 8913 if (rn == 15) {
7d1b0095 8914 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8915 /* PC relative. */
8916 /* s->pc has already been incremented by 4. */
8917 imm = s->pc & 0xfffffffc;
8918 if (insn & (1 << 23))
8919 imm += insn & 0xfff;
8920 else
8921 imm -= insn & 0xfff;
b0109805 8922 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8923 } else {
b0109805 8924 addr = load_reg(s, rn);
9ee6e8bb
PB
8925 if (insn & (1 << 23)) {
8926 /* Positive offset. */
8927 imm = insn & 0xfff;
b0109805 8928 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8929 } else {
9ee6e8bb 8930 imm = insn & 0xff;
2a0308c5
PM
8931 switch ((insn >> 8) & 0xf) {
8932 case 0x0: /* Shifted Register. */
9ee6e8bb 8933 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8934 if (shift > 3) {
8935 tcg_temp_free_i32(addr);
18c9b560 8936 goto illegal_op;
2a0308c5 8937 }
b26eefb6 8938 tmp = load_reg(s, rm);
9ee6e8bb 8939 if (shift)
b26eefb6 8940 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8941 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8942 tcg_temp_free_i32(tmp);
9ee6e8bb 8943 break;
2a0308c5 8944 case 0xc: /* Negative offset. */
b0109805 8945 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8946 break;
2a0308c5 8947 case 0xe: /* User privilege. */
b0109805
PB
8948 tcg_gen_addi_i32(addr, addr, imm);
8949 user = 1;
9ee6e8bb 8950 break;
2a0308c5 8951 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8952 imm = -imm;
8953 /* Fall through. */
2a0308c5 8954 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8955 postinc = 1;
8956 writeback = 1;
8957 break;
2a0308c5 8958 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8959 imm = -imm;
8960 /* Fall through. */
2a0308c5 8961 case 0xf: /* Pre-increment. */
b0109805 8962 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8963 writeback = 1;
8964 break;
8965 default:
2a0308c5 8966 tcg_temp_free_i32(addr);
b7bcbe95 8967 goto illegal_op;
9ee6e8bb
PB
8968 }
8969 }
8970 }
9ee6e8bb
PB
8971 if (insn & (1 << 20)) {
8972 /* Load. */
a2fdc890
PM
8973 switch (op) {
8974 case 0: tmp = gen_ld8u(addr, user); break;
8975 case 4: tmp = gen_ld8s(addr, user); break;
8976 case 1: tmp = gen_ld16u(addr, user); break;
8977 case 5: tmp = gen_ld16s(addr, user); break;
8978 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8979 default:
8980 tcg_temp_free_i32(addr);
8981 goto illegal_op;
a2fdc890
PM
8982 }
8983 if (rs == 15) {
8984 gen_bx(s, tmp);
9ee6e8bb 8985 } else {
a2fdc890 8986 store_reg(s, rs, tmp);
9ee6e8bb
PB
8987 }
8988 } else {
8989 /* Store. */
b0109805 8990 tmp = load_reg(s, rs);
9ee6e8bb 8991 switch (op) {
b0109805
PB
8992 case 0: gen_st8(tmp, addr, user); break;
8993 case 1: gen_st16(tmp, addr, user); break;
8994 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8995 default:
8996 tcg_temp_free_i32(addr);
8997 goto illegal_op;
b7bcbe95 8998 }
2c0262af 8999 }
9ee6e8bb 9000 if (postinc)
b0109805
PB
9001 tcg_gen_addi_i32(addr, addr, imm);
9002 if (writeback) {
9003 store_reg(s, rn, addr);
9004 } else {
7d1b0095 9005 tcg_temp_free_i32(addr);
b0109805 9006 }
9ee6e8bb
PB
9007 }
9008 break;
9009 default:
9010 goto illegal_op;
2c0262af 9011 }
9ee6e8bb
PB
9012 return 0;
9013illegal_op:
9014 return 1;
2c0262af
FB
9015}
9016
9ee6e8bb 9017static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
9018{
9019 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9020 int32_t offset;
9021 int i;
b26eefb6 9022 TCGv tmp;
d9ba4830 9023 TCGv tmp2;
b0109805 9024 TCGv addr;
99c475ab 9025
9ee6e8bb
PB
9026 if (s->condexec_mask) {
9027 cond = s->condexec_cond;
bedd2912
JB
9028 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9029 s->condlabel = gen_new_label();
9030 gen_test_cc(cond ^ 1, s->condlabel);
9031 s->condjmp = 1;
9032 }
9ee6e8bb
PB
9033 }
9034
b5ff1b31 9035 insn = lduw_code(s->pc);
99c475ab 9036 s->pc += 2;
b5ff1b31 9037
99c475ab
FB
9038 switch (insn >> 12) {
9039 case 0: case 1:
396e467c 9040
99c475ab
FB
9041 rd = insn & 7;
9042 op = (insn >> 11) & 3;
9043 if (op == 3) {
9044 /* add/subtract */
9045 rn = (insn >> 3) & 7;
396e467c 9046 tmp = load_reg(s, rn);
99c475ab
FB
9047 if (insn & (1 << 10)) {
9048 /* immediate */
7d1b0095 9049 tmp2 = tcg_temp_new_i32();
396e467c 9050 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9051 } else {
9052 /* reg */
9053 rm = (insn >> 6) & 7;
396e467c 9054 tmp2 = load_reg(s, rm);
99c475ab 9055 }
9ee6e8bb
PB
9056 if (insn & (1 << 9)) {
9057 if (s->condexec_mask)
396e467c 9058 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9059 else
396e467c 9060 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
9061 } else {
9062 if (s->condexec_mask)
396e467c 9063 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9064 else
396e467c 9065 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 9066 }
7d1b0095 9067 tcg_temp_free_i32(tmp2);
396e467c 9068 store_reg(s, rd, tmp);
99c475ab
FB
9069 } else {
9070 /* shift immediate */
9071 rm = (insn >> 3) & 7;
9072 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9073 tmp = load_reg(s, rm);
9074 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9075 if (!s->condexec_mask)
9076 gen_logic_CC(tmp);
9077 store_reg(s, rd, tmp);
99c475ab
FB
9078 }
9079 break;
9080 case 2: case 3:
9081 /* arithmetic large immediate */
9082 op = (insn >> 11) & 3;
9083 rd = (insn >> 8) & 0x7;
396e467c 9084 if (op == 0) { /* mov */
7d1b0095 9085 tmp = tcg_temp_new_i32();
396e467c 9086 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9087 if (!s->condexec_mask)
396e467c
FN
9088 gen_logic_CC(tmp);
9089 store_reg(s, rd, tmp);
9090 } else {
9091 tmp = load_reg(s, rd);
7d1b0095 9092 tmp2 = tcg_temp_new_i32();
396e467c
FN
9093 tcg_gen_movi_i32(tmp2, insn & 0xff);
9094 switch (op) {
9095 case 1: /* cmp */
9096 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9097 tcg_temp_free_i32(tmp);
9098 tcg_temp_free_i32(tmp2);
396e467c
FN
9099 break;
9100 case 2: /* add */
9101 if (s->condexec_mask)
9102 tcg_gen_add_i32(tmp, tmp, tmp2);
9103 else
9104 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 9105 tcg_temp_free_i32(tmp2);
396e467c
FN
9106 store_reg(s, rd, tmp);
9107 break;
9108 case 3: /* sub */
9109 if (s->condexec_mask)
9110 tcg_gen_sub_i32(tmp, tmp, tmp2);
9111 else
9112 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 9113 tcg_temp_free_i32(tmp2);
396e467c
FN
9114 store_reg(s, rd, tmp);
9115 break;
9116 }
99c475ab 9117 }
99c475ab
FB
9118 break;
9119 case 4:
9120 if (insn & (1 << 11)) {
9121 rd = (insn >> 8) & 7;
5899f386
FB
9122 /* load pc-relative. Bit 1 of PC is ignored. */
9123 val = s->pc + 2 + ((insn & 0xff) * 4);
9124 val &= ~(uint32_t)2;
7d1b0095 9125 addr = tcg_temp_new_i32();
b0109805
PB
9126 tcg_gen_movi_i32(addr, val);
9127 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9128 tcg_temp_free_i32(addr);
b0109805 9129 store_reg(s, rd, tmp);
99c475ab
FB
9130 break;
9131 }
9132 if (insn & (1 << 10)) {
9133 /* data processing extended or blx */
9134 rd = (insn & 7) | ((insn >> 4) & 8);
9135 rm = (insn >> 3) & 0xf;
9136 op = (insn >> 8) & 3;
9137 switch (op) {
9138 case 0: /* add */
396e467c
FN
9139 tmp = load_reg(s, rd);
9140 tmp2 = load_reg(s, rm);
9141 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9142 tcg_temp_free_i32(tmp2);
396e467c 9143 store_reg(s, rd, tmp);
99c475ab
FB
9144 break;
9145 case 1: /* cmp */
396e467c
FN
9146 tmp = load_reg(s, rd);
9147 tmp2 = load_reg(s, rm);
9148 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9149 tcg_temp_free_i32(tmp2);
9150 tcg_temp_free_i32(tmp);
99c475ab
FB
9151 break;
9152 case 2: /* mov/cpy */
396e467c
FN
9153 tmp = load_reg(s, rm);
9154 store_reg(s, rd, tmp);
99c475ab
FB
9155 break;
9156 case 3:/* branch [and link] exchange thumb register */
b0109805 9157 tmp = load_reg(s, rm);
99c475ab 9158 if (insn & (1 << 7)) {
be5e7a76 9159 ARCH(5);
99c475ab 9160 val = (uint32_t)s->pc | 1;
7d1b0095 9161 tmp2 = tcg_temp_new_i32();
b0109805
PB
9162 tcg_gen_movi_i32(tmp2, val);
9163 store_reg(s, 14, tmp2);
99c475ab 9164 }
be5e7a76 9165 /* already thumb, no need to check */
d9ba4830 9166 gen_bx(s, tmp);
99c475ab
FB
9167 break;
9168 }
9169 break;
9170 }
9171
9172 /* data processing register */
9173 rd = insn & 7;
9174 rm = (insn >> 3) & 7;
9175 op = (insn >> 6) & 0xf;
9176 if (op == 2 || op == 3 || op == 4 || op == 7) {
9177 /* the shift/rotate ops want the operands backwards */
9178 val = rm;
9179 rm = rd;
9180 rd = val;
9181 val = 1;
9182 } else {
9183 val = 0;
9184 }
9185
396e467c 9186 if (op == 9) { /* neg */
7d1b0095 9187 tmp = tcg_temp_new_i32();
396e467c
FN
9188 tcg_gen_movi_i32(tmp, 0);
9189 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9190 tmp = load_reg(s, rd);
9191 } else {
9192 TCGV_UNUSED(tmp);
9193 }
99c475ab 9194
396e467c 9195 tmp2 = load_reg(s, rm);
5899f386 9196 switch (op) {
99c475ab 9197 case 0x0: /* and */
396e467c 9198 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9199 if (!s->condexec_mask)
396e467c 9200 gen_logic_CC(tmp);
99c475ab
FB
9201 break;
9202 case 0x1: /* eor */
396e467c 9203 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9204 if (!s->condexec_mask)
396e467c 9205 gen_logic_CC(tmp);
99c475ab
FB
9206 break;
9207 case 0x2: /* lsl */
9ee6e8bb 9208 if (s->condexec_mask) {
396e467c 9209 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9210 } else {
396e467c
FN
9211 gen_helper_shl_cc(tmp2, tmp2, tmp);
9212 gen_logic_CC(tmp2);
9ee6e8bb 9213 }
99c475ab
FB
9214 break;
9215 case 0x3: /* lsr */
9ee6e8bb 9216 if (s->condexec_mask) {
396e467c 9217 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9218 } else {
396e467c
FN
9219 gen_helper_shr_cc(tmp2, tmp2, tmp);
9220 gen_logic_CC(tmp2);
9ee6e8bb 9221 }
99c475ab
FB
9222 break;
9223 case 0x4: /* asr */
9ee6e8bb 9224 if (s->condexec_mask) {
396e467c 9225 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9226 } else {
396e467c
FN
9227 gen_helper_sar_cc(tmp2, tmp2, tmp);
9228 gen_logic_CC(tmp2);
9ee6e8bb 9229 }
99c475ab
FB
9230 break;
9231 case 0x5: /* adc */
9ee6e8bb 9232 if (s->condexec_mask)
396e467c 9233 gen_adc(tmp, tmp2);
9ee6e8bb 9234 else
396e467c 9235 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9236 break;
9237 case 0x6: /* sbc */
9ee6e8bb 9238 if (s->condexec_mask)
396e467c 9239 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9240 else
396e467c 9241 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9242 break;
9243 case 0x7: /* ror */
9ee6e8bb 9244 if (s->condexec_mask) {
f669df27
AJ
9245 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9246 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9247 } else {
396e467c
FN
9248 gen_helper_ror_cc(tmp2, tmp2, tmp);
9249 gen_logic_CC(tmp2);
9ee6e8bb 9250 }
99c475ab
FB
9251 break;
9252 case 0x8: /* tst */
396e467c
FN
9253 tcg_gen_and_i32(tmp, tmp, tmp2);
9254 gen_logic_CC(tmp);
99c475ab 9255 rd = 16;
5899f386 9256 break;
99c475ab 9257 case 0x9: /* neg */
9ee6e8bb 9258 if (s->condexec_mask)
396e467c 9259 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9260 else
396e467c 9261 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9262 break;
9263 case 0xa: /* cmp */
396e467c 9264 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9265 rd = 16;
9266 break;
9267 case 0xb: /* cmn */
396e467c 9268 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9269 rd = 16;
9270 break;
9271 case 0xc: /* orr */
396e467c 9272 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9273 if (!s->condexec_mask)
396e467c 9274 gen_logic_CC(tmp);
99c475ab
FB
9275 break;
9276 case 0xd: /* mul */
7b2919a0 9277 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9278 if (!s->condexec_mask)
396e467c 9279 gen_logic_CC(tmp);
99c475ab
FB
9280 break;
9281 case 0xe: /* bic */
f669df27 9282 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9283 if (!s->condexec_mask)
396e467c 9284 gen_logic_CC(tmp);
99c475ab
FB
9285 break;
9286 case 0xf: /* mvn */
396e467c 9287 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9288 if (!s->condexec_mask)
396e467c 9289 gen_logic_CC(tmp2);
99c475ab 9290 val = 1;
5899f386 9291 rm = rd;
99c475ab
FB
9292 break;
9293 }
9294 if (rd != 16) {
396e467c
FN
9295 if (val) {
9296 store_reg(s, rm, tmp2);
9297 if (op != 0xf)
7d1b0095 9298 tcg_temp_free_i32(tmp);
396e467c
FN
9299 } else {
9300 store_reg(s, rd, tmp);
7d1b0095 9301 tcg_temp_free_i32(tmp2);
396e467c
FN
9302 }
9303 } else {
7d1b0095
PM
9304 tcg_temp_free_i32(tmp);
9305 tcg_temp_free_i32(tmp2);
99c475ab
FB
9306 }
9307 break;
9308
9309 case 5:
9310 /* load/store register offset. */
9311 rd = insn & 7;
9312 rn = (insn >> 3) & 7;
9313 rm = (insn >> 6) & 7;
9314 op = (insn >> 9) & 7;
b0109805 9315 addr = load_reg(s, rn);
b26eefb6 9316 tmp = load_reg(s, rm);
b0109805 9317 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9318 tcg_temp_free_i32(tmp);
99c475ab
FB
9319
9320 if (op < 3) /* store */
b0109805 9321 tmp = load_reg(s, rd);
99c475ab
FB
9322
9323 switch (op) {
9324 case 0: /* str */
b0109805 9325 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9326 break;
9327 case 1: /* strh */
b0109805 9328 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9329 break;
9330 case 2: /* strb */
b0109805 9331 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9332 break;
9333 case 3: /* ldrsb */
b0109805 9334 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9335 break;
9336 case 4: /* ldr */
b0109805 9337 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9338 break;
9339 case 5: /* ldrh */
b0109805 9340 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9341 break;
9342 case 6: /* ldrb */
b0109805 9343 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9344 break;
9345 case 7: /* ldrsh */
b0109805 9346 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9347 break;
9348 }
9349 if (op >= 3) /* load */
b0109805 9350 store_reg(s, rd, tmp);
7d1b0095 9351 tcg_temp_free_i32(addr);
99c475ab
FB
9352 break;
9353
9354 case 6:
9355 /* load/store word immediate offset */
9356 rd = insn & 7;
9357 rn = (insn >> 3) & 7;
b0109805 9358 addr = load_reg(s, rn);
99c475ab 9359 val = (insn >> 4) & 0x7c;
b0109805 9360 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9361
9362 if (insn & (1 << 11)) {
9363 /* load */
b0109805
PB
9364 tmp = gen_ld32(addr, IS_USER(s));
9365 store_reg(s, rd, tmp);
99c475ab
FB
9366 } else {
9367 /* store */
b0109805
PB
9368 tmp = load_reg(s, rd);
9369 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9370 }
7d1b0095 9371 tcg_temp_free_i32(addr);
99c475ab
FB
9372 break;
9373
9374 case 7:
9375 /* load/store byte immediate offset */
9376 rd = insn & 7;
9377 rn = (insn >> 3) & 7;
b0109805 9378 addr = load_reg(s, rn);
99c475ab 9379 val = (insn >> 6) & 0x1f;
b0109805 9380 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9381
9382 if (insn & (1 << 11)) {
9383 /* load */
b0109805
PB
9384 tmp = gen_ld8u(addr, IS_USER(s));
9385 store_reg(s, rd, tmp);
99c475ab
FB
9386 } else {
9387 /* store */
b0109805
PB
9388 tmp = load_reg(s, rd);
9389 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9390 }
7d1b0095 9391 tcg_temp_free_i32(addr);
99c475ab
FB
9392 break;
9393
9394 case 8:
9395 /* load/store halfword immediate offset */
9396 rd = insn & 7;
9397 rn = (insn >> 3) & 7;
b0109805 9398 addr = load_reg(s, rn);
99c475ab 9399 val = (insn >> 5) & 0x3e;
b0109805 9400 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9401
9402 if (insn & (1 << 11)) {
9403 /* load */
b0109805
PB
9404 tmp = gen_ld16u(addr, IS_USER(s));
9405 store_reg(s, rd, tmp);
99c475ab
FB
9406 } else {
9407 /* store */
b0109805
PB
9408 tmp = load_reg(s, rd);
9409 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9410 }
7d1b0095 9411 tcg_temp_free_i32(addr);
99c475ab
FB
9412 break;
9413
9414 case 9:
9415 /* load/store from stack */
9416 rd = (insn >> 8) & 7;
b0109805 9417 addr = load_reg(s, 13);
99c475ab 9418 val = (insn & 0xff) * 4;
b0109805 9419 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9420
9421 if (insn & (1 << 11)) {
9422 /* load */
b0109805
PB
9423 tmp = gen_ld32(addr, IS_USER(s));
9424 store_reg(s, rd, tmp);
99c475ab
FB
9425 } else {
9426 /* store */
b0109805
PB
9427 tmp = load_reg(s, rd);
9428 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9429 }
7d1b0095 9430 tcg_temp_free_i32(addr);
99c475ab
FB
9431 break;
9432
9433 case 10:
9434 /* add to high reg */
9435 rd = (insn >> 8) & 7;
5899f386
FB
9436 if (insn & (1 << 11)) {
9437 /* SP */
5e3f878a 9438 tmp = load_reg(s, 13);
5899f386
FB
9439 } else {
9440 /* PC. bit 1 is ignored. */
7d1b0095 9441 tmp = tcg_temp_new_i32();
5e3f878a 9442 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9443 }
99c475ab 9444 val = (insn & 0xff) * 4;
5e3f878a
PB
9445 tcg_gen_addi_i32(tmp, tmp, val);
9446 store_reg(s, rd, tmp);
99c475ab
FB
9447 break;
9448
9449 case 11:
9450 /* misc */
9451 op = (insn >> 8) & 0xf;
9452 switch (op) {
9453 case 0:
9454 /* adjust stack pointer */
b26eefb6 9455 tmp = load_reg(s, 13);
99c475ab
FB
9456 val = (insn & 0x7f) * 4;
9457 if (insn & (1 << 7))
6a0d8a1d 9458 val = -(int32_t)val;
b26eefb6
PB
9459 tcg_gen_addi_i32(tmp, tmp, val);
9460 store_reg(s, 13, tmp);
99c475ab
FB
9461 break;
9462
9ee6e8bb
PB
9463 case 2: /* sign/zero extend. */
9464 ARCH(6);
9465 rd = insn & 7;
9466 rm = (insn >> 3) & 7;
b0109805 9467 tmp = load_reg(s, rm);
9ee6e8bb 9468 switch ((insn >> 6) & 3) {
b0109805
PB
9469 case 0: gen_sxth(tmp); break;
9470 case 1: gen_sxtb(tmp); break;
9471 case 2: gen_uxth(tmp); break;
9472 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9473 }
b0109805 9474 store_reg(s, rd, tmp);
9ee6e8bb 9475 break;
99c475ab
FB
9476 case 4: case 5: case 0xc: case 0xd:
9477 /* push/pop */
b0109805 9478 addr = load_reg(s, 13);
5899f386
FB
9479 if (insn & (1 << 8))
9480 offset = 4;
99c475ab 9481 else
5899f386
FB
9482 offset = 0;
9483 for (i = 0; i < 8; i++) {
9484 if (insn & (1 << i))
9485 offset += 4;
9486 }
9487 if ((insn & (1 << 11)) == 0) {
b0109805 9488 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9489 }
99c475ab
FB
9490 for (i = 0; i < 8; i++) {
9491 if (insn & (1 << i)) {
9492 if (insn & (1 << 11)) {
9493 /* pop */
b0109805
PB
9494 tmp = gen_ld32(addr, IS_USER(s));
9495 store_reg(s, i, tmp);
99c475ab
FB
9496 } else {
9497 /* push */
b0109805
PB
9498 tmp = load_reg(s, i);
9499 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9500 }
5899f386 9501 /* advance to the next address. */
b0109805 9502 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9503 }
9504 }
a50f5b91 9505 TCGV_UNUSED(tmp);
99c475ab
FB
9506 if (insn & (1 << 8)) {
9507 if (insn & (1 << 11)) {
9508 /* pop pc */
b0109805 9509 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9510 /* don't set the pc until the rest of the instruction
9511 has completed */
9512 } else {
9513 /* push lr */
b0109805
PB
9514 tmp = load_reg(s, 14);
9515 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9516 }
b0109805 9517 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9518 }
5899f386 9519 if ((insn & (1 << 11)) == 0) {
b0109805 9520 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9521 }
99c475ab 9522 /* write back the new stack pointer */
b0109805 9523 store_reg(s, 13, addr);
99c475ab 9524 /* set the new PC value */
be5e7a76
DES
9525 if ((insn & 0x0900) == 0x0900) {
9526 store_reg_from_load(env, s, 15, tmp);
9527 }
99c475ab
FB
9528 break;
9529
9ee6e8bb
PB
9530 case 1: case 3: case 9: case 11: /* czb */
9531 rm = insn & 7;
d9ba4830 9532 tmp = load_reg(s, rm);
9ee6e8bb
PB
9533 s->condlabel = gen_new_label();
9534 s->condjmp = 1;
9535 if (insn & (1 << 11))
cb63669a 9536 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9537 else
cb63669a 9538 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9539 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9540 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9541 val = (uint32_t)s->pc + 2;
9542 val += offset;
9543 gen_jmp(s, val);
9544 break;
9545
9546 case 15: /* IT, nop-hint. */
9547 if ((insn & 0xf) == 0) {
9548 gen_nop_hint(s, (insn >> 4) & 0xf);
9549 break;
9550 }
9551 /* If Then. */
9552 s->condexec_cond = (insn >> 4) & 0xe;
9553 s->condexec_mask = insn & 0x1f;
9554 /* No actual code generated for this insn, just setup state. */
9555 break;
9556
06c949e6 9557 case 0xe: /* bkpt */
be5e7a76 9558 ARCH(5);
bc4a0de0 9559 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9560 break;
9561
9ee6e8bb
PB
9562 case 0xa: /* rev */
9563 ARCH(6);
9564 rn = (insn >> 3) & 0x7;
9565 rd = insn & 0x7;
b0109805 9566 tmp = load_reg(s, rn);
9ee6e8bb 9567 switch ((insn >> 6) & 3) {
66896cb8 9568 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9569 case 1: gen_rev16(tmp); break;
9570 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9571 default: goto illegal_op;
9572 }
b0109805 9573 store_reg(s, rd, tmp);
9ee6e8bb
PB
9574 break;
9575
9576 case 6: /* cps */
9577 ARCH(6);
9578 if (IS_USER(s))
9579 break;
9580 if (IS_M(env)) {
8984bd2e 9581 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9582 /* PRIMASK */
8984bd2e
PB
9583 if (insn & 1) {
9584 addr = tcg_const_i32(16);
9585 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9586 tcg_temp_free_i32(addr);
8984bd2e 9587 }
9ee6e8bb 9588 /* FAULTMASK */
8984bd2e
PB
9589 if (insn & 2) {
9590 addr = tcg_const_i32(17);
9591 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9592 tcg_temp_free_i32(addr);
8984bd2e 9593 }
b75263d6 9594 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9595 gen_lookup_tb(s);
9596 } else {
9597 if (insn & (1 << 4))
9598 shift = CPSR_A | CPSR_I | CPSR_F;
9599 else
9600 shift = 0;
fa26df03 9601 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9602 }
9603 break;
9604
99c475ab
FB
9605 default:
9606 goto undef;
9607 }
9608 break;
9609
9610 case 12:
a7d3970d 9611 {
99c475ab 9612 /* load/store multiple */
a7d3970d
PM
9613 TCGv loaded_var;
9614 TCGV_UNUSED(loaded_var);
99c475ab 9615 rn = (insn >> 8) & 0x7;
b0109805 9616 addr = load_reg(s, rn);
99c475ab
FB
9617 for (i = 0; i < 8; i++) {
9618 if (insn & (1 << i)) {
99c475ab
FB
9619 if (insn & (1 << 11)) {
9620 /* load */
b0109805 9621 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9622 if (i == rn) {
9623 loaded_var = tmp;
9624 } else {
9625 store_reg(s, i, tmp);
9626 }
99c475ab
FB
9627 } else {
9628 /* store */
b0109805
PB
9629 tmp = load_reg(s, i);
9630 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9631 }
5899f386 9632 /* advance to the next address */
b0109805 9633 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9634 }
9635 }
b0109805 9636 if ((insn & (1 << rn)) == 0) {
a7d3970d 9637 /* base reg not in list: base register writeback */
b0109805
PB
9638 store_reg(s, rn, addr);
9639 } else {
a7d3970d
PM
9640 /* base reg in list: if load, complete it now */
9641 if (insn & (1 << 11)) {
9642 store_reg(s, rn, loaded_var);
9643 }
7d1b0095 9644 tcg_temp_free_i32(addr);
b0109805 9645 }
99c475ab 9646 break;
a7d3970d 9647 }
99c475ab
FB
9648 case 13:
9649 /* conditional branch or swi */
9650 cond = (insn >> 8) & 0xf;
9651 if (cond == 0xe)
9652 goto undef;
9653
9654 if (cond == 0xf) {
9655 /* swi */
422ebf69 9656 gen_set_pc_im(s->pc);
9ee6e8bb 9657 s->is_jmp = DISAS_SWI;
99c475ab
FB
9658 break;
9659 }
9660 /* generate a conditional jump to next instruction */
e50e6a20 9661 s->condlabel = gen_new_label();
d9ba4830 9662 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9663 s->condjmp = 1;
99c475ab
FB
9664
9665 /* jump to the offset */
5899f386 9666 val = (uint32_t)s->pc + 2;
99c475ab 9667 offset = ((int32_t)insn << 24) >> 24;
5899f386 9668 val += offset << 1;
8aaca4c0 9669 gen_jmp(s, val);
99c475ab
FB
9670 break;
9671
9672 case 14:
358bf29e 9673 if (insn & (1 << 11)) {
9ee6e8bb
PB
9674 if (disas_thumb2_insn(env, s, insn))
9675 goto undef32;
358bf29e
PB
9676 break;
9677 }
9ee6e8bb 9678 /* unconditional branch */
99c475ab
FB
9679 val = (uint32_t)s->pc;
9680 offset = ((int32_t)insn << 21) >> 21;
9681 val += (offset << 1) + 2;
8aaca4c0 9682 gen_jmp(s, val);
99c475ab
FB
9683 break;
9684
9685 case 15:
9ee6e8bb 9686 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9687 goto undef32;
9ee6e8bb 9688 break;
99c475ab
FB
9689 }
9690 return;
9ee6e8bb 9691undef32:
bc4a0de0 9692 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9693 return;
9694illegal_op:
99c475ab 9695undef:
bc4a0de0 9696 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9697}
9698
2c0262af
FB
9699/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9700 basic block 'tb'. If search_pc is TRUE, also generate PC
9701 information for each intermediate instruction. */
2cfc5f17
TS
9702static inline void gen_intermediate_code_internal(CPUState *env,
9703 TranslationBlock *tb,
9704 int search_pc)
2c0262af
FB
9705{
9706 DisasContext dc1, *dc = &dc1;
a1d1bb31 9707 CPUBreakpoint *bp;
2c0262af
FB
9708 uint16_t *gen_opc_end;
9709 int j, lj;
0fa85d43 9710 target_ulong pc_start;
b5ff1b31 9711 uint32_t next_page_start;
2e70f6ef
PB
9712 int num_insns;
9713 int max_insns;
3b46e624 9714
2c0262af 9715 /* generate intermediate code */
0fa85d43 9716 pc_start = tb->pc;
3b46e624 9717
2c0262af
FB
9718 dc->tb = tb;
9719
2c0262af 9720 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9721
9722 dc->is_jmp = DISAS_NEXT;
9723 dc->pc = pc_start;
8aaca4c0 9724 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9725 dc->condjmp = 0;
7204ab88 9726 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9727 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9728 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9729#if !defined(CONFIG_USER_ONLY)
61f74d6a 9730 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9731#endif
5df8bac1 9732 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9733 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9734 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9735 cpu_F0s = tcg_temp_new_i32();
9736 cpu_F1s = tcg_temp_new_i32();
9737 cpu_F0d = tcg_temp_new_i64();
9738 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9739 cpu_V0 = cpu_F0d;
9740 cpu_V1 = cpu_F1d;
e677137d 9741 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9742 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9743 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9744 lj = -1;
2e70f6ef
PB
9745 num_insns = 0;
9746 max_insns = tb->cflags & CF_COUNT_MASK;
9747 if (max_insns == 0)
9748 max_insns = CF_COUNT_MASK;
9749
9750 gen_icount_start();
e12ce78d 9751
3849902c
PM
9752 tcg_clear_temp_count();
9753
e12ce78d
PM
9754 /* A note on handling of the condexec (IT) bits:
9755 *
9756 * We want to avoid the overhead of having to write the updated condexec
9757 * bits back to the CPUState for every instruction in an IT block. So:
9758 * (1) if the condexec bits are not already zero then we write
9759 * zero back into the CPUState now. This avoids complications trying
9760 * to do it at the end of the block. (For example if we don't do this
9761 * it's hard to identify whether we can safely skip writing condexec
9762 * at the end of the TB, which we definitely want to do for the case
9763 * where a TB doesn't do anything with the IT state at all.)
9764 * (2) if we are going to leave the TB then we call gen_set_condexec()
9765 * which will write the correct value into CPUState if zero is wrong.
9766 * This is done both for leaving the TB at the end, and for leaving
9767 * it because of an exception we know will happen, which is done in
9768 * gen_exception_insn(). The latter is necessary because we need to
9769 * leave the TB with the PC/IT state just prior to execution of the
9770 * instruction which caused the exception.
9771 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9772 * then the CPUState will be wrong and we need to reset it.
9773 * This is handled in the same way as restoration of the
9774 * PC in these situations: we will be called again with search_pc=1
9775 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9776 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9777 * this to restore the condexec bits.
e12ce78d
PM
9778 *
9779 * Note that there are no instructions which can read the condexec
9780 * bits, and none which can write non-static values to them, so
9781 * we don't need to care about whether CPUState is correct in the
9782 * middle of a TB.
9783 */
9784
9ee6e8bb
PB
9785 /* Reset the conditional execution bits immediately. This avoids
9786 complications trying to do it at the end of the block. */
98eac7ca 9787 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9788 {
7d1b0095 9789 TCGv tmp = tcg_temp_new_i32();
8f01245e 9790 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9791 store_cpu_field(tmp, condexec_bits);
8f01245e 9792 }
2c0262af 9793 do {
fbb4a2e3
PB
9794#ifdef CONFIG_USER_ONLY
9795 /* Intercept jump to the magic kernel page. */
9796 if (dc->pc >= 0xffff0000) {
9797 /* We always get here via a jump, so know we are not in a
9798 conditional execution block. */
9799 gen_exception(EXCP_KERNEL_TRAP);
9800 dc->is_jmp = DISAS_UPDATE;
9801 break;
9802 }
9803#else
9ee6e8bb
PB
9804 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9805 /* We always get here via a jump, so know we are not in a
9806 conditional execution block. */
d9ba4830 9807 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9808 dc->is_jmp = DISAS_UPDATE;
9809 break;
9ee6e8bb
PB
9810 }
9811#endif
9812
72cf2d4f
BS
9813 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9814 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9815 if (bp->pc == dc->pc) {
bc4a0de0 9816 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9817 /* Advance PC so that clearing the breakpoint will
9818 invalidate this TB. */
9819 dc->pc += 2;
9820 goto done_generating;
1fddef4b
FB
9821 break;
9822 }
9823 }
9824 }
2c0262af
FB
9825 if (search_pc) {
9826 j = gen_opc_ptr - gen_opc_buf;
9827 if (lj < j) {
9828 lj++;
9829 while (lj < j)
9830 gen_opc_instr_start[lj++] = 0;
9831 }
0fa85d43 9832 gen_opc_pc[lj] = dc->pc;
e12ce78d 9833 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9834 gen_opc_instr_start[lj] = 1;
2e70f6ef 9835 gen_opc_icount[lj] = num_insns;
2c0262af 9836 }
e50e6a20 9837
2e70f6ef
PB
9838 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9839 gen_io_start();
9840
5642463a
PM
9841 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9842 tcg_gen_debug_insn_start(dc->pc);
9843 }
9844
7204ab88 9845 if (dc->thumb) {
9ee6e8bb
PB
9846 disas_thumb_insn(env, dc);
9847 if (dc->condexec_mask) {
9848 dc->condexec_cond = (dc->condexec_cond & 0xe)
9849 | ((dc->condexec_mask >> 4) & 1);
9850 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9851 if (dc->condexec_mask == 0) {
9852 dc->condexec_cond = 0;
9853 }
9854 }
9855 } else {
9856 disas_arm_insn(env, dc);
9857 }
e50e6a20
FB
9858
9859 if (dc->condjmp && !dc->is_jmp) {
9860 gen_set_label(dc->condlabel);
9861 dc->condjmp = 0;
9862 }
3849902c
PM
9863
9864 if (tcg_check_temp_count()) {
9865 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9866 }
9867
aaf2d97d 9868 /* Translation stops when a conditional branch is encountered.
e50e6a20 9869 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9870 * Also stop translation when a page boundary is reached. This
bf20dc07 9871 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9872 num_insns ++;
1fddef4b
FB
9873 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9874 !env->singlestep_enabled &&
1b530a6d 9875 !singlestep &&
2e70f6ef
PB
9876 dc->pc < next_page_start &&
9877 num_insns < max_insns);
9878
9879 if (tb->cflags & CF_LAST_IO) {
9880 if (dc->condjmp) {
9881 /* FIXME: This can theoretically happen with self-modifying
9882 code. */
9883 cpu_abort(env, "IO on conditional branch instruction");
9884 }
9885 gen_io_end();
9886 }
9ee6e8bb 9887
b5ff1b31 9888 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9889 instruction was a conditional branch or trap, and the PC has
9890 already been written. */
551bd27f 9891 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9892 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9893 if (dc->condjmp) {
9ee6e8bb
PB
9894 gen_set_condexec(dc);
9895 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9896 gen_exception(EXCP_SWI);
9ee6e8bb 9897 } else {
d9ba4830 9898 gen_exception(EXCP_DEBUG);
9ee6e8bb 9899 }
e50e6a20
FB
9900 gen_set_label(dc->condlabel);
9901 }
9902 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9903 gen_set_pc_im(dc->pc);
e50e6a20 9904 dc->condjmp = 0;
8aaca4c0 9905 }
9ee6e8bb
PB
9906 gen_set_condexec(dc);
9907 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9908 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9909 } else {
9910 /* FIXME: Single stepping a WFI insn will not halt
9911 the CPU. */
d9ba4830 9912 gen_exception(EXCP_DEBUG);
9ee6e8bb 9913 }
8aaca4c0 9914 } else {
9ee6e8bb
PB
9915 /* While branches must always occur at the end of an IT block,
9916 there are a few other things that can cause us to terminate
9917 the TB in the middel of an IT block:
9918 - Exception generating instructions (bkpt, swi, undefined).
9919 - Page boundaries.
9920 - Hardware watchpoints.
9921 Hardware breakpoints have already been handled and skip this code.
9922 */
9923 gen_set_condexec(dc);
8aaca4c0 9924 switch(dc->is_jmp) {
8aaca4c0 9925 case DISAS_NEXT:
6e256c93 9926 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9927 break;
9928 default:
9929 case DISAS_JUMP:
9930 case DISAS_UPDATE:
9931 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9932 tcg_gen_exit_tb(0);
8aaca4c0
FB
9933 break;
9934 case DISAS_TB_JUMP:
9935 /* nothing more to generate */
9936 break;
9ee6e8bb 9937 case DISAS_WFI:
d9ba4830 9938 gen_helper_wfi();
9ee6e8bb
PB
9939 break;
9940 case DISAS_SWI:
d9ba4830 9941 gen_exception(EXCP_SWI);
9ee6e8bb 9942 break;
8aaca4c0 9943 }
e50e6a20
FB
9944 if (dc->condjmp) {
9945 gen_set_label(dc->condlabel);
9ee6e8bb 9946 gen_set_condexec(dc);
6e256c93 9947 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9948 dc->condjmp = 0;
9949 }
2c0262af 9950 }
2e70f6ef 9951
9ee6e8bb 9952done_generating:
2e70f6ef 9953 gen_icount_end(tb, num_insns);
2c0262af
FB
9954 *gen_opc_ptr = INDEX_op_end;
9955
9956#ifdef DEBUG_DISAS
8fec2b8c 9957 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9958 qemu_log("----------------\n");
9959 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9960 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9961 qemu_log("\n");
2c0262af
FB
9962 }
9963#endif
b5ff1b31
FB
9964 if (search_pc) {
9965 j = gen_opc_ptr - gen_opc_buf;
9966 lj++;
9967 while (lj <= j)
9968 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9969 } else {
2c0262af 9970 tb->size = dc->pc - pc_start;
2e70f6ef 9971 tb->icount = num_insns;
b5ff1b31 9972 }
2c0262af
FB
9973}
9974
/* Translate the basic block 'tb' without recording per-op PC
   mapping information (search_pc = 0).  */
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
9979
/* Translate the basic block 'tb' and additionally record PC (and
   condexec) information for each intermediate op (search_pc = 1),
   for use by restore_state_to_opc().  */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
9984
b5ff1b31
FB
/* Printable names for the CPU mode field, indexed by CPSR bits [3:0];
   "???" marks encodings with no named mode.  */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 9989
/* Dump the CPU register state (R0-R15 and the PSR) to 'f' using
   'cpu_fprintf'.  The VFP register dump is disabled (#if 0).
   'flags' is currently unused.  */
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    /* Print R00..R15, four registers per line.  */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    /* NZCV flags, Thumb/ARM state, mode name, and 32/26-bit mode
       (bit 4 of the mode field distinguishes the 26-bit modes).  */
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
a6b025d3 10041
/* Restore the PC (regs[15]) and the condexec (IT) bits from the
   per-op arrays filled in by a search_pc translation pass; 'pc_pos'
   is the op index within the TB at which execution stopped.  */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}