]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
Version 0.15.1
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
2c0262af 28#include "disas.h"
57fec1fe 29#include "tcg-op.h"
79383c9c 30#include "qemu-log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 45
86753403 46#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
9ee6e8bb
PB
56 /* Thumb-2 condtional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
b5ff1b31
FB
62#if !defined(CONFIG_USER_ONLY)
63 int user;
64#endif
5df8bac1 65 int vfp_enabled;
69d1fc22
PM
66 int vec_len;
67 int vec_stride;
2c0262af
FB
68} DisasContext;
69
e12ce78d
PM
70static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
71
b5ff1b31
FB
72#if defined(CONFIG_USER_ONLY)
73#define IS_USER(s) 1
74#else
75#define IS_USER(s) (s->user)
76#endif
77
9ee6e8bb
PB
78/* These instructions trap after executing, so defer them until after the
79 conditional executions state has been updated. */
80#define DISAS_WFI 4
81#define DISAS_SWI 5
2c0262af 82
a7812ae4 83static TCGv_ptr cpu_env;
ad69471c 84/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 85static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 86static TCGv_i32 cpu_R[16];
426f5abc
PB
87static TCGv_i32 cpu_exclusive_addr;
88static TCGv_i32 cpu_exclusive_val;
89static TCGv_i32 cpu_exclusive_high;
90#ifdef CONFIG_USER_ONLY
91static TCGv_i32 cpu_exclusive_test;
92static TCGv_i32 cpu_exclusive_info;
93#endif
ad69471c 94
b26eefb6 95/* FIXME: These should be removed. */
a7812ae4
PB
96static TCGv cpu_F0s, cpu_F1s;
97static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 98
2e70f6ef
PB
99#include "gen-icount.h"
100
155c3eac
FN
101static const char *regnames[] =
102 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
103 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
104
b26eefb6
PB
105/* initialize TCG globals. */
106void arm_translate_init(void)
107{
155c3eac
FN
108 int i;
109
a7812ae4
PB
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111
155c3eac
FN
112 for (i = 0; i < 16; i++) {
113 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, regs[i]),
115 regnames[i]);
116 }
426f5abc
PB
117 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, exclusive_addr), "exclusive_addr");
119 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, exclusive_val), "exclusive_val");
121 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_high), "exclusive_high");
123#ifdef CONFIG_USER_ONLY
124 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, exclusive_test), "exclusive_test");
126 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, exclusive_info), "exclusive_info");
128#endif
155c3eac 129
a7812ae4 130#define GEN_HELPER 2
7b59220e 131#include "helper.h"
b26eefb6
PB
132}
133
d9ba4830
PB
134static inline TCGv load_cpu_offset(int offset)
135{
7d1b0095 136 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
142
143static inline void store_cpu_offset(TCGv var, int offset)
144{
145 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 146 tcg_temp_free_i32(var);
d9ba4830
PB
147}
148
149#define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
151
b26eefb6
PB
152/* Set a variable to the value of a CPU register. */
153static void load_reg_var(DisasContext *s, TCGv var, int reg)
154{
155 if (reg == 15) {
156 uint32_t addr;
157 /* normaly, since we updated PC, we need only to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
155c3eac 164 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
169static inline TCGv load_reg(DisasContext *s, int reg)
170{
7d1b0095 171 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
176/* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178static void store_reg(DisasContext *s, int reg, TCGv var)
179{
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
155c3eac 184 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 185 tcg_temp_free_i32(var);
b26eefb6
PB
186}
187
b26eefb6 188/* Value extensions. */
86831435
PB
189#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
191#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
1497c961
PB
194#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 196
b26eefb6 197
b75263d6
JR
198static inline void gen_set_cpsr(TCGv var, uint32_t mask)
199{
200 TCGv tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
203}
d9ba4830
PB
204/* Set NZCV flags from the high 4 bits of var. */
205#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
207static void gen_exception(int excp)
208{
7d1b0095 209 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
210 tcg_gen_movi_i32(tmp, excp);
211 gen_helper_exception(tmp);
7d1b0095 212 tcg_temp_free_i32(tmp);
d9ba4830
PB
213}
214
3670669c
PB
215static void gen_smul_dual(TCGv a, TCGv b)
216{
7d1b0095
PM
217 TCGv tmp1 = tcg_temp_new_i32();
218 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
219 tcg_gen_ext16s_i32(tmp1, a);
220 tcg_gen_ext16s_i32(tmp2, b);
3670669c 221 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 222 tcg_temp_free_i32(tmp2);
3670669c
PB
223 tcg_gen_sari_i32(a, a, 16);
224 tcg_gen_sari_i32(b, b, 16);
225 tcg_gen_mul_i32(b, b, a);
226 tcg_gen_mov_i32(a, tmp1);
7d1b0095 227 tcg_temp_free_i32(tmp1);
3670669c
PB
228}
229
230/* Byteswap each halfword. */
231static void gen_rev16(TCGv var)
232{
7d1b0095 233 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
234 tcg_gen_shri_i32(tmp, var, 8);
235 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
236 tcg_gen_shli_i32(var, var, 8);
237 tcg_gen_andi_i32(var, var, 0xff00ff00);
238 tcg_gen_or_i32(var, var, tmp);
7d1b0095 239 tcg_temp_free_i32(tmp);
3670669c
PB
240}
241
242/* Byteswap low halfword and sign extend. */
243static void gen_revsh(TCGv var)
244{
1a855029
AJ
245 tcg_gen_ext16u_i32(var, var);
246 tcg_gen_bswap16_i32(var, var);
247 tcg_gen_ext16s_i32(var, var);
3670669c
PB
248}
249
250/* Unsigned bitfield extract. */
251static void gen_ubfx(TCGv var, int shift, uint32_t mask)
252{
253 if (shift)
254 tcg_gen_shri_i32(var, var, shift);
255 tcg_gen_andi_i32(var, var, mask);
256}
257
258/* Signed bitfield extract. */
259static void gen_sbfx(TCGv var, int shift, int width)
260{
261 uint32_t signbit;
262
263 if (shift)
264 tcg_gen_sari_i32(var, var, shift);
265 if (shift + width < 32) {
266 signbit = 1u << (width - 1);
267 tcg_gen_andi_i32(var, var, (1u << width) - 1);
268 tcg_gen_xori_i32(var, var, signbit);
269 tcg_gen_subi_i32(var, var, signbit);
270 }
271}
272
273/* Bitfield insertion. Insert val into base. Clobbers base and val. */
274static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
275{
3670669c 276 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
277 tcg_gen_shli_i32(val, val, shift);
278 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
279 tcg_gen_or_i32(dest, base, val);
280}
281
838fa72d
AJ
282/* Return (b << 32) + a. Mark inputs as dead */
283static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 284{
838fa72d
AJ
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
286
287 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 288 tcg_temp_free_i32(b);
838fa72d
AJ
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
291
292 tcg_temp_free_i64(tmp64);
293 return a;
294}
295
296/* Return (b << 32) - a. Mark inputs as dead. */
297static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
298{
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
300
301 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 302 tcg_temp_free_i32(b);
838fa72d
AJ
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
305
306 tcg_temp_free_i64(tmp64);
307 return a;
3670669c
PB
308}
309
8f01245e
PB
310/* FIXME: Most targets have native widening multiplication.
311 It would be good to use that instead of a full wide multiply. */
5e3f878a 312/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 313static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 314{
a7812ae4
PB
315 TCGv_i64 tmp1 = tcg_temp_new_i64();
316 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
317
318 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 319 tcg_temp_free_i32(a);
5e3f878a 320 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 321 tcg_temp_free_i32(b);
5e3f878a 322 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 323 tcg_temp_free_i64(tmp2);
5e3f878a
PB
324 return tmp1;
325}
326
a7812ae4 327static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
a7812ae4
PB
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
331
332 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 333 tcg_temp_free_i32(a);
5e3f878a 334 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 335 tcg_temp_free_i32(b);
5e3f878a 336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 337 tcg_temp_free_i64(tmp2);
5e3f878a
PB
338 return tmp1;
339}
340
8f01245e
PB
341/* Swap low and high halfwords. */
342static void gen_swap_half(TCGv var)
343{
7d1b0095 344 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
345 tcg_gen_shri_i32(tmp, var, 16);
346 tcg_gen_shli_i32(var, var, 16);
347 tcg_gen_or_i32(var, var, tmp);
7d1b0095 348 tcg_temp_free_i32(tmp);
8f01245e
PB
349}
350
b26eefb6
PB
351/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
352 tmp = (t0 ^ t1) & 0x8000;
353 t0 &= ~0x8000;
354 t1 &= ~0x8000;
355 t0 = (t0 + t1) ^ tmp;
356 */
357
358static void gen_add16(TCGv t0, TCGv t1)
359{
7d1b0095 360 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
361 tcg_gen_xor_i32(tmp, t0, t1);
362 tcg_gen_andi_i32(tmp, tmp, 0x8000);
363 tcg_gen_andi_i32(t0, t0, ~0x8000);
364 tcg_gen_andi_i32(t1, t1, ~0x8000);
365 tcg_gen_add_i32(t0, t0, t1);
366 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
367 tcg_temp_free_i32(tmp);
368 tcg_temp_free_i32(t1);
b26eefb6
PB
369}
370
9a119ff6
PB
371#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
372
b26eefb6
PB
373/* Set CF to the top bit of var. */
374static void gen_set_CF_bit31(TCGv var)
375{
7d1b0095 376 TCGv tmp = tcg_temp_new_i32();
b26eefb6 377 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 378 gen_set_CF(tmp);
7d1b0095 379 tcg_temp_free_i32(tmp);
b26eefb6
PB
380}
381
382/* Set N and Z flags from var. */
383static inline void gen_logic_CC(TCGv var)
384{
6fbe23d5
PB
385 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
387}
388
389/* T0 += T1 + CF. */
396e467c 390static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 391{
d9ba4830 392 TCGv tmp;
396e467c 393 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 394 tmp = load_cpu_field(CF);
396e467c 395 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 396 tcg_temp_free_i32(tmp);
b26eefb6
PB
397}
398
e9bb4aa9
JR
399/* dest = T0 + T1 + CF. */
400static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
401{
402 TCGv tmp;
403 tcg_gen_add_i32(dest, t0, t1);
404 tmp = load_cpu_field(CF);
405 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 406 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
407}
408
3670669c
PB
409/* dest = T0 - T1 + CF - 1. */
410static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
411{
d9ba4830 412 TCGv tmp;
3670669c 413 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 414 tmp = load_cpu_field(CF);
3670669c
PB
415 tcg_gen_add_i32(dest, dest, tmp);
416 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 417 tcg_temp_free_i32(tmp);
3670669c
PB
418}
419
ad69471c
PB
420/* FIXME: Implement this natively. */
421#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
422
9a119ff6 423static void shifter_out_im(TCGv var, int shift)
b26eefb6 424{
7d1b0095 425 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
426 if (shift == 0) {
427 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 428 } else {
9a119ff6 429 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 430 if (shift != 31)
9a119ff6
PB
431 tcg_gen_andi_i32(tmp, tmp, 1);
432 }
433 gen_set_CF(tmp);
7d1b0095 434 tcg_temp_free_i32(tmp);
9a119ff6 435}
b26eefb6 436
9a119ff6
PB
437/* Shift by immediate. Includes special handling for shift == 0. */
438static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
439{
440 switch (shiftop) {
441 case 0: /* LSL */
442 if (shift != 0) {
443 if (flags)
444 shifter_out_im(var, 32 - shift);
445 tcg_gen_shli_i32(var, var, shift);
446 }
447 break;
448 case 1: /* LSR */
449 if (shift == 0) {
450 if (flags) {
451 tcg_gen_shri_i32(var, var, 31);
452 gen_set_CF(var);
453 }
454 tcg_gen_movi_i32(var, 0);
455 } else {
456 if (flags)
457 shifter_out_im(var, shift - 1);
458 tcg_gen_shri_i32(var, var, shift);
459 }
460 break;
461 case 2: /* ASR */
462 if (shift == 0)
463 shift = 32;
464 if (flags)
465 shifter_out_im(var, shift - 1);
466 if (shift == 32)
467 shift = 31;
468 tcg_gen_sari_i32(var, var, shift);
469 break;
470 case 3: /* ROR/RRX */
471 if (shift != 0) {
472 if (flags)
473 shifter_out_im(var, shift - 1);
f669df27 474 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 475 } else {
d9ba4830 476 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
477 if (flags)
478 shifter_out_im(var, 0);
479 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
480 tcg_gen_shli_i32(tmp, tmp, 31);
481 tcg_gen_or_i32(var, var, tmp);
7d1b0095 482 tcg_temp_free_i32(tmp);
b26eefb6
PB
483 }
484 }
485};
486
8984bd2e
PB
487static inline void gen_arm_shift_reg(TCGv var, int shiftop,
488 TCGv shift, int flags)
489{
490 if (flags) {
491 switch (shiftop) {
492 case 0: gen_helper_shl_cc(var, var, shift); break;
493 case 1: gen_helper_shr_cc(var, var, shift); break;
494 case 2: gen_helper_sar_cc(var, var, shift); break;
495 case 3: gen_helper_ror_cc(var, var, shift); break;
496 }
497 } else {
498 switch (shiftop) {
499 case 0: gen_helper_shl(var, var, shift); break;
500 case 1: gen_helper_shr(var, var, shift); break;
501 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
502 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
503 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
504 }
505 }
7d1b0095 506 tcg_temp_free_i32(shift);
8984bd2e
PB
507}
508
6ddbc6e4
PB
509#define PAS_OP(pfx) \
510 switch (op2) { \
511 case 0: gen_pas_helper(glue(pfx,add16)); break; \
512 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
513 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
514 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
515 case 4: gen_pas_helper(glue(pfx,add8)); break; \
516 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
517 }
d9ba4830 518static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 519{
a7812ae4 520 TCGv_ptr tmp;
6ddbc6e4
PB
521
522 switch (op1) {
523#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
524 case 1:
a7812ae4 525 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
526 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
527 PAS_OP(s)
b75263d6 528 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
529 break;
530 case 5:
a7812ae4 531 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
532 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
533 PAS_OP(u)
b75263d6 534 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
535 break;
536#undef gen_pas_helper
537#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
538 case 2:
539 PAS_OP(q);
540 break;
541 case 3:
542 PAS_OP(sh);
543 break;
544 case 6:
545 PAS_OP(uq);
546 break;
547 case 7:
548 PAS_OP(uh);
549 break;
550#undef gen_pas_helper
551 }
552}
9ee6e8bb
PB
553#undef PAS_OP
554
6ddbc6e4
PB
555/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
556#define PAS_OP(pfx) \
ed89a2f1 557 switch (op1) { \
6ddbc6e4
PB
558 case 0: gen_pas_helper(glue(pfx,add8)); break; \
559 case 1: gen_pas_helper(glue(pfx,add16)); break; \
560 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
562 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
564 }
d9ba4830 565static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 566{
a7812ae4 567 TCGv_ptr tmp;
6ddbc6e4 568
ed89a2f1 569 switch (op2) {
6ddbc6e4
PB
570#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
571 case 0:
a7812ae4 572 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
573 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
574 PAS_OP(s)
b75263d6 575 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
576 break;
577 case 4:
a7812ae4 578 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
b75263d6 581 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
582 break;
583#undef gen_pas_helper
584#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
585 case 1:
586 PAS_OP(q);
587 break;
588 case 2:
589 PAS_OP(sh);
590 break;
591 case 5:
592 PAS_OP(uq);
593 break;
594 case 6:
595 PAS_OP(uh);
596 break;
597#undef gen_pas_helper
598 }
599}
9ee6e8bb
PB
600#undef PAS_OP
601
d9ba4830
PB
602static void gen_test_cc(int cc, int label)
603{
604 TCGv tmp;
605 TCGv tmp2;
d9ba4830
PB
606 int inv;
607
d9ba4830
PB
608 switch (cc) {
609 case 0: /* eq: Z */
6fbe23d5 610 tmp = load_cpu_field(ZF);
cb63669a 611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
612 break;
613 case 1: /* ne: !Z */
6fbe23d5 614 tmp = load_cpu_field(ZF);
cb63669a 615 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
616 break;
617 case 2: /* cs: C */
618 tmp = load_cpu_field(CF);
cb63669a 619 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
620 break;
621 case 3: /* cc: !C */
622 tmp = load_cpu_field(CF);
cb63669a 623 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
624 break;
625 case 4: /* mi: N */
6fbe23d5 626 tmp = load_cpu_field(NF);
cb63669a 627 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
628 break;
629 case 5: /* pl: !N */
6fbe23d5 630 tmp = load_cpu_field(NF);
cb63669a 631 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
632 break;
633 case 6: /* vs: V */
634 tmp = load_cpu_field(VF);
cb63669a 635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
636 break;
637 case 7: /* vc: !V */
638 tmp = load_cpu_field(VF);
cb63669a 639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
640 break;
641 case 8: /* hi: C && !Z */
642 inv = gen_new_label();
643 tmp = load_cpu_field(CF);
cb63669a 644 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 645 tcg_temp_free_i32(tmp);
6fbe23d5 646 tmp = load_cpu_field(ZF);
cb63669a 647 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
648 gen_set_label(inv);
649 break;
650 case 9: /* ls: !C || Z */
651 tmp = load_cpu_field(CF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 653 tcg_temp_free_i32(tmp);
6fbe23d5 654 tmp = load_cpu_field(ZF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
656 break;
657 case 10: /* ge: N == V -> N ^ V == 0 */
658 tmp = load_cpu_field(VF);
6fbe23d5 659 tmp2 = load_cpu_field(NF);
d9ba4830 660 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 661 tcg_temp_free_i32(tmp2);
cb63669a 662 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
663 break;
664 case 11: /* lt: N != V -> N ^ V != 0 */
665 tmp = load_cpu_field(VF);
6fbe23d5 666 tmp2 = load_cpu_field(NF);
d9ba4830 667 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 668 tcg_temp_free_i32(tmp2);
cb63669a 669 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
670 break;
671 case 12: /* gt: !Z && N == V */
672 inv = gen_new_label();
6fbe23d5 673 tmp = load_cpu_field(ZF);
cb63669a 674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 675 tcg_temp_free_i32(tmp);
d9ba4830 676 tmp = load_cpu_field(VF);
6fbe23d5 677 tmp2 = load_cpu_field(NF);
d9ba4830 678 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 679 tcg_temp_free_i32(tmp2);
cb63669a 680 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
681 gen_set_label(inv);
682 break;
683 case 13: /* le: Z || N != V */
6fbe23d5 684 tmp = load_cpu_field(ZF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 686 tcg_temp_free_i32(tmp);
d9ba4830 687 tmp = load_cpu_field(VF);
6fbe23d5 688 tmp2 = load_cpu_field(NF);
d9ba4830 689 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 690 tcg_temp_free_i32(tmp2);
cb63669a 691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
692 break;
693 default:
694 fprintf(stderr, "Bad condition code 0x%x\n", cc);
695 abort();
696 }
7d1b0095 697 tcg_temp_free_i32(tmp);
d9ba4830 698}
2c0262af 699
b1d8e52e 700static const uint8_t table_logic_cc[16] = {
2c0262af
FB
701 1, /* and */
702 1, /* xor */
703 0, /* sub */
704 0, /* rsb */
705 0, /* add */
706 0, /* adc */
707 0, /* sbc */
708 0, /* rsc */
709 1, /* andl */
710 1, /* xorl */
711 0, /* cmp */
712 0, /* cmn */
713 1, /* orr */
714 1, /* mov */
715 1, /* bic */
716 1, /* mvn */
717};
3b46e624 718
d9ba4830
PB
719/* Set PC and Thumb state from an immediate address. */
720static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 721{
b26eefb6 722 TCGv tmp;
99c475ab 723
b26eefb6 724 s->is_jmp = DISAS_UPDATE;
d9ba4830 725 if (s->thumb != (addr & 1)) {
7d1b0095 726 tmp = tcg_temp_new_i32();
d9ba4830
PB
727 tcg_gen_movi_i32(tmp, addr & 1);
728 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 729 tcg_temp_free_i32(tmp);
d9ba4830 730 }
155c3eac 731 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
732}
733
734/* Set PC and Thumb state from var. var is marked as dead. */
735static inline void gen_bx(DisasContext *s, TCGv var)
736{
d9ba4830 737 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
738 tcg_gen_andi_i32(cpu_R[15], var, ~1);
739 tcg_gen_andi_i32(var, var, 1);
740 store_cpu_field(var, thumb);
d9ba4830
PB
741}
742
21aeb343
JR
743/* Variant of store_reg which uses branch&exchange logic when storing
744 to r15 in ARM architecture v7 and above. The source must be a temporary
745 and will be marked as dead. */
746static inline void store_reg_bx(CPUState *env, DisasContext *s,
747 int reg, TCGv var)
748{
749 if (reg == 15 && ENABLE_ARCH_7) {
750 gen_bx(s, var);
751 } else {
752 store_reg(s, reg, var);
753 }
754}
755
be5e7a76
DES
756/* Variant of store_reg which uses branch&exchange logic when storing
757 * to r15 in ARM architecture v5T and above. This is used for storing
758 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
759 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
760static inline void store_reg_from_load(CPUState *env, DisasContext *s,
761 int reg, TCGv var)
762{
763 if (reg == 15 && ENABLE_ARCH_5) {
764 gen_bx(s, var);
765 } else {
766 store_reg(s, reg, var);
767 }
768}
769
b0109805
PB
770static inline TCGv gen_ld8s(TCGv addr, int index)
771{
7d1b0095 772 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
773 tcg_gen_qemu_ld8s(tmp, addr, index);
774 return tmp;
775}
776static inline TCGv gen_ld8u(TCGv addr, int index)
777{
7d1b0095 778 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
779 tcg_gen_qemu_ld8u(tmp, addr, index);
780 return tmp;
781}
782static inline TCGv gen_ld16s(TCGv addr, int index)
783{
7d1b0095 784 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
785 tcg_gen_qemu_ld16s(tmp, addr, index);
786 return tmp;
787}
788static inline TCGv gen_ld16u(TCGv addr, int index)
789{
7d1b0095 790 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
791 tcg_gen_qemu_ld16u(tmp, addr, index);
792 return tmp;
793}
794static inline TCGv gen_ld32(TCGv addr, int index)
795{
7d1b0095 796 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
797 tcg_gen_qemu_ld32u(tmp, addr, index);
798 return tmp;
799}
84496233
JR
800static inline TCGv_i64 gen_ld64(TCGv addr, int index)
801{
802 TCGv_i64 tmp = tcg_temp_new_i64();
803 tcg_gen_qemu_ld64(tmp, addr, index);
804 return tmp;
805}
b0109805
PB
806static inline void gen_st8(TCGv val, TCGv addr, int index)
807{
808 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 809 tcg_temp_free_i32(val);
b0109805
PB
810}
811static inline void gen_st16(TCGv val, TCGv addr, int index)
812{
813 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 814 tcg_temp_free_i32(val);
b0109805
PB
815}
816static inline void gen_st32(TCGv val, TCGv addr, int index)
817{
818 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 819 tcg_temp_free_i32(val);
b0109805 820}
84496233
JR
821static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
822{
823 tcg_gen_qemu_st64(val, addr, index);
824 tcg_temp_free_i64(val);
825}
b5ff1b31 826
5e3f878a
PB
827static inline void gen_set_pc_im(uint32_t val)
828{
155c3eac 829 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
830}
831
b5ff1b31
FB
832/* Force a TB lookup after an instruction that changes the CPU state. */
833static inline void gen_lookup_tb(DisasContext *s)
834{
a6445c52 835 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
836 s->is_jmp = DISAS_UPDATE;
837}
838
b0109805
PB
839static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
840 TCGv var)
2c0262af 841{
1e8d4eec 842 int val, rm, shift, shiftop;
b26eefb6 843 TCGv offset;
2c0262af
FB
844
845 if (!(insn & (1 << 25))) {
846 /* immediate */
847 val = insn & 0xfff;
848 if (!(insn & (1 << 23)))
849 val = -val;
537730b9 850 if (val != 0)
b0109805 851 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
852 } else {
853 /* shift/register */
854 rm = (insn) & 0xf;
855 shift = (insn >> 7) & 0x1f;
1e8d4eec 856 shiftop = (insn >> 5) & 3;
b26eefb6 857 offset = load_reg(s, rm);
9a119ff6 858 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 859 if (!(insn & (1 << 23)))
b0109805 860 tcg_gen_sub_i32(var, var, offset);
2c0262af 861 else
b0109805 862 tcg_gen_add_i32(var, var, offset);
7d1b0095 863 tcg_temp_free_i32(offset);
2c0262af
FB
864 }
865}
866
191f9a93 867static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 868 int extra, TCGv var)
2c0262af
FB
869{
870 int val, rm;
b26eefb6 871 TCGv offset;
3b46e624 872
2c0262af
FB
873 if (insn & (1 << 22)) {
874 /* immediate */
875 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
876 if (!(insn & (1 << 23)))
877 val = -val;
18acad92 878 val += extra;
537730b9 879 if (val != 0)
b0109805 880 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
881 } else {
882 /* register */
191f9a93 883 if (extra)
b0109805 884 tcg_gen_addi_i32(var, var, extra);
2c0262af 885 rm = (insn) & 0xf;
b26eefb6 886 offset = load_reg(s, rm);
2c0262af 887 if (!(insn & (1 << 23)))
b0109805 888 tcg_gen_sub_i32(var, var, offset);
2c0262af 889 else
b0109805 890 tcg_gen_add_i32(var, var, offset);
7d1b0095 891 tcg_temp_free_i32(offset);
2c0262af
FB
892 }
893}
894
5aaebd13
PM
895static TCGv_ptr get_fpstatus_ptr(int neon)
896{
897 TCGv_ptr statusptr = tcg_temp_new_ptr();
898 int offset;
899 if (neon) {
900 offset = offsetof(CPUState, vfp.standard_fp_status);
901 } else {
902 offset = offsetof(CPUState, vfp.fp_status);
903 }
904 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
905 return statusptr;
906}
907
4373f3ce
PB
908#define VFP_OP2(name) \
909static inline void gen_vfp_##name(int dp) \
910{ \
ae1857ec
PM
911 TCGv_ptr fpst = get_fpstatus_ptr(0); \
912 if (dp) { \
913 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
914 } else { \
915 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
916 } \
917 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
918}
919
4373f3ce
PB
920VFP_OP2(add)
921VFP_OP2(sub)
922VFP_OP2(mul)
923VFP_OP2(div)
924
925#undef VFP_OP2
926
605a6aed
PM
927static inline void gen_vfp_F1_mul(int dp)
928{
929 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 930 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 931 if (dp) {
ae1857ec 932 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 933 } else {
ae1857ec 934 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 935 }
ae1857ec 936 tcg_temp_free_ptr(fpst);
605a6aed
PM
937}
938
939static inline void gen_vfp_F1_neg(int dp)
940{
941 /* Like gen_vfp_neg() but put result in F1 */
942 if (dp) {
943 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
944 } else {
945 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
946 }
947}
948
4373f3ce
PB
949static inline void gen_vfp_abs(int dp)
950{
951 if (dp)
952 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
953 else
954 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
955}
956
957static inline void gen_vfp_neg(int dp)
958{
959 if (dp)
960 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
961 else
962 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
963}
964
965static inline void gen_vfp_sqrt(int dp)
966{
967 if (dp)
968 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
969 else
970 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
971}
972
973static inline void gen_vfp_cmp(int dp)
974{
975 if (dp)
976 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
977 else
978 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
979}
980
981static inline void gen_vfp_cmpe(int dp)
982{
983 if (dp)
984 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
985 else
986 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
987}
988
989static inline void gen_vfp_F1_ld0(int dp)
990{
991 if (dp)
5b340b51 992 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 993 else
5b340b51 994 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
995}
996
5500b06c
PM
997#define VFP_GEN_ITOF(name) \
998static inline void gen_vfp_##name(int dp, int neon) \
999{ \
5aaebd13 1000 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1001 if (dp) { \
1002 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1003 } else { \
1004 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1005 } \
b7fa9214 1006 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1007}
1008
5500b06c
PM
1009VFP_GEN_ITOF(uito)
1010VFP_GEN_ITOF(sito)
1011#undef VFP_GEN_ITOF
4373f3ce 1012
5500b06c
PM
1013#define VFP_GEN_FTOI(name) \
1014static inline void gen_vfp_##name(int dp, int neon) \
1015{ \
5aaebd13 1016 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1017 if (dp) { \
1018 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1019 } else { \
1020 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1021 } \
b7fa9214 1022 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1023}
1024
5500b06c
PM
1025VFP_GEN_FTOI(toui)
1026VFP_GEN_FTOI(touiz)
1027VFP_GEN_FTOI(tosi)
1028VFP_GEN_FTOI(tosiz)
1029#undef VFP_GEN_FTOI
4373f3ce
PB
1030
1031#define VFP_GEN_FIX(name) \
5500b06c 1032static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1033{ \
b75263d6 1034 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1035 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1036 if (dp) { \
1037 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1038 } else { \
1039 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1040 } \
b75263d6 1041 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1042 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1043}
4373f3ce
PB
1044VFP_GEN_FIX(tosh)
1045VFP_GEN_FIX(tosl)
1046VFP_GEN_FIX(touh)
1047VFP_GEN_FIX(toul)
1048VFP_GEN_FIX(shto)
1049VFP_GEN_FIX(slto)
1050VFP_GEN_FIX(uhto)
1051VFP_GEN_FIX(ulto)
1052#undef VFP_GEN_FIX
9ee6e8bb 1053
312eea9f 1054static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1055{
1056 if (dp)
312eea9f 1057 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1058 else
312eea9f 1059 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1060}
1061
312eea9f 1062static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1063{
1064 if (dp)
312eea9f 1065 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1066 else
312eea9f 1067 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1068}
1069
8e96005d
FB
1070static inline long
1071vfp_reg_offset (int dp, int reg)
1072{
1073 if (dp)
1074 return offsetof(CPUARMState, vfp.regs[reg]);
1075 else if (reg & 1) {
1076 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1077 + offsetof(CPU_DoubleU, l.upper);
1078 } else {
1079 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1080 + offsetof(CPU_DoubleU, l.lower);
1081 }
1082}
9ee6e8bb
PB
1083
1084/* Return the offset of a 32-bit piece of a NEON register.
1085 zero is the least significant end of the register. */
1086static inline long
1087neon_reg_offset (int reg, int n)
1088{
1089 int sreg;
1090 sreg = reg * 2 + n;
1091 return vfp_reg_offset(0, sreg);
1092}
1093
8f8e3aa4
PB
1094static TCGv neon_load_reg(int reg, int pass)
1095{
7d1b0095 1096 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1097 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1098 return tmp;
1099}
1100
1101static void neon_store_reg(int reg, int pass, TCGv var)
1102{
1103 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1104 tcg_temp_free_i32(var);
8f8e3aa4
PB
1105}
1106
a7812ae4 1107static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1108{
1109 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1110}
1111
a7812ae4 1112static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1113{
1114 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1115}
1116
4373f3ce
PB
1117#define tcg_gen_ld_f32 tcg_gen_ld_i32
1118#define tcg_gen_ld_f64 tcg_gen_ld_i64
1119#define tcg_gen_st_f32 tcg_gen_st_i32
1120#define tcg_gen_st_f64 tcg_gen_st_i64
1121
b7bcbe95
FB
1122static inline void gen_mov_F0_vreg(int dp, int reg)
1123{
1124 if (dp)
4373f3ce 1125 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1126 else
4373f3ce 1127 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1128}
1129
1130static inline void gen_mov_F1_vreg(int dp, int reg)
1131{
1132 if (dp)
4373f3ce 1133 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1134 else
4373f3ce 1135 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1136}
1137
1138static inline void gen_mov_vreg_F0(int dp, int reg)
1139{
1140 if (dp)
4373f3ce 1141 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1142 else
4373f3ce 1143 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1144}
1145
18c9b560
AZ
1146#define ARM_CP_RW_BIT (1 << 20)
1147
a7812ae4 1148static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1149{
1150 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1151}
1152
a7812ae4 1153static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1154{
1155 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1156}
1157
da6b5335 1158static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1159{
7d1b0095 1160 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1161 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1162 return var;
e677137d
PB
1163}
1164
da6b5335 1165static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1166{
da6b5335 1167 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
7d1b0095 1168 tcg_temp_free_i32(var);
e677137d
PB
1169}
1170
1171static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1172{
1173 iwmmxt_store_reg(cpu_M0, rn);
1174}
1175
1176static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1177{
1178 iwmmxt_load_reg(cpu_M0, rn);
1179}
1180
1181static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1182{
1183 iwmmxt_load_reg(cpu_V1, rn);
1184 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1185}
1186
1187static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1188{
1189 iwmmxt_load_reg(cpu_V1, rn);
1190 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1191}
1192
1193static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1194{
1195 iwmmxt_load_reg(cpu_V1, rn);
1196 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1197}
1198
1199#define IWMMXT_OP(name) \
1200static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1201{ \
1202 iwmmxt_load_reg(cpu_V1, rn); \
1203 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1204}
1205
477955bd
PM
1206#define IWMMXT_OP_ENV(name) \
1207static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1208{ \
1209 iwmmxt_load_reg(cpu_V1, rn); \
1210 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1211}
1212
1213#define IWMMXT_OP_ENV_SIZE(name) \
1214IWMMXT_OP_ENV(name##b) \
1215IWMMXT_OP_ENV(name##w) \
1216IWMMXT_OP_ENV(name##l)
e677137d 1217
477955bd 1218#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1219static inline void gen_op_iwmmxt_##name##_M0(void) \
1220{ \
477955bd 1221 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1222}
1223
1224IWMMXT_OP(maddsq)
1225IWMMXT_OP(madduq)
1226IWMMXT_OP(sadb)
1227IWMMXT_OP(sadw)
1228IWMMXT_OP(mulslw)
1229IWMMXT_OP(mulshw)
1230IWMMXT_OP(mululw)
1231IWMMXT_OP(muluhw)
1232IWMMXT_OP(macsw)
1233IWMMXT_OP(macuw)
1234
477955bd
PM
1235IWMMXT_OP_ENV_SIZE(unpackl)
1236IWMMXT_OP_ENV_SIZE(unpackh)
1237
1238IWMMXT_OP_ENV1(unpacklub)
1239IWMMXT_OP_ENV1(unpackluw)
1240IWMMXT_OP_ENV1(unpacklul)
1241IWMMXT_OP_ENV1(unpackhub)
1242IWMMXT_OP_ENV1(unpackhuw)
1243IWMMXT_OP_ENV1(unpackhul)
1244IWMMXT_OP_ENV1(unpacklsb)
1245IWMMXT_OP_ENV1(unpacklsw)
1246IWMMXT_OP_ENV1(unpacklsl)
1247IWMMXT_OP_ENV1(unpackhsb)
1248IWMMXT_OP_ENV1(unpackhsw)
1249IWMMXT_OP_ENV1(unpackhsl)
1250
1251IWMMXT_OP_ENV_SIZE(cmpeq)
1252IWMMXT_OP_ENV_SIZE(cmpgtu)
1253IWMMXT_OP_ENV_SIZE(cmpgts)
1254
1255IWMMXT_OP_ENV_SIZE(mins)
1256IWMMXT_OP_ENV_SIZE(minu)
1257IWMMXT_OP_ENV_SIZE(maxs)
1258IWMMXT_OP_ENV_SIZE(maxu)
1259
1260IWMMXT_OP_ENV_SIZE(subn)
1261IWMMXT_OP_ENV_SIZE(addn)
1262IWMMXT_OP_ENV_SIZE(subu)
1263IWMMXT_OP_ENV_SIZE(addu)
1264IWMMXT_OP_ENV_SIZE(subs)
1265IWMMXT_OP_ENV_SIZE(adds)
1266
1267IWMMXT_OP_ENV(avgb0)
1268IWMMXT_OP_ENV(avgb1)
1269IWMMXT_OP_ENV(avgw0)
1270IWMMXT_OP_ENV(avgw1)
e677137d
PB
1271
1272IWMMXT_OP(msadb)
1273
477955bd
PM
1274IWMMXT_OP_ENV(packuw)
1275IWMMXT_OP_ENV(packul)
1276IWMMXT_OP_ENV(packuq)
1277IWMMXT_OP_ENV(packsw)
1278IWMMXT_OP_ENV(packsl)
1279IWMMXT_OP_ENV(packsq)
e677137d 1280
e677137d
PB
1281static void gen_op_iwmmxt_set_mup(void)
1282{
1283 TCGv tmp;
1284 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1285 tcg_gen_ori_i32(tmp, tmp, 2);
1286 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1287}
1288
1289static void gen_op_iwmmxt_set_cup(void)
1290{
1291 TCGv tmp;
1292 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1293 tcg_gen_ori_i32(tmp, tmp, 1);
1294 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1295}
1296
1297static void gen_op_iwmmxt_setpsr_nz(void)
1298{
7d1b0095 1299 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1300 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1301 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1302}
1303
1304static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1305{
1306 iwmmxt_load_reg(cpu_V1, rn);
86831435 1307 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1308 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1309}
1310
da6b5335 1311static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1312{
1313 int rd;
1314 uint32_t offset;
da6b5335 1315 TCGv tmp;
18c9b560
AZ
1316
1317 rd = (insn >> 16) & 0xf;
da6b5335 1318 tmp = load_reg(s, rd);
18c9b560
AZ
1319
1320 offset = (insn & 0xff) << ((insn >> 7) & 2);
1321 if (insn & (1 << 24)) {
1322 /* Pre indexed */
1323 if (insn & (1 << 23))
da6b5335 1324 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1325 else
da6b5335
FN
1326 tcg_gen_addi_i32(tmp, tmp, -offset);
1327 tcg_gen_mov_i32(dest, tmp);
18c9b560 1328 if (insn & (1 << 21))
da6b5335
FN
1329 store_reg(s, rd, tmp);
1330 else
7d1b0095 1331 tcg_temp_free_i32(tmp);
18c9b560
AZ
1332 } else if (insn & (1 << 21)) {
1333 /* Post indexed */
da6b5335 1334 tcg_gen_mov_i32(dest, tmp);
18c9b560 1335 if (insn & (1 << 23))
da6b5335 1336 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1337 else
da6b5335
FN
1338 tcg_gen_addi_i32(tmp, tmp, -offset);
1339 store_reg(s, rd, tmp);
18c9b560
AZ
1340 } else if (!(insn & (1 << 23)))
1341 return 1;
1342 return 0;
1343}
1344
da6b5335 1345static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1346{
1347 int rd = (insn >> 0) & 0xf;
da6b5335 1348 TCGv tmp;
18c9b560 1349
da6b5335
FN
1350 if (insn & (1 << 8)) {
1351 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1352 return 1;
da6b5335
FN
1353 } else {
1354 tmp = iwmmxt_load_creg(rd);
1355 }
1356 } else {
7d1b0095 1357 tmp = tcg_temp_new_i32();
da6b5335
FN
1358 iwmmxt_load_reg(cpu_V0, rd);
1359 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1360 }
1361 tcg_gen_andi_i32(tmp, tmp, mask);
1362 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1363 tcg_temp_free_i32(tmp);
18c9b560
AZ
1364 return 0;
1365}
1366
a1c7273b 1367/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560
AZ
1368 (ie. an undefined instruction). */
1369static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1370{
1371 int rd, wrd;
1372 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1373 TCGv addr;
1374 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1375
1376 if ((insn & 0x0e000e00) == 0x0c000000) {
1377 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1378 wrd = insn & 0xf;
1379 rdlo = (insn >> 12) & 0xf;
1380 rdhi = (insn >> 16) & 0xf;
1381 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1382 iwmmxt_load_reg(cpu_V0, wrd);
1383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1386 } else { /* TMCRR */
da6b5335
FN
1387 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1388 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1389 gen_op_iwmmxt_set_mup();
1390 }
1391 return 0;
1392 }
1393
1394 wrd = (insn >> 12) & 0xf;
7d1b0095 1395 addr = tcg_temp_new_i32();
da6b5335 1396 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1397 tcg_temp_free_i32(addr);
18c9b560 1398 return 1;
da6b5335 1399 }
18c9b560
AZ
1400 if (insn & ARM_CP_RW_BIT) {
1401 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1402 tmp = tcg_temp_new_i32();
da6b5335
FN
1403 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1404 iwmmxt_store_creg(wrd, tmp);
18c9b560 1405 } else {
e677137d
PB
1406 i = 1;
1407 if (insn & (1 << 8)) {
1408 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1409 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1410 i = 0;
1411 } else { /* WLDRW wRd */
da6b5335 1412 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1416 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1417 } else { /* WLDRB */
da6b5335 1418 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1419 }
1420 }
1421 if (i) {
1422 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1423 tcg_temp_free_i32(tmp);
e677137d 1424 }
18c9b560
AZ
1425 gen_op_iwmmxt_movq_wRn_M0(wrd);
1426 }
1427 } else {
1428 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1429 tmp = iwmmxt_load_creg(wrd);
1430 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1431 } else {
1432 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1433 tmp = tcg_temp_new_i32();
e677137d
PB
1434 if (insn & (1 << 8)) {
1435 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1436 tcg_temp_free_i32(tmp);
da6b5335 1437 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1438 } else { /* WSTRW wRd */
1439 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1440 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1441 }
1442 } else {
1443 if (insn & (1 << 22)) { /* WSTRH */
1444 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1445 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1446 } else { /* WSTRB */
1447 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1448 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1449 }
1450 }
18c9b560
AZ
1451 }
1452 }
7d1b0095 1453 tcg_temp_free_i32(addr);
18c9b560
AZ
1454 return 0;
1455 }
1456
1457 if ((insn & 0x0f000000) != 0x0e000000)
1458 return 1;
1459
1460 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1461 case 0x000: /* WOR */
1462 wrd = (insn >> 12) & 0xf;
1463 rd0 = (insn >> 0) & 0xf;
1464 rd1 = (insn >> 16) & 0xf;
1465 gen_op_iwmmxt_movq_M0_wRn(rd0);
1466 gen_op_iwmmxt_orq_M0_wRn(rd1);
1467 gen_op_iwmmxt_setpsr_nz();
1468 gen_op_iwmmxt_movq_wRn_M0(wrd);
1469 gen_op_iwmmxt_set_mup();
1470 gen_op_iwmmxt_set_cup();
1471 break;
1472 case 0x011: /* TMCR */
1473 if (insn & 0xf)
1474 return 1;
1475 rd = (insn >> 12) & 0xf;
1476 wrd = (insn >> 16) & 0xf;
1477 switch (wrd) {
1478 case ARM_IWMMXT_wCID:
1479 case ARM_IWMMXT_wCASF:
1480 break;
1481 case ARM_IWMMXT_wCon:
1482 gen_op_iwmmxt_set_cup();
1483 /* Fall through. */
1484 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1485 tmp = iwmmxt_load_creg(wrd);
1486 tmp2 = load_reg(s, rd);
f669df27 1487 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1488 tcg_temp_free_i32(tmp2);
da6b5335 1489 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1490 break;
1491 case ARM_IWMMXT_wCGR0:
1492 case ARM_IWMMXT_wCGR1:
1493 case ARM_IWMMXT_wCGR2:
1494 case ARM_IWMMXT_wCGR3:
1495 gen_op_iwmmxt_set_cup();
da6b5335
FN
1496 tmp = load_reg(s, rd);
1497 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1498 break;
1499 default:
1500 return 1;
1501 }
1502 break;
1503 case 0x100: /* WXOR */
1504 wrd = (insn >> 12) & 0xf;
1505 rd0 = (insn >> 0) & 0xf;
1506 rd1 = (insn >> 16) & 0xf;
1507 gen_op_iwmmxt_movq_M0_wRn(rd0);
1508 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1509 gen_op_iwmmxt_setpsr_nz();
1510 gen_op_iwmmxt_movq_wRn_M0(wrd);
1511 gen_op_iwmmxt_set_mup();
1512 gen_op_iwmmxt_set_cup();
1513 break;
1514 case 0x111: /* TMRC */
1515 if (insn & 0xf)
1516 return 1;
1517 rd = (insn >> 12) & 0xf;
1518 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1519 tmp = iwmmxt_load_creg(wrd);
1520 store_reg(s, rd, tmp);
18c9b560
AZ
1521 break;
1522 case 0x300: /* WANDN */
1523 wrd = (insn >> 12) & 0xf;
1524 rd0 = (insn >> 0) & 0xf;
1525 rd1 = (insn >> 16) & 0xf;
1526 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1527 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1528 gen_op_iwmmxt_andq_M0_wRn(rd1);
1529 gen_op_iwmmxt_setpsr_nz();
1530 gen_op_iwmmxt_movq_wRn_M0(wrd);
1531 gen_op_iwmmxt_set_mup();
1532 gen_op_iwmmxt_set_cup();
1533 break;
1534 case 0x200: /* WAND */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_andq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x810: case 0xa10: /* WMADD */
1546 wrd = (insn >> 12) & 0xf;
1547 rd0 = (insn >> 0) & 0xf;
1548 rd1 = (insn >> 16) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0);
1550 if (insn & (1 << 21))
1551 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1552 else
1553 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 break;
1557 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1558 wrd = (insn >> 12) & 0xf;
1559 rd0 = (insn >> 16) & 0xf;
1560 rd1 = (insn >> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0);
1562 switch ((insn >> 22) & 3) {
1563 case 0:
1564 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1565 break;
1566 case 1:
1567 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1568 break;
1569 case 2:
1570 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1571 break;
1572 case 3:
1573 return 1;
1574 }
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 gen_op_iwmmxt_set_cup();
1578 break;
1579 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1580 wrd = (insn >> 12) & 0xf;
1581 rd0 = (insn >> 16) & 0xf;
1582 rd1 = (insn >> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0);
1584 switch ((insn >> 22) & 3) {
1585 case 0:
1586 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1587 break;
1588 case 1:
1589 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1590 break;
1591 case 2:
1592 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1593 break;
1594 case 3:
1595 return 1;
1596 }
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1598 gen_op_iwmmxt_set_mup();
1599 gen_op_iwmmxt_set_cup();
1600 break;
1601 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 22))
1607 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1610 if (!(insn & (1 << 20)))
1611 gen_op_iwmmxt_addl_M0_wRn(wrd);
1612 gen_op_iwmmxt_movq_wRn_M0(wrd);
1613 gen_op_iwmmxt_set_mup();
1614 break;
1615 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1616 wrd = (insn >> 12) & 0xf;
1617 rd0 = (insn >> 16) & 0xf;
1618 rd1 = (insn >> 0) & 0xf;
1619 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1620 if (insn & (1 << 21)) {
1621 if (insn & (1 << 20))
1622 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1623 else
1624 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1625 } else {
1626 if (insn & (1 << 20))
1627 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1628 else
1629 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1630 }
18c9b560
AZ
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 break;
1634 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1643 if (!(insn & (1 << 20))) {
e677137d
PB
1644 iwmmxt_load_reg(cpu_V1, wrd);
1645 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1646 }
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 break;
1650 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 16) & 0xf;
1653 rd1 = (insn >> 0) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 switch ((insn >> 22) & 3) {
1656 case 0:
1657 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1658 break;
1659 case 1:
1660 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1661 break;
1662 case 2:
1663 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1664 break;
1665 case 3:
1666 return 1;
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1671 break;
1672 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1673 wrd = (insn >> 12) & 0xf;
1674 rd0 = (insn >> 16) & 0xf;
1675 rd1 = (insn >> 0) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1677 if (insn & (1 << 22)) {
1678 if (insn & (1 << 20))
1679 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1680 else
1681 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1682 } else {
1683 if (insn & (1 << 20))
1684 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1685 else
1686 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1687 }
18c9b560
AZ
1688 gen_op_iwmmxt_movq_wRn_M0(wrd);
1689 gen_op_iwmmxt_set_mup();
1690 gen_op_iwmmxt_set_cup();
1691 break;
1692 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1697 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1698 tcg_gen_andi_i32(tmp, tmp, 7);
1699 iwmmxt_load_reg(cpu_V1, rd1);
1700 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1701 tcg_temp_free_i32(tmp);
18c9b560
AZ
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 break;
1705 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1706 if (((insn >> 6) & 3) == 3)
1707 return 1;
18c9b560
AZ
1708 rd = (insn >> 12) & 0xf;
1709 wrd = (insn >> 16) & 0xf;
da6b5335 1710 tmp = load_reg(s, rd);
18c9b560
AZ
1711 gen_op_iwmmxt_movq_M0_wRn(wrd);
1712 switch ((insn >> 6) & 3) {
1713 case 0:
da6b5335
FN
1714 tmp2 = tcg_const_i32(0xff);
1715 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1716 break;
1717 case 1:
da6b5335
FN
1718 tmp2 = tcg_const_i32(0xffff);
1719 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1720 break;
1721 case 2:
da6b5335
FN
1722 tmp2 = tcg_const_i32(0xffffffff);
1723 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1724 break;
da6b5335
FN
1725 default:
1726 TCGV_UNUSED(tmp2);
1727 TCGV_UNUSED(tmp3);
18c9b560 1728 }
da6b5335
FN
1729 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1730 tcg_temp_free(tmp3);
1731 tcg_temp_free(tmp2);
7d1b0095 1732 tcg_temp_free_i32(tmp);
18c9b560
AZ
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1737 rd = (insn >> 12) & 0xf;
1738 wrd = (insn >> 16) & 0xf;
da6b5335 1739 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1740 return 1;
1741 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1742 tmp = tcg_temp_new_i32();
18c9b560
AZ
1743 switch ((insn >> 22) & 3) {
1744 case 0:
da6b5335
FN
1745 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1746 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1747 if (insn & 8) {
1748 tcg_gen_ext8s_i32(tmp, tmp);
1749 } else {
1750 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1751 }
1752 break;
1753 case 1:
da6b5335
FN
1754 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1755 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1756 if (insn & 8) {
1757 tcg_gen_ext16s_i32(tmp, tmp);
1758 } else {
1759 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1760 }
1761 break;
1762 case 2:
da6b5335
FN
1763 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1764 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1765 break;
18c9b560 1766 }
da6b5335 1767 store_reg(s, rd, tmp);
18c9b560
AZ
1768 break;
1769 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1770 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1771 return 1;
da6b5335 1772 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1773 switch ((insn >> 22) & 3) {
1774 case 0:
da6b5335 1775 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1776 break;
1777 case 1:
da6b5335 1778 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1779 break;
1780 case 2:
da6b5335 1781 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1782 break;
18c9b560 1783 }
da6b5335
FN
1784 tcg_gen_shli_i32(tmp, tmp, 28);
1785 gen_set_nzcv(tmp);
7d1b0095 1786 tcg_temp_free_i32(tmp);
18c9b560
AZ
1787 break;
1788 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1789 if (((insn >> 6) & 3) == 3)
1790 return 1;
18c9b560
AZ
1791 rd = (insn >> 12) & 0xf;
1792 wrd = (insn >> 16) & 0xf;
da6b5335 1793 tmp = load_reg(s, rd);
18c9b560
AZ
1794 switch ((insn >> 6) & 3) {
1795 case 0:
da6b5335 1796 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1797 break;
1798 case 1:
da6b5335 1799 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1800 break;
1801 case 2:
da6b5335 1802 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1803 break;
18c9b560 1804 }
7d1b0095 1805 tcg_temp_free_i32(tmp);
18c9b560
AZ
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1810 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1811 return 1;
da6b5335 1812 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1813 tmp2 = tcg_temp_new_i32();
da6b5335 1814 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1815 switch ((insn >> 22) & 3) {
1816 case 0:
1817 for (i = 0; i < 7; i ++) {
da6b5335
FN
1818 tcg_gen_shli_i32(tmp2, tmp2, 4);
1819 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1820 }
1821 break;
1822 case 1:
1823 for (i = 0; i < 3; i ++) {
da6b5335
FN
1824 tcg_gen_shli_i32(tmp2, tmp2, 8);
1825 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1826 }
1827 break;
1828 case 2:
da6b5335
FN
1829 tcg_gen_shli_i32(tmp2, tmp2, 16);
1830 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1831 break;
18c9b560 1832 }
da6b5335 1833 gen_set_nzcv(tmp);
7d1b0095
PM
1834 tcg_temp_free_i32(tmp2);
1835 tcg_temp_free_i32(tmp);
18c9b560
AZ
1836 break;
1837 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 switch ((insn >> 22) & 3) {
1842 case 0:
e677137d 1843 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1844 break;
1845 case 1:
e677137d 1846 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1847 break;
1848 case 2:
e677137d 1849 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1850 break;
1851 case 3:
1852 return 1;
1853 }
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 break;
1857 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1858 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1859 return 1;
da6b5335 1860 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1861 tmp2 = tcg_temp_new_i32();
da6b5335 1862 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1863 switch ((insn >> 22) & 3) {
1864 case 0:
1865 for (i = 0; i < 7; i ++) {
da6b5335
FN
1866 tcg_gen_shli_i32(tmp2, tmp2, 4);
1867 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1868 }
1869 break;
1870 case 1:
1871 for (i = 0; i < 3; i ++) {
da6b5335
FN
1872 tcg_gen_shli_i32(tmp2, tmp2, 8);
1873 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1874 }
1875 break;
1876 case 2:
da6b5335
FN
1877 tcg_gen_shli_i32(tmp2, tmp2, 16);
1878 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1879 break;
18c9b560 1880 }
da6b5335 1881 gen_set_nzcv(tmp);
7d1b0095
PM
1882 tcg_temp_free_i32(tmp2);
1883 tcg_temp_free_i32(tmp);
18c9b560
AZ
1884 break;
1885 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1886 rd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
da6b5335 1888 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1889 return 1;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1891 tmp = tcg_temp_new_i32();
18c9b560
AZ
1892 switch ((insn >> 22) & 3) {
1893 case 0:
da6b5335 1894 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1895 break;
1896 case 1:
da6b5335 1897 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1898 break;
1899 case 2:
da6b5335 1900 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1901 break;
18c9b560 1902 }
da6b5335 1903 store_reg(s, rd, tmp);
18c9b560
AZ
1904 break;
1905 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1906 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
1911 switch ((insn >> 22) & 3) {
1912 case 0:
1913 if (insn & (1 << 21))
1914 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1915 else
1916 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1917 break;
1918 case 1:
1919 if (insn & (1 << 21))
1920 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1923 break;
1924 case 2:
1925 if (insn & (1 << 21))
1926 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1927 else
1928 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1929 break;
1930 case 3:
1931 return 1;
1932 }
1933 gen_op_iwmmxt_movq_wRn_M0(wrd);
1934 gen_op_iwmmxt_set_mup();
1935 gen_op_iwmmxt_set_cup();
1936 break;
1937 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1938 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1939 wrd = (insn >> 12) & 0xf;
1940 rd0 = (insn >> 16) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_unpacklsb_M0();
1946 else
1947 gen_op_iwmmxt_unpacklub_M0();
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_unpacklsw_M0();
1952 else
1953 gen_op_iwmmxt_unpackluw_M0();
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_unpacklsl_M0();
1958 else
1959 gen_op_iwmmxt_unpacklul_M0();
1960 break;
1961 case 3:
1962 return 1;
1963 }
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1969 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpackhsb_M0();
1977 else
1978 gen_op_iwmmxt_unpackhub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpackhsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackhuw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpackhsl_M0();
1989 else
1990 gen_op_iwmmxt_unpackhul_M0();
1991 break;
1992 case 3:
1993 return 1;
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
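    /* For the shift/rotate group below (WSRL, WSRA, WSLL, WROR),
     * gen_iwmmxt_shift() fetches the shift count into a temporary, masks it
     * to the range valid for the element size, and returns nonzero when the
     * count encoding is invalid, in which case the instruction UNDEFs.
     */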
1999 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2000 case 0x214: case 0x614: case 0xa14: case 0xe14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2006 tmp = tcg_temp_new_i32();
da6b5335 2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2008 tcg_temp_free_i32(tmp);
18c9b560 2009 return 1;
da6b5335 2010 }
18c9b560 2011 switch ((insn >> 22) & 3) {
18c9b560 2012 case 1:
477955bd 2013 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2014 break;
2015 case 2:
477955bd 2016 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2017 break;
2018 case 3:
477955bd 2019 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2020 break;
2021 }
7d1b0095 2022 tcg_temp_free_i32(tmp);
18c9b560
AZ
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2028 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
18c9b560
AZ
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2034 tmp = tcg_temp_new_i32();
da6b5335 2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2036 tcg_temp_free_i32(tmp);
18c9b560 2037 return 1;
da6b5335 2038 }
18c9b560 2039 switch ((insn >> 22) & 3) {
18c9b560 2040 case 1:
477955bd 2041 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2042 break;
2043 case 2:
477955bd 2044 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2045 break;
2046 case 3:
477955bd 2047 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2048 break;
2049 }
7d1b0095 2050 tcg_temp_free_i32(tmp);
18c9b560
AZ
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2056 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
18c9b560
AZ
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2062 tmp = tcg_temp_new_i32();
da6b5335 2063 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2064 tcg_temp_free_i32(tmp);
18c9b560 2065 return 1;
da6b5335 2066 }
18c9b560 2067 switch ((insn >> 22) & 3) {
18c9b560 2068 case 1:
477955bd 2069 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2070 break;
2071 case 2:
477955bd 2072 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2073 break;
2074 case 3:
477955bd 2075 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 }
7d1b0095 2078 tcg_temp_free_i32(tmp);
18c9b560
AZ
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2084 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2085 if (((insn >> 22) & 3) == 0)
2086 return 1;
18c9b560
AZ
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2090 tmp = tcg_temp_new_i32();
18c9b560 2091 switch ((insn >> 22) & 3) {
18c9b560 2092 case 1:
da6b5335 2093 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2094 tcg_temp_free_i32(tmp);
18c9b560 2095 return 1;
da6b5335 2096 }
477955bd 2097 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2098 break;
2099 case 2:
da6b5335 2100 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2101 tcg_temp_free_i32(tmp);
18c9b560 2102 return 1;
da6b5335 2103 }
477955bd 2104 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2105 break;
2106 case 3:
da6b5335 2107 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2108 tcg_temp_free_i32(tmp);
18c9b560 2109 return 1;
da6b5335 2110 }
477955bd 2111 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2112 break;
2113 }
7d1b0095 2114 tcg_temp_free_i32(tmp);
18c9b560
AZ
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2120 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 rd1 = (insn >> 0) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 switch ((insn >> 22) & 3) {
2126 case 0:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_minub_M0_wRn(rd1);
2131 break;
2132 case 1:
2133 if (insn & (1 << 21))
2134 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2135 else
2136 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2137 break;
2138 case 2:
2139 if (insn & (1 << 21))
2140 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2141 else
2142 gen_op_iwmmxt_minul_M0_wRn(rd1);
2143 break;
2144 case 3:
2145 return 1;
2146 }
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2151 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2182 case 0x402: case 0x502: case 0x602: case 0x702:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2187 tmp = tcg_const_i32((insn >> 20) & 3);
2188 iwmmxt_load_reg(cpu_V1, rd1);
2189 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2190 tcg_temp_free(tmp);
18c9b560
AZ
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 break;
2194 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2195 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2196 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2197 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 20) & 0xf) {
2203 case 0x0:
2204 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2205 break;
2206 case 0x1:
2207 gen_op_iwmmxt_subub_M0_wRn(rd1);
2208 break;
2209 case 0x3:
2210 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2211 break;
2212 case 0x4:
2213 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2214 break;
2215 case 0x5:
2216 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2217 break;
2218 case 0x7:
2219 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2220 break;
2221 case 0x8:
2222 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2223 break;
2224 case 0x9:
2225 gen_op_iwmmxt_subul_M0_wRn(rd1);
2226 break;
2227 case 0xb:
2228 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2229 break;
2230 default:
2231 return 1;
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2236 break;
2237 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2238 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2239 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2240 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2244 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2245 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2246 tcg_temp_free(tmp);
18c9b560
AZ
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2252 case 0x418: case 0x518: case 0x618: case 0x718:
2253 case 0x818: case 0x918: case 0xa18: case 0xb18:
2254 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 rd1 = (insn >> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 20) & 0xf) {
2260 case 0x0:
2261 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2262 break;
2263 case 0x1:
2264 gen_op_iwmmxt_addub_M0_wRn(rd1);
2265 break;
2266 case 0x3:
2267 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2268 break;
2269 case 0x4:
2270 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2271 break;
2272 case 0x5:
2273 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2274 break;
2275 case 0x7:
2276 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2277 break;
2278 case 0x8:
2279 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2280 break;
2281 case 0x9:
2282 gen_op_iwmmxt_addul_M0_wRn(rd1);
2283 break;
2284 case 0xb:
2285 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2286 break;
2287 default:
2288 return 1;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2295 case 0x408: case 0x508: case 0x608: case 0x708:
2296 case 0x808: case 0x908: case 0xa08: case 0xb08:
2297 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2298 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2299 return 1;
18c9b560
AZ
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2304 switch ((insn >> 22) & 3) {
18c9b560
AZ
2305 case 1:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2308 else
2309 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2310 break;
2311 case 2:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_packul_M0_wRn(rd1);
2316 break;
2317 case 3:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2322 break;
2323 }
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
2328 case 0x201: case 0x203: case 0x205: case 0x207:
2329 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2330 case 0x211: case 0x213: case 0x215: case 0x217:
2331 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2332 wrd = (insn >> 5) & 0xf;
2333 rd0 = (insn >> 12) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 if (rd0 == 0xf || rd1 == 0xf)
2336 return 1;
2337 gen_op_iwmmxt_movq_M0_wRn(wrd);
2338 tmp = load_reg(s, rd0);
2339 tmp2 = load_reg(s, rd1);
2340 switch ((insn >> 16) & 0xf) {
2341 case 0x0: /* TMIA */
da6b5335 2342 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2343 break;
2344 case 0x8: /* TMIAPH */
da6b5335 2345 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2346 break;
2347 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2348 if (insn & (1 << 16))
da6b5335 2349 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2350 if (insn & (1 << 17))
2351 tcg_gen_shri_i32(tmp2, tmp2, 16);
2352 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2353 break;
2354 default:
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
2357 return 1;
2358 }
2359 tcg_temp_free_i32(tmp2);
2360 tcg_temp_free_i32(tmp);
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 break;
2364 default:
2365 return 1;
2366 }
2367
2368 return 0;
2369}
2370
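/* The XScale DSP instructions below (MIA/MIAPH/MIAxy and MAR/MRA) reuse the
 * iWMMXt 64-bit accumulator helpers; only accumulator 0 is implemented, and
 * MRA masks the high word down to the architectural 40-bit accumulator width.
 */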
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                   /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                   /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                   /* MIABB */
        case 0xd:                   /* MIABT */
        case 0xe:                   /* MIATB */
        case 0xf:                   /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {             /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
2440
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined. */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}
2473
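/* For the cp15 helpers below, 'op' packs the opcode fields of the MRC/MCR
 * encoding as (opc1 << 3) | opc2: bits [7:5] of the instruction give opc2
 * and bits [23:21] give opc1.
 */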
static int cp15_user_ok(CPUState *env, uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
        /* Performance monitor registers fall into three categories:
         *  (a) always UNDEF in usermode
         *  (b) UNDEF only if PMUSERENR.EN is 0
         *  (c) always read OK and UNDEF on write (PMUSERENR only)
         */
        if ((cpm == 12 && (op < 6)) ||
            (cpm == 13 && (op < 3))) {
            return env->cp15.c9_pmuserenr;
        } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
            /* PMUSERENR, read only */
            return 1;
        }
        return 0;
    }

    if (cpn == 13 && cpm == 0) {
        /* TLS register. */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    return 0;
}
2503
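/* c13/c0 opcodes 2..4 are the V6K software thread ID registers (user
 * read/write, user read-only and privileged-only respectively); they are
 * loaded and stored inline here instead of going through the generic
 * get_cp15/set_cp15 helpers.
 */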
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);

    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
2552
b5ff1b31
FB
2553/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2554 instruction is not defined. */
a90b7318 2555static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2556{
2557 uint32_t rd;
b75263d6 2558 TCGv tmp, tmp2;
b5ff1b31 2559
9ee6e8bb
PB
2560 /* M profile cores use memory mapped registers instead of cp15. */
2561 if (arm_feature(env, ARM_FEATURE_M))
2562 return 1;
2563
2564 if ((insn & (1 << 25)) == 0) {
2565 if (insn & (1 << 20)) {
2566 /* mrrc */
2567 return 1;
2568 }
2569 /* mcrr. Used for block cache operations, so implement as no-op. */
2570 return 0;
2571 }
2572 if ((insn & (1 << 4)) == 0) {
2573 /* cdp */
2574 return 1;
2575 }
4ec648dd
PM
2576 /* We special case a number of cp15 instructions which were used
2577 * for things which are real instructions in ARMv7. This allows
2578 * them to work in linux-user mode which doesn't provide functional
2579 * get_cp15/set_cp15 helpers, and is more efficient anyway.
cc688901 2580 */
4ec648dd
PM
2581 switch ((insn & 0x0fff0fff)) {
2582 case 0x0e070f90:
cc688901
PM
2583 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2584 * In v7, this must NOP.
2585 */
4ec648dd
PM
2586 if (IS_USER(s)) {
2587 return 1;
2588 }
cc688901
PM
2589 if (!arm_feature(env, ARM_FEATURE_V7)) {
2590 /* Wait for interrupt. */
2591 gen_set_pc_im(s->pc);
2592 s->is_jmp = DISAS_WFI;
2593 }
9332f9da 2594 return 0;
4ec648dd 2595 case 0x0e070f58:
cc688901
PM
2596 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2597 * so this is slightly over-broad.
2598 */
4ec648dd 2599 if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
cc688901
PM
2600 /* Wait for interrupt. */
2601 gen_set_pc_im(s->pc);
2602 s->is_jmp = DISAS_WFI;
2603 return 0;
2604 }
4ec648dd 2605 /* Otherwise continue to handle via helper function.
cc688901
PM
2606 * In particular, on v7 and some v6 cores this is one of
2607 * the VA-PA registers.
2608 */
4ec648dd
PM
2609 break;
2610 case 0x0e070f3d:
2611 /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
2612 if (arm_feature(env, ARM_FEATURE_V6)) {
2613 return IS_USER(s) ? 1 : 0;
2614 }
2615 break;
2616 case 0x0e070f95: /* 0,c7,c5,4 : ISB */
2617 case 0x0e070f9a: /* 0,c7,c10,4: DSB */
2618 case 0x0e070fba: /* 0,c7,c10,5: DMB */
2619 /* Barriers in both v6 and v7 */
2620 if (arm_feature(env, ARM_FEATURE_V6)) {
2621 return 0;
2622 }
2623 break;
2624 default:
2625 break;
2626 }
2627
2628 if (IS_USER(s) && !cp15_user_ok(env, insn)) {
2629 return 1;
cc688901
PM
2630 }
2631
b5ff1b31 2632 rd = (insn >> 12) & 0xf;
3f26c122
RV
2633
2634 if (cp15_tls_load_store(env, s, insn, rd))
2635 return 0;
2636
b75263d6 2637 tmp2 = tcg_const_i32(insn);
18c9b560 2638 if (insn & ARM_CP_RW_BIT) {
7d1b0095 2639 tmp = tcg_temp_new_i32();
b75263d6 2640 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2641 /* If the destination register is r15 then sets condition codes. */
2642 if (rd != 15)
8984bd2e
PB
2643 store_reg(s, rd, tmp);
2644 else
7d1b0095 2645 tcg_temp_free_i32(tmp);
b5ff1b31 2646 } else {
8984bd2e 2647 tmp = load_reg(s, rd);
b75263d6 2648 gen_helper_set_cp15(cpu_env, tmp2, tmp);
7d1b0095 2649 tcg_temp_free_i32(tmp);
a90b7318
AZ
2650 /* Normally we would always end the TB here, but Linux
2651 * arch/arm/mach-pxa/sleep.S expects two instructions following
2652 * an MMU enable to execute from cache. Imitate this behaviour. */
2653 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2654 (insn & 0x0fff0fff) != 0x0e010f10)
2655 gen_lookup_tb(s);
b5ff1b31 2656 }
b75263d6 2657 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2658 return 0;
2659}
2660
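/* VFP data-processing instructions encode each register as a 4-bit field plus
 * one extra bit.  For single-precision registers the two parts simply form a
 * 5-bit register number (VFP_SREG).  For double-precision registers the extra
 * bit is only meaningful as bit 4 of the register number when VFP3 (D0-D31)
 * is present; otherwise it must be zero and VFP_DREG makes the instruction
 * UNDEF by returning 1.
 */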
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2680
/* Move between integer and VFP cores. */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
2694
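/* The gen_neon_dup_* helpers replicate an 8-bit or 16-bit value across all
 * lanes of a 32-bit TCG value; they are used for VDUP and for Neon
 * instructions that operate on a scalar operand.
 */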
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
2725
8e18cde3
PM
2726static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2727{
2728 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2729 TCGv tmp;
2730 switch (size) {
2731 case 0:
2732 tmp = gen_ld8u(addr, IS_USER(s));
2733 gen_neon_dup_u8(tmp, 0);
2734 break;
2735 case 1:
2736 tmp = gen_ld16u(addr, IS_USER(s));
2737 gen_neon_dup_low16(tmp);
2738 break;
2739 case 2:
2740 tmp = gen_ld32(addr, IS_USER(s));
2741 break;
2742 default: /* Avoid compiler warnings. */
2743 abort();
2744 }
2745 return tmp;
2746}
2747
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
2750static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2751{
2752 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2753 int dp, veclen;
312eea9f 2754 TCGv addr;
4373f3ce 2755 TCGv tmp;
ad69471c 2756 TCGv tmp2;
b7bcbe95 2757
40f137e1
PB
2758 if (!arm_feature(env, ARM_FEATURE_VFP))
2759 return 1;
2760
5df8bac1 2761 if (!s->vfp_enabled) {
9ee6e8bb 2762 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2763 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2764 return 1;
2765 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2766 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2767 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2768 return 1;
2769 }
b7bcbe95
FB
2770 dp = ((insn & 0xf00) == 0xb00);
2771 switch ((insn >> 24) & 0xf) {
2772 case 0xe:
2773 if (insn & (1 << 4)) {
2774 /* single register transfer */
b7bcbe95
FB
2775 rd = (insn >> 12) & 0xf;
2776 if (dp) {
9ee6e8bb
PB
2777 int size;
2778 int pass;
2779
2780 VFP_DREG_N(rn, insn);
2781 if (insn & 0xf)
b7bcbe95 2782 return 1;
9ee6e8bb
PB
2783 if (insn & 0x00c00060
2784 && !arm_feature(env, ARM_FEATURE_NEON))
2785 return 1;
2786
2787 pass = (insn >> 21) & 1;
2788 if (insn & (1 << 22)) {
2789 size = 0;
2790 offset = ((insn >> 5) & 3) * 8;
2791 } else if (insn & (1 << 5)) {
2792 size = 1;
2793 offset = (insn & (1 << 6)) ? 16 : 0;
2794 } else {
2795 size = 2;
2796 offset = 0;
2797 }
18c9b560 2798 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2799 /* vfp->arm */
ad69471c 2800 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2801 switch (size) {
2802 case 0:
9ee6e8bb 2803 if (offset)
ad69471c 2804 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2805 if (insn & (1 << 23))
ad69471c 2806 gen_uxtb(tmp);
9ee6e8bb 2807 else
ad69471c 2808 gen_sxtb(tmp);
9ee6e8bb
PB
2809 break;
2810 case 1:
9ee6e8bb
PB
2811 if (insn & (1 << 23)) {
2812 if (offset) {
ad69471c 2813 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2814 } else {
ad69471c 2815 gen_uxth(tmp);
9ee6e8bb
PB
2816 }
2817 } else {
2818 if (offset) {
ad69471c 2819 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2820 } else {
ad69471c 2821 gen_sxth(tmp);
9ee6e8bb
PB
2822 }
2823 }
2824 break;
2825 case 2:
9ee6e8bb
PB
2826 break;
2827 }
ad69471c 2828 store_reg(s, rd, tmp);
b7bcbe95
FB
2829 } else {
2830 /* arm->vfp */
ad69471c 2831 tmp = load_reg(s, rd);
9ee6e8bb
PB
2832 if (insn & (1 << 23)) {
2833 /* VDUP */
2834 if (size == 0) {
ad69471c 2835 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2836 } else if (size == 1) {
ad69471c 2837 gen_neon_dup_low16(tmp);
9ee6e8bb 2838 }
cbbccffc 2839 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2840 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2841 tcg_gen_mov_i32(tmp2, tmp);
2842 neon_store_reg(rn, n, tmp2);
2843 }
2844 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2845 } else {
2846 /* VMOV */
2847 switch (size) {
2848 case 0:
ad69471c
PB
2849 tmp2 = neon_load_reg(rn, pass);
2850 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2851 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2852 break;
2853 case 1:
ad69471c
PB
2854 tmp2 = neon_load_reg(rn, pass);
2855 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2856 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2857 break;
2858 case 2:
9ee6e8bb
PB
2859 break;
2860 }
ad69471c 2861 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2862 }
b7bcbe95 2863 }
9ee6e8bb
PB
2864 } else { /* !dp */
2865 if ((insn & 0x6f) != 0x00)
2866 return 1;
2867 rn = VFP_SREG_N(insn);
18c9b560 2868 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2869 /* vfp->arm */
2870 if (insn & (1 << 21)) {
2871 /* system register */
40f137e1 2872 rn >>= 1;
9ee6e8bb 2873
b7bcbe95 2874 switch (rn) {
40f137e1 2875 case ARM_VFP_FPSID:
4373f3ce 2876 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2877 VFP3 restricts all id registers to privileged
2878 accesses. */
2879 if (IS_USER(s)
2880 && arm_feature(env, ARM_FEATURE_VFP3))
2881 return 1;
4373f3ce 2882 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2883 break;
40f137e1 2884 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2885 if (IS_USER(s))
2886 return 1;
4373f3ce 2887 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2888 break;
40f137e1
PB
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2891 /* Not present in VFP3. */
2892 if (IS_USER(s)
2893 || arm_feature(env, ARM_FEATURE_VFP3))
2894 return 1;
4373f3ce 2895 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2896 break;
40f137e1 2897 case ARM_VFP_FPSCR:
601d70b9 2898 if (rd == 15) {
4373f3ce
PB
2899 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2900 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2901 } else {
7d1b0095 2902 tmp = tcg_temp_new_i32();
4373f3ce
PB
2903 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2904 }
b7bcbe95 2905 break;
9ee6e8bb
PB
2906 case ARM_VFP_MVFR0:
2907 case ARM_VFP_MVFR1:
2908 if (IS_USER(s)
2909 || !arm_feature(env, ARM_FEATURE_VFP3))
2910 return 1;
4373f3ce 2911 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2912 break;
b7bcbe95
FB
2913 default:
2914 return 1;
2915 }
2916 } else {
2917 gen_mov_F0_vreg(0, rn);
4373f3ce 2918 tmp = gen_vfp_mrs();
b7bcbe95
FB
2919 }
2920 if (rd == 15) {
b5ff1b31 2921 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2922 gen_set_nzcv(tmp);
7d1b0095 2923 tcg_temp_free_i32(tmp);
4373f3ce
PB
2924 } else {
2925 store_reg(s, rd, tmp);
2926 }
b7bcbe95
FB
2927 } else {
2928 /* arm->vfp */
4373f3ce 2929 tmp = load_reg(s, rd);
b7bcbe95 2930 if (insn & (1 << 21)) {
40f137e1 2931 rn >>= 1;
b7bcbe95
FB
2932 /* system register */
2933 switch (rn) {
40f137e1 2934 case ARM_VFP_FPSID:
9ee6e8bb
PB
2935 case ARM_VFP_MVFR0:
2936 case ARM_VFP_MVFR1:
b7bcbe95
FB
2937 /* Writes are ignored. */
2938 break;
40f137e1 2939 case ARM_VFP_FPSCR:
4373f3ce 2940 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2941 tcg_temp_free_i32(tmp);
b5ff1b31 2942 gen_lookup_tb(s);
b7bcbe95 2943 break;
40f137e1 2944 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2945 if (IS_USER(s))
2946 return 1;
71b3c3de
JR
2947 /* TODO: VFP subarchitecture support.
2948 * For now, keep the EN bit only */
2949 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2950 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2951 gen_lookup_tb(s);
2952 break;
2953 case ARM_VFP_FPINST:
2954 case ARM_VFP_FPINST2:
4373f3ce 2955 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2956 break;
b7bcbe95
FB
2957 default:
2958 return 1;
2959 }
2960 } else {
4373f3ce 2961 gen_vfp_msr(tmp);
b7bcbe95
FB
2962 gen_mov_vreg_F0(0, rn);
2963 }
2964 }
2965 }
2966 } else {
2967 /* data processing */
2968 /* The opcode is in bits 23, 21, 20 and 6. */
2969 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2970 if (dp) {
2971 if (op == 15) {
2972 /* rn is opcode */
2973 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2974 } else {
2975 /* rn is register number */
9ee6e8bb 2976 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2977 }
2978
04595bf6 2979 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2980 /* Integer or single precision destination. */
9ee6e8bb 2981 rd = VFP_SREG_D(insn);
b7bcbe95 2982 } else {
9ee6e8bb 2983 VFP_DREG_D(rd, insn);
b7bcbe95 2984 }
04595bf6
PM
2985 if (op == 15 &&
2986 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2987 /* VCVT from int is always from S reg regardless of dp bit.
2988 * VCVT with immediate frac_bits has same format as SREG_M
2989 */
2990 rm = VFP_SREG_M(insn);
b7bcbe95 2991 } else {
9ee6e8bb 2992 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2993 }
2994 } else {
9ee6e8bb 2995 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2996 if (op == 15 && rn == 15) {
2997 /* Double precision destination. */
9ee6e8bb
PB
2998 VFP_DREG_D(rd, insn);
2999 } else {
3000 rd = VFP_SREG_D(insn);
3001 }
04595bf6
PM
3002 /* NB that we implicitly rely on the encoding for the frac_bits
3003 * in VCVT of fixed to float being the same as that of an SREG_M
3004 */
9ee6e8bb 3005 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3006 }
3007
69d1fc22 3008 veclen = s->vec_len;
b7bcbe95
FB
3009 if (op == 15 && rn > 3)
3010 veclen = 0;
3011
3012 /* Shut up compiler warnings. */
3013 delta_m = 0;
3014 delta_d = 0;
3015 bank_mask = 0;
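            /* Classic VFP short-vector handling: registers are grouped in
             * banks (8 singles or 4 doubles).  A destination in bank 0 makes
             * the operation scalar; otherwise FPSCR.LEN/STRIDE (cached in
             * s->vec_len and s->vec_stride) give the vector length and
             * register stride, and an rm in bank 0 selects a mixed
             * scalar/vector operation.
             */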
3017 if (veclen > 0) {
3018 if (dp)
3019 bank_mask = 0xc;
3020 else
3021 bank_mask = 0x18;
3022
3023 /* Figure out what type of vector operation this is. */
3024 if ((rd & bank_mask) == 0) {
3025 /* scalar */
3026 veclen = 0;
3027 } else {
3028 if (dp)
69d1fc22 3029 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3030 else
69d1fc22 3031 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3032
3033 if ((rm & bank_mask) == 0) {
3034 /* mixed scalar/vector */
3035 delta_m = 0;
3036 } else {
3037 /* vector */
3038 delta_m = delta_d;
3039 }
3040 }
3041 }
3042
3043 /* Load the initial operands. */
3044 if (op == 15) {
3045 switch (rn) {
3046 case 16:
3047 case 17:
3048 /* Integer source */
3049 gen_mov_F0_vreg(0, rm);
3050 break;
3051 case 8:
3052 case 9:
3053 /* Compare */
3054 gen_mov_F0_vreg(dp, rd);
3055 gen_mov_F1_vreg(dp, rm);
3056 break;
3057 case 10:
3058 case 11:
3059 /* Compare with zero */
3060 gen_mov_F0_vreg(dp, rd);
3061 gen_vfp_F1_ld0(dp);
3062 break;
9ee6e8bb
PB
3063 case 20:
3064 case 21:
3065 case 22:
3066 case 23:
644ad806
PB
3067 case 28:
3068 case 29:
3069 case 30:
3070 case 31:
9ee6e8bb
PB
3071 /* Source and destination the same. */
3072 gen_mov_F0_vreg(dp, rd);
3073 break;
7ec7f280
PM
3074 case 4:
3075 case 5:
3076 case 6:
3077 case 7:
3078 /* VCVTB, VCVTT: only present with the halfprec extension,
3079 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3080 */
3081 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3082 return 1;
3083 }
3084 /* Otherwise fall through */
b7bcbe95
FB
3085 default:
3086 /* One source operand. */
3087 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3088 break;
b7bcbe95
FB
3089 }
3090 } else {
3091 /* Two source operands. */
3092 gen_mov_F0_vreg(dp, rn);
3093 gen_mov_F1_vreg(dp, rm);
3094 }
3095
3096 for (;;) {
3097 /* Perform the calculation. */
3098 switch (op) {
605a6aed
PM
3099 case 0: /* VMLA: fd + (fn * fm) */
3100 /* Note that order of inputs to the add matters for NaNs */
3101 gen_vfp_F1_mul(dp);
3102 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3103 gen_vfp_add(dp);
3104 break;
605a6aed 3105 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3106 gen_vfp_mul(dp);
605a6aed
PM
3107 gen_vfp_F1_neg(dp);
3108 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3109 gen_vfp_add(dp);
3110 break;
605a6aed
PM
3111 case 2: /* VNMLS: -fd + (fn * fm) */
3112 /* Note that it isn't valid to replace (-A + B) with (B - A)
3113 * or similar plausible looking simplifications
3114 * because this will give wrong results for NaNs.
3115 */
3116 gen_vfp_F1_mul(dp);
3117 gen_mov_F0_vreg(dp, rd);
3118 gen_vfp_neg(dp);
3119 gen_vfp_add(dp);
b7bcbe95 3120 break;
605a6aed 3121 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3122 gen_vfp_mul(dp);
605a6aed
PM
3123 gen_vfp_F1_neg(dp);
3124 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3125 gen_vfp_neg(dp);
605a6aed 3126 gen_vfp_add(dp);
b7bcbe95
FB
3127 break;
3128 case 4: /* mul: fn * fm */
3129 gen_vfp_mul(dp);
3130 break;
3131 case 5: /* nmul: -(fn * fm) */
3132 gen_vfp_mul(dp);
3133 gen_vfp_neg(dp);
3134 break;
3135 case 6: /* add: fn + fm */
3136 gen_vfp_add(dp);
3137 break;
3138 case 7: /* sub: fn - fm */
3139 gen_vfp_sub(dp);
3140 break;
3141 case 8: /* div: fn / fm */
3142 gen_vfp_div(dp);
3143 break;
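                /* fconst (VMOV immediate): the 8-bit encoded immediate expands
                 * to a full float constant whose fraction has at most its top
                 * four bits set, so for doubles it is enough to construct the
                 * high 32 bits and leave the rest zero.
                 */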
3144 case 14: /* fconst */
3145 if (!arm_feature(env, ARM_FEATURE_VFP3))
3146 return 1;
3147
3148 n = (insn << 12) & 0x80000000;
3149 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3150 if (dp) {
3151 if (i & 0x40)
3152 i |= 0x3f80;
3153 else
3154 i |= 0x4000;
3155 n |= i << 16;
4373f3ce 3156 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3157 } else {
3158 if (i & 0x40)
3159 i |= 0x780;
3160 else
3161 i |= 0x800;
3162 n |= i << 19;
5b340b51 3163 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3164 }
9ee6e8bb 3165 break;
b7bcbe95
FB
3166 case 15: /* extension space */
3167 switch (rn) {
3168 case 0: /* cpy */
3169 /* no-op */
3170 break;
3171 case 1: /* abs */
3172 gen_vfp_abs(dp);
3173 break;
3174 case 2: /* neg */
3175 gen_vfp_neg(dp);
3176 break;
3177 case 3: /* sqrt */
3178 gen_vfp_sqrt(dp);
3179 break;
60011498 3180 case 4: /* vcvtb.f32.f16 */
60011498
PB
3181 tmp = gen_vfp_mrs();
3182 tcg_gen_ext16u_i32(tmp, tmp);
3183 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3184 tcg_temp_free_i32(tmp);
60011498
PB
3185 break;
3186 case 5: /* vcvtt.f32.f16 */
60011498
PB
3187 tmp = gen_vfp_mrs();
3188 tcg_gen_shri_i32(tmp, tmp, 16);
3189 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3190 tcg_temp_free_i32(tmp);
60011498
PB
3191 break;
3192 case 6: /* vcvtb.f16.f32 */
7d1b0095 3193 tmp = tcg_temp_new_i32();
60011498
PB
3194 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3195 gen_mov_F0_vreg(0, rd);
3196 tmp2 = gen_vfp_mrs();
3197 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3198 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3199 tcg_temp_free_i32(tmp2);
60011498
PB
3200 gen_vfp_msr(tmp);
3201 break;
3202 case 7: /* vcvtt.f16.f32 */
7d1b0095 3203 tmp = tcg_temp_new_i32();
60011498
PB
3204 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3205 tcg_gen_shli_i32(tmp, tmp, 16);
3206 gen_mov_F0_vreg(0, rd);
3207 tmp2 = gen_vfp_mrs();
3208 tcg_gen_ext16u_i32(tmp2, tmp2);
3209 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3210 tcg_temp_free_i32(tmp2);
60011498
PB
3211 gen_vfp_msr(tmp);
3212 break;
b7bcbe95
FB
3213 case 8: /* cmp */
3214 gen_vfp_cmp(dp);
3215 break;
3216 case 9: /* cmpe */
3217 gen_vfp_cmpe(dp);
3218 break;
3219 case 10: /* cmpz */
3220 gen_vfp_cmp(dp);
3221 break;
3222 case 11: /* cmpez */
3223 gen_vfp_F1_ld0(dp);
3224 gen_vfp_cmpe(dp);
3225 break;
3226 case 15: /* single<->double conversion */
3227 if (dp)
4373f3ce 3228 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3229 else
4373f3ce 3230 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3231 break;
3232 case 16: /* fuito */
5500b06c 3233 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3234 break;
3235 case 17: /* fsito */
5500b06c 3236 gen_vfp_sito(dp, 0);
b7bcbe95 3237 break;
9ee6e8bb
PB
3238 case 20: /* fshto */
3239 if (!arm_feature(env, ARM_FEATURE_VFP3))
3240 return 1;
5500b06c 3241 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3242 break;
3243 case 21: /* fslto */
3244 if (!arm_feature(env, ARM_FEATURE_VFP3))
3245 return 1;
5500b06c 3246 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3247 break;
3248 case 22: /* fuhto */
3249 if (!arm_feature(env, ARM_FEATURE_VFP3))
3250 return 1;
5500b06c 3251 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3252 break;
3253 case 23: /* fulto */
3254 if (!arm_feature(env, ARM_FEATURE_VFP3))
3255 return 1;
5500b06c 3256 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3257 break;
b7bcbe95 3258 case 24: /* ftoui */
5500b06c 3259 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3260 break;
3261 case 25: /* ftouiz */
5500b06c 3262 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3263 break;
3264 case 26: /* ftosi */
5500b06c 3265 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3266 break;
3267 case 27: /* ftosiz */
5500b06c 3268 gen_vfp_tosiz(dp, 0);
b7bcbe95 3269 break;
9ee6e8bb
PB
3270 case 28: /* ftosh */
3271 if (!arm_feature(env, ARM_FEATURE_VFP3))
3272 return 1;
5500b06c 3273 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3274 break;
3275 case 29: /* ftosl */
3276 if (!arm_feature(env, ARM_FEATURE_VFP3))
3277 return 1;
5500b06c 3278 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3279 break;
3280 case 30: /* ftouh */
3281 if (!arm_feature(env, ARM_FEATURE_VFP3))
3282 return 1;
5500b06c 3283 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3284 break;
3285 case 31: /* ftoul */
3286 if (!arm_feature(env, ARM_FEATURE_VFP3))
3287 return 1;
5500b06c 3288 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3289 break;
b7bcbe95
FB
3290 default: /* undefined */
3291 printf ("rn:%d\n", rn);
3292 return 1;
3293 }
3294 break;
3295 default: /* undefined */
3296 printf ("op:%d\n", op);
3297 return 1;
3298 }
3299
3300 /* Write back the result. */
3301 if (op == 15 && (rn >= 8 && rn <= 11))
3302 ; /* Comparison, do nothing. */
04595bf6
PM
3303 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3304 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3305 gen_mov_vreg_F0(0, rd);
3306 else if (op == 15 && rn == 15)
3307 /* conversion */
3308 gen_mov_vreg_F0(!dp, rd);
3309 else
3310 gen_mov_vreg_F0(dp, rd);
3311
3312 /* break out of the loop if we have finished */
3313 if (veclen == 0)
3314 break;
3315
3316 if (op == 15 && delta_m == 0) {
3317 /* single source one-many */
3318 while (veclen--) {
3319 rd = ((rd + delta_d) & (bank_mask - 1))
3320 | (rd & bank_mask);
3321 gen_mov_vreg_F0(dp, rd);
3322 }
3323 break;
3324 }
3325 /* Setup the next operands. */
3326 veclen--;
3327 rd = ((rd + delta_d) & (bank_mask - 1))
3328 | (rd & bank_mask);
3329
3330 if (op == 15) {
3331 /* One source operand. */
3332 rm = ((rm + delta_m) & (bank_mask - 1))
3333 | (rm & bank_mask);
3334 gen_mov_F0_vreg(dp, rm);
3335 } else {
3336 /* Two source operands. */
3337 rn = ((rn + delta_d) & (bank_mask - 1))
3338 | (rn & bank_mask);
3339 gen_mov_F0_vreg(dp, rn);
3340 if (delta_m) {
3341 rm = ((rm + delta_m) & (bank_mask - 1))
3342 | (rm & bank_mask);
3343 gen_mov_F1_vreg(dp, rm);
3344 }
3345 }
3346 }
3347 }
3348 break;
3349 case 0xc:
3350 case 0xd:
8387da81 3351 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3352 /* two-register transfer */
3353 rn = (insn >> 16) & 0xf;
3354 rd = (insn >> 12) & 0xf;
3355 if (dp) {
9ee6e8bb
PB
3356 VFP_DREG_M(rm, insn);
3357 } else {
3358 rm = VFP_SREG_M(insn);
3359 }
b7bcbe95 3360
18c9b560 3361 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3362 /* vfp->arm */
3363 if (dp) {
4373f3ce
PB
3364 gen_mov_F0_vreg(0, rm * 2);
3365 tmp = gen_vfp_mrs();
3366 store_reg(s, rd, tmp);
3367 gen_mov_F0_vreg(0, rm * 2 + 1);
3368 tmp = gen_vfp_mrs();
3369 store_reg(s, rn, tmp);
b7bcbe95
FB
3370 } else {
3371 gen_mov_F0_vreg(0, rm);
4373f3ce 3372 tmp = gen_vfp_mrs();
8387da81 3373 store_reg(s, rd, tmp);
b7bcbe95 3374 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3375 tmp = gen_vfp_mrs();
8387da81 3376 store_reg(s, rn, tmp);
b7bcbe95
FB
3377 }
3378 } else {
3379 /* arm->vfp */
3380 if (dp) {
4373f3ce
PB
3381 tmp = load_reg(s, rd);
3382 gen_vfp_msr(tmp);
3383 gen_mov_vreg_F0(0, rm * 2);
3384 tmp = load_reg(s, rn);
3385 gen_vfp_msr(tmp);
3386 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3387 } else {
8387da81 3388 tmp = load_reg(s, rd);
4373f3ce 3389 gen_vfp_msr(tmp);
b7bcbe95 3390 gen_mov_vreg_F0(0, rm);
8387da81 3391 tmp = load_reg(s, rn);
4373f3ce 3392 gen_vfp_msr(tmp);
b7bcbe95
FB
3393 gen_mov_vreg_F0(0, rm + 1);
3394 }
3395 }
3396 } else {
3397 /* Load/store */
3398 rn = (insn >> 16) & 0xf;
3399 if (dp)
9ee6e8bb 3400 VFP_DREG_D(rd, insn);
b7bcbe95 3401 else
9ee6e8bb 3402 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3403 if ((insn & 0x01200000) == 0x01000000) {
3404 /* Single load/store */
3405 offset = (insn & 0xff) << 2;
3406 if ((insn & (1 << 23)) == 0)
3407 offset = -offset;
31b13080
PM
3408 if (s->thumb && rn == 15) {
3409 /* This is actually UNPREDICTABLE */
3410 addr = tcg_temp_new_i32();
3411 tcg_gen_movi_i32(addr, s->pc & ~2);
3412 } else {
3413 addr = load_reg(s, rn);
3414 }
312eea9f 3415 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3416 if (insn & (1 << 20)) {
312eea9f 3417 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3418 gen_mov_vreg_F0(dp, rd);
3419 } else {
3420 gen_mov_F0_vreg(dp, rd);
312eea9f 3421 gen_vfp_st(s, dp, addr);
b7bcbe95 3422 }
7d1b0095 3423 tcg_temp_free_i32(addr);
b7bcbe95
FB
3424 } else {
3425 /* load/store multiple */
31b13080 3426 int w = insn & (1 << 21);
b7bcbe95
FB
3427 if (dp)
3428 n = (insn >> 1) & 0x7f;
3429 else
3430 n = insn & 0xff;
3431
31b13080
PM
3432 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3433 /* P == U , W == 1 => UNDEF */
3434 return 1;
3435 }
3436 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3437 /* UNPREDICTABLE cases for bad immediates: we choose to
3438 * UNDEF to avoid generating huge numbers of TCG ops
3439 */
3440 return 1;
3441 }
3442 if (rn == 15 && w) {
3443 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3444 return 1;
3445 }
3446
3447 if (s->thumb && rn == 15) {
3448 /* This is actually UNPREDICTABLE */
3449 addr = tcg_temp_new_i32();
3450 tcg_gen_movi_i32(addr, s->pc & ~2);
3451 } else {
3452 addr = load_reg(s, rn);
3453 }
b7bcbe95 3454 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3455 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3456
3457 if (dp)
3458 offset = 8;
3459 else
3460 offset = 4;
3461 for (i = 0; i < n; i++) {
18c9b560 3462 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3463 /* load */
312eea9f 3464 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3465 gen_mov_vreg_F0(dp, rd + i);
3466 } else {
3467 /* store */
3468 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3469 gen_vfp_st(s, dp, addr);
b7bcbe95 3470 }
312eea9f 3471 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3472 }
31b13080 3473 if (w) {
b7bcbe95
FB
3474 /* writeback */
3475 if (insn & (1 << 24))
3476 offset = -offset * n;
3477 else if (dp && (insn & 1))
3478 offset = 4;
3479 else
3480 offset = 0;
3481
3482 if (offset != 0)
312eea9f
FN
3483 tcg_gen_addi_i32(addr, addr, offset);
3484 store_reg(s, rn, addr);
3485 } else {
7d1b0095 3486 tcg_temp_free_i32(addr);
b7bcbe95
FB
3487 }
3488 }
3489 }
3490 break;
3491 default:
3492 /* Should never happen. */
3493 return 1;
3494 }
3495 return 0;
3496}
3497
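/* Emit a jump to 'dest'.  If the destination lies in the same guest page as
 * the start of the current TB we can chain directly to the next TB with
 * goto_tb; otherwise we just update the PC and exit, forcing a fresh TB
 * lookup.
 */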
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception. */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}

static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
3538
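/* 'flags' below is the 4-bit field mask from the MSR instruction (one bit per
 * PSR byte: c, x, s, f).  Bits the CPU does not implement, execution state
 * bits (for CPSR writes) and privileged bits in user mode are then removed
 * from the mask.
 */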
/* Return the mask of PSR bits set by a MSR instruction. */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits. */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality */
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits. */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits. */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
3571
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted. */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
3602
e9bb4aa9
JR
3603/* Generate an old-style exception return. Marks pc as dead. */
3604static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3605{
d9ba4830 3606 TCGv tmp;
e9bb4aa9 3607 store_reg(s, 15, pc);
d9ba4830
PB
3608 tmp = load_cpu_field(spsr);
3609 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3610 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3611 s->is_jmp = DISAS_UPDATE;
3612}
3613
b0109805
PB
3614/* Generate a v6 exception return. Marks both values as dead. */
3615static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3616{
b0109805 3617 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3618 tcg_temp_free_i32(cpsr);
b0109805 3619 store_reg(s, 15, pc);
9ee6e8bb
PB
3620 s->is_jmp = DISAS_UPDATE;
3621}
3b46e624 3622
9ee6e8bb
PB
3623static inline void
3624gen_set_condexec (DisasContext *s)
3625{
3626 if (s->condexec_mask) {
8f01245e 3627 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3628 TCGv tmp = tcg_temp_new_i32();
8f01245e 3629 tcg_gen_movi_i32(tmp, val);
d9ba4830 3630 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3631 }
3632}
3b46e624 3633
bc4a0de0
PM
3634static void gen_exception_insn(DisasContext *s, int offset, int excp)
3635{
3636 gen_set_condexec(s);
3637 gen_set_pc_im(s->pc - offset);
3638 gen_exception(excp);
3639 s->is_jmp = DISAS_JUMP;
3640}
3641
9ee6e8bb
PB
3642static void gen_nop_hint(DisasContext *s, int val)
3643{
3644 switch (val) {
3645 case 3: /* wfi */
8984bd2e 3646 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3647 s->is_jmp = DISAS_WFI;
3648 break;
3649 case 2: /* wfe */
3650 case 4: /* sev */
3651 /* TODO: Implement SEV and WFE. May help SMP performance. */
3652 default: /* nop */
3653 break;
3654 }
3655}
99c475ab 3656
ad69471c 3657#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3658
62698be3 3659static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3660{
3661 switch (size) {
dd8fbd78
FN
3662 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3663 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3664 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3665 default: abort();
9ee6e8bb 3666 }
9ee6e8bb
PB
3667}
3668
dd8fbd78 3669static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3670{
3671 switch (size) {
dd8fbd78
FN
3672 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3673 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3674 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3675 default: return;
3676 }
3677}
3678
3679/* 32-bit pairwise ops end up the same as the elementwise versions. */
3680#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3681#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3682#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3683#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3684
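/* The GEN_NEON_INTEGER_OP* macros dispatch on ((size << 1) | u): 'size'
 * selects 8/16/32-bit elements and 'u' selects the unsigned variant of the
 * helper.  64-bit element sizes are not handled here and make the caller
 * return 1 (UNDEF).
 */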
3685#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3686 switch ((size << 1) | u) { \
3687 case 0: \
dd8fbd78 3688 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3689 break; \
3690 case 1: \
dd8fbd78 3691 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3692 break; \
3693 case 2: \
dd8fbd78 3694 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3695 break; \
3696 case 3: \
dd8fbd78 3697 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3698 break; \
3699 case 4: \
dd8fbd78 3700 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3701 break; \
3702 case 5: \
dd8fbd78 3703 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3704 break; \
3705 default: return 1; \
3706 }} while (0)
9ee6e8bb
PB
3707
3708#define GEN_NEON_INTEGER_OP(name) do { \
3709 switch ((size << 1) | u) { \
ad69471c 3710 case 0: \
dd8fbd78 3711 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3712 break; \
3713 case 1: \
dd8fbd78 3714 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3715 break; \
3716 case 2: \
dd8fbd78 3717 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3718 break; \
3719 case 3: \
dd8fbd78 3720 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3721 break; \
3722 case 4: \
dd8fbd78 3723 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3724 break; \
3725 case 5: \
dd8fbd78 3726 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3727 break; \
9ee6e8bb
PB
3728 default: return 1; \
3729 }} while (0)

static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

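/* Load a Neon scalar operand into a 32-bit temporary.  For 16-bit scalars
 * the selected lane is duplicated into both halves of the result.
 */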
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3759
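/* Expand a VUZP of registers rd and rm via the Neon unzip helpers; returns
 * nonzero (UNDEF) for the unallocated size==2, Q==0 case.
 */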
02acedf9 3760static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3761{
02acedf9 3762 TCGv tmp, tmp2;
600b828c 3763 if (!q && size == 2) {
02acedf9
PM
3764 return 1;
3765 }
3766 tmp = tcg_const_i32(rd);
3767 tmp2 = tcg_const_i32(rm);
3768 if (q) {
3769 switch (size) {
3770 case 0:
02da0b2d 3771 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3772 break;
3773 case 1:
02da0b2d 3774 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3775 break;
3776 case 2:
02da0b2d 3777 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3778 break;
3779 default:
3780 abort();
3781 }
3782 } else {
3783 switch (size) {
3784 case 0:
02da0b2d 3785 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3786 break;
3787 case 1:
02da0b2d 3788 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3789 break;
3790 default:
3791 abort();
3792 }
3793 }
3794 tcg_temp_free_i32(tmp);
3795 tcg_temp_free_i32(tmp2);
3796 return 0;
19457615
FN
3797}
3798
d68a6f3a 3799static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3800{
3801 TCGv tmp, tmp2;
600b828c 3802 if (!q && size == 2) {
d68a6f3a
PM
3803 return 1;
3804 }
3805 tmp = tcg_const_i32(rd);
3806 tmp2 = tcg_const_i32(rm);
3807 if (q) {
3808 switch (size) {
3809 case 0:
02da0b2d 3810 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3811 break;
3812 case 1:
02da0b2d 3813 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3814 break;
3815 case 2:
02da0b2d 3816 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3817 break;
3818 default:
3819 abort();
3820 }
3821 } else {
3822 switch (size) {
3823 case 0:
02da0b2d 3824 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3825 break;
3826 case 1:
02da0b2d 3827 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3828 break;
3829 default:
3830 abort();
3831 }
3832 }
3833 tcg_temp_free_i32(tmp);
3834 tcg_temp_free_i32(tmp2);
3835 return 0;
19457615
FN
3836}
3837
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3878
3879
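/* Register-list shapes for the Neon "load/store multiple structures" forms,
 * indexed by the op field: number of registers, element interleave factor
 * and D-register spacing.
 */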
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

5df8bac1 3919 if (!s->vfp_enabled)
9ee6e8bb
PB
3920 return 1;
3921 VFP_DREG_D(rd, insn);
3922 rn = (insn >> 16) & 0xf;
3923 rm = insn & 0xf;
3924 load = (insn & (1 << 21)) != 0;
3925 if ((insn & (1 << 23)) == 0) {
3926 /* Load store all elements. */
3927 op = (insn >> 8) & 0xf;
3928 size = (insn >> 6) & 3;
84496233 3929 if (op > 10)
9ee6e8bb 3930 return 1;
f2dd89d0
PM
3931 /* Catch UNDEF cases for bad values of align field */
3932 switch (op & 0xc) {
3933 case 4:
3934 if (((insn >> 5) & 1) == 1) {
3935 return 1;
3936 }
3937 break;
3938 case 8:
3939 if (((insn >> 4) & 3) == 3) {
3940 return 1;
3941 }
3942 break;
3943 default:
3944 break;
3945 }
9ee6e8bb
PB
3946 nregs = neon_ls_element_type[op].nregs;
3947 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3948 spacing = neon_ls_element_type[op].spacing;
3949 if (size == 3 && (interleave | spacing) != 1)
3950 return 1;
e318a60b 3951 addr = tcg_temp_new_i32();
dcc65026 3952 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3953 stride = (1 << size) * interleave;
3954 for (reg = 0; reg < nregs; reg++) {
3955 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3956 load_reg_var(s, addr, rn);
3957 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3958 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3959 load_reg_var(s, addr, rn);
3960 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3961 }
84496233
JR
3962 if (size == 3) {
3963 if (load) {
3964 tmp64 = gen_ld64(addr, IS_USER(s));
3965 neon_store_reg64(tmp64, rd);
3966 tcg_temp_free_i64(tmp64);
3967 } else {
3968 tmp64 = tcg_temp_new_i64();
3969 neon_load_reg64(tmp64, rd);
3970 gen_st64(tmp64, addr, IS_USER(s));
3971 }
3972 tcg_gen_addi_i32(addr, addr, stride);
3973 } else {
3974 for (pass = 0; pass < 2; pass++) {
3975 if (size == 2) {
3976 if (load) {
3977 tmp = gen_ld32(addr, IS_USER(s));
3978 neon_store_reg(rd, pass, tmp);
3979 } else {
3980 tmp = neon_load_reg(rd, pass);
3981 gen_st32(tmp, addr, IS_USER(s));
3982 }
1b2b1e54 3983 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3984 } else if (size == 1) {
3985 if (load) {
3986 tmp = gen_ld16u(addr, IS_USER(s));
3987 tcg_gen_addi_i32(addr, addr, stride);
3988 tmp2 = gen_ld16u(addr, IS_USER(s));
3989 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3990 tcg_gen_shli_i32(tmp2, tmp2, 16);
3991 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3992 tcg_temp_free_i32(tmp2);
84496233
JR
3993 neon_store_reg(rd, pass, tmp);
3994 } else {
3995 tmp = neon_load_reg(rd, pass);
7d1b0095 3996 tmp2 = tcg_temp_new_i32();
84496233
JR
3997 tcg_gen_shri_i32(tmp2, tmp, 16);
3998 gen_st16(tmp, addr, IS_USER(s));
3999 tcg_gen_addi_i32(addr, addr, stride);
4000 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 4001 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4002 }
84496233
JR
4003 } else /* size == 0 */ {
4004 if (load) {
4005 TCGV_UNUSED(tmp2);
4006 for (n = 0; n < 4; n++) {
4007 tmp = gen_ld8u(addr, IS_USER(s));
4008 tcg_gen_addi_i32(addr, addr, stride);
4009 if (n == 0) {
4010 tmp2 = tmp;
4011 } else {
41ba8341
PB
4012 tcg_gen_shli_i32(tmp, tmp, n * 8);
4013 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4014 tcg_temp_free_i32(tmp);
84496233 4015 }
9ee6e8bb 4016 }
84496233
JR
4017 neon_store_reg(rd, pass, tmp2);
4018 } else {
4019 tmp2 = neon_load_reg(rd, pass);
4020 for (n = 0; n < 4; n++) {
7d1b0095 4021 tmp = tcg_temp_new_i32();
84496233
JR
4022 if (n == 0) {
4023 tcg_gen_mov_i32(tmp, tmp2);
4024 } else {
4025 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4026 }
4027 gen_st8(tmp, addr, IS_USER(s));
4028 tcg_gen_addi_i32(addr, addr, stride);
4029 }
7d1b0095 4030 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4031 }
4032 }
4033 }
4034 }
84496233 4035 rd += spacing;
9ee6e8bb 4036 }
e318a60b 4037 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4038 stride = nregs * 8;
4039 } else {
4040 size = (insn >> 10) & 3;
4041 if (size == 3) {
4042 /* Load single element to all lanes. */
8e18cde3
PM
4043 int a = (insn >> 4) & 1;
4044 if (!load) {
9ee6e8bb 4045 return 1;
8e18cde3 4046 }
9ee6e8bb
PB
4047 size = (insn >> 6) & 3;
4048 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4049
4050 if (size == 3) {
4051 if (nregs != 4 || a == 0) {
9ee6e8bb 4052 return 1;
99c475ab 4053 }
8e18cde3
PM
4054 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4055 size = 2;
4056 }
4057 if (nregs == 1 && a == 1 && size == 0) {
4058 return 1;
4059 }
4060 if (nregs == 3 && a == 1) {
4061 return 1;
4062 }
e318a60b 4063 addr = tcg_temp_new_i32();
8e18cde3
PM
4064 load_reg_var(s, addr, rn);
4065 if (nregs == 1) {
4066 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4067 tmp = gen_load_and_replicate(s, addr, size);
4068 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4069 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4070 if (insn & (1 << 5)) {
4071 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4072 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4073 }
4074 tcg_temp_free_i32(tmp);
4075 } else {
4076 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4077 stride = (insn & (1 << 5)) ? 2 : 1;
4078 for (reg = 0; reg < nregs; reg++) {
4079 tmp = gen_load_and_replicate(s, addr, size);
4080 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4081 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4082 tcg_temp_free_i32(tmp);
4083 tcg_gen_addi_i32(addr, addr, 1 << size);
4084 rd += stride;
4085 }
9ee6e8bb 4086 }
e318a60b 4087 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4088 stride = (1 << size) * nregs;
4089 } else {
4090 /* Single element. */
93262b16 4091 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4092 pass = (insn >> 7) & 1;
4093 switch (size) {
4094 case 0:
4095 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4096 stride = 1;
4097 break;
4098 case 1:
4099 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4100 stride = (insn & (1 << 5)) ? 2 : 1;
4101 break;
4102 case 2:
4103 shift = 0;
9ee6e8bb
PB
4104 stride = (insn & (1 << 6)) ? 2 : 1;
4105 break;
4106 default:
4107 abort();
4108 }
4109 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4110 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4111 switch (nregs) {
4112 case 1:
4113 if (((idx & (1 << size)) != 0) ||
4114 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4115 return 1;
4116 }
4117 break;
4118 case 3:
4119 if ((idx & 1) != 0) {
4120 return 1;
4121 }
4122 /* fall through */
4123 case 2:
4124 if (size == 2 && (idx & 2) != 0) {
4125 return 1;
4126 }
4127 break;
4128 case 4:
4129 if ((size == 2) && ((idx & 3) == 3)) {
4130 return 1;
4131 }
4132 break;
4133 default:
4134 abort();
4135 }
4136 if ((rd + stride * (nregs - 1)) > 31) {
4137 /* Attempts to write off the end of the register file
4138 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4139 * the neon_load_reg() would write off the end of the array.
4140 */
4141 return 1;
4142 }
e318a60b 4143 addr = tcg_temp_new_i32();
dcc65026 4144 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4145 for (reg = 0; reg < nregs; reg++) {
4146 if (load) {
9ee6e8bb
PB
4147 switch (size) {
4148 case 0:
1b2b1e54 4149 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4150 break;
4151 case 1:
1b2b1e54 4152 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4153 break;
4154 case 2:
1b2b1e54 4155 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4156 break;
a50f5b91
PB
4157 default: /* Avoid compiler warnings. */
4158 abort();
9ee6e8bb
PB
4159 }
4160 if (size != 2) {
8f8e3aa4
PB
4161 tmp2 = neon_load_reg(rd, pass);
4162 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4163 tcg_temp_free_i32(tmp2);
9ee6e8bb 4164 }
8f8e3aa4 4165 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4166 } else { /* Store */
8f8e3aa4
PB
4167 tmp = neon_load_reg(rd, pass);
4168 if (shift)
4169 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4170 switch (size) {
4171 case 0:
1b2b1e54 4172 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4173 break;
4174 case 1:
1b2b1e54 4175 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4176 break;
4177 case 2:
1b2b1e54 4178 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4179 break;
99c475ab 4180 }
99c475ab 4181 }
9ee6e8bb 4182 rd += stride;
1b2b1e54 4183 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4184 }
e318a60b 4185 tcg_temp_free_i32(addr);
9ee6e8bb 4186 stride = nregs * (1 << size);
99c475ab 4187 }
9ee6e8bb
PB
4188 }
4189 if (rm != 15) {
b26eefb6
PB
4190 TCGv base;
4191
4192 base = load_reg(s, rn);
9ee6e8bb 4193 if (rm == 13) {
b26eefb6 4194 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4195 } else {
b26eefb6
PB
4196 TCGv index;
4197 index = load_reg(s, rm);
4198 tcg_gen_add_i32(base, base, index);
7d1b0095 4199 tcg_temp_free_i32(index);
9ee6e8bb 4200 }
b26eefb6 4201 store_reg(s, rn, base);
9ee6e8bb
PB
4202 }
4203 return 0;
4204}
3b46e624 4205
8f8e3aa4
PB
4206/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4207static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4208{
4209 tcg_gen_and_i32(t, t, c);
f669df27 4210 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4211 tcg_gen_or_i32(dest, t, f);
4212}
4213
a7812ae4 4214static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4215{
4216 switch (size) {
4217 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4218 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4219 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4220 default: abort();
4221 }
4222}
4223
a7812ae4 4224static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4225{
4226 switch (size) {
4227 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4228 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4229 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4230 default: abort();
4231 }
4232}
4233
a7812ae4 4234static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4235{
4236 switch (size) {
02da0b2d
PM
4237 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4238 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4239 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4240 default: abort();
4241 }
4242}
4243
af1bbf30
JR
4244static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4245{
4246 switch (size) {
02da0b2d
PM
4247 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4248 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4249 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4250 default: abort();
4251 }
4252}
4253
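/* Shift 'var' by 'shift' for the narrowing shift insns: q selects the
 * rounding forms, u the unsigned forms.
 */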
4254static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4255 int q, int u)
4256{
4257 if (q) {
4258 if (u) {
4259 switch (size) {
4260 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4261 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4262 default: abort();
4263 }
4264 } else {
4265 switch (size) {
4266 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4267 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4268 default: abort();
4269 }
4270 }
4271 } else {
4272 if (u) {
4273 switch (size) {
b408a9b0
CL
4274 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4275 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4276 default: abort();
4277 }
4278 } else {
4279 switch (size) {
4280 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4281 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4282 default: abort();
4283 }
4284 }
4285 }
4286}
4287
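/* Widen the 32-bit value in src into the 64-bit dest (zero- or sign-extending
 * each element according to u) and free the source temporary.
 */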
a7812ae4 4288static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4289{
4290 if (u) {
4291 switch (size) {
4292 case 0: gen_helper_neon_widen_u8(dest, src); break;
4293 case 1: gen_helper_neon_widen_u16(dest, src); break;
4294 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4295 default: abort();
4296 }
4297 } else {
4298 switch (size) {
4299 case 0: gen_helper_neon_widen_s8(dest, src); break;
4300 case 1: gen_helper_neon_widen_s16(dest, src); break;
4301 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4302 default: abort();
4303 }
4304 }
7d1b0095 4305 tcg_temp_free_i32(src);
ad69471c
PB
4306}
4307
4308static inline void gen_neon_addl(int size)
4309{
4310 switch (size) {
4311 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4312 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4313 case 2: tcg_gen_add_i64(CPU_V001); break;
4314 default: abort();
4315 }
4316}
4317
4318static inline void gen_neon_subl(int size)
4319{
4320 switch (size) {
4321 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4322 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4323 case 2: tcg_gen_sub_i64(CPU_V001); break;
4324 default: abort();
4325 }
4326}
4327
a7812ae4 4328static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4329{
4330 switch (size) {
4331 case 0: gen_helper_neon_negl_u16(var, var); break;
4332 case 1: gen_helper_neon_negl_u32(var, var); break;
4333 case 2: gen_helper_neon_negl_u64(var, var); break;
4334 default: abort();
4335 }
4336}
4337
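/* op0 += op1 with signed saturation: size 1 treats the 64-bit value as two
 * 32-bit elements, size 2 as a single 64-bit element.
 */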
a7812ae4 4338static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4339{
4340 switch (size) {
02da0b2d
PM
4341 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4342 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4343 default: abort();
4344 }
4345}
4346
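/* Widening multiply: dest (64 bit) = a * b, with the element width chosen
 * by 'size' and the signedness by 'u'.
 */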
a7812ae4 4347static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4348{
a7812ae4 4349 TCGv_i64 tmp;
ad69471c
PB
4350
4351 switch ((size << 1) | u) {
4352 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4353 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4354 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4355 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4356 case 4:
4357 tmp = gen_muls_i64_i32(a, b);
4358 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4359 tcg_temp_free_i64(tmp);
ad69471c
PB
4360 break;
4361 case 5:
4362 tmp = gen_mulu_i64_i32(a, b);
4363 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4364 tcg_temp_free_i64(tmp);
ad69471c
PB
4365 break;
4366 default: abort();
4367 }
c6067f04
CL
4368
4369 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4370 Don't forget to clean them now. */
4371 if (size < 2) {
7d1b0095
PM
4372 tcg_temp_free_i32(a);
4373 tcg_temp_free_i32(b);
c6067f04 4374 }
ad69471c
PB
4375}
4376
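/* Narrow the 64-bit value in src into dest; op and u select between plain,
 * signed-saturating, unsigned-saturating and signed-to-unsigned-saturating
 * narrowing.
 */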
4377static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4378{
4379 if (op) {
4380 if (u) {
4381 gen_neon_unarrow_sats(size, dest, src);
4382 } else {
4383 gen_neon_narrow(size, dest, src);
4384 }
4385 } else {
4386 if (u) {
4387 gen_neon_narrow_satu(size, dest, src);
4388 } else {
4389 gen_neon_narrow_sats(size, dest, src);
4390 }
4391 }
4392}
4393
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

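/* Each entry in this array has bit n set if the corresponding 3-reg-same
 * insn allows size value n (otherwise it will UNDEF).
 */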
4429static const uint8_t neon_3r_sizes[] = {
4430 [NEON_3R_VHADD] = 0x7,
4431 [NEON_3R_VQADD] = 0xf,
4432 [NEON_3R_VRHADD] = 0x7,
4433 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4434 [NEON_3R_VHSUB] = 0x7,
4435 [NEON_3R_VQSUB] = 0xf,
4436 [NEON_3R_VCGT] = 0x7,
4437 [NEON_3R_VCGE] = 0x7,
4438 [NEON_3R_VSHL] = 0xf,
4439 [NEON_3R_VQSHL] = 0xf,
4440 [NEON_3R_VRSHL] = 0xf,
4441 [NEON_3R_VQRSHL] = 0xf,
4442 [NEON_3R_VMAX] = 0x7,
4443 [NEON_3R_VMIN] = 0x7,
4444 [NEON_3R_VABD] = 0x7,
4445 [NEON_3R_VABA] = 0x7,
4446 [NEON_3R_VADD_VSUB] = 0xf,
4447 [NEON_3R_VTST_VCEQ] = 0x7,
4448 [NEON_3R_VML] = 0x7,
4449 [NEON_3R_VMUL] = 0x7,
4450 [NEON_3R_VPMAX] = 0x7,
4451 [NEON_3R_VPMIN] = 0x7,
4452 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4453 [NEON_3R_VPADD] = 0x7,
4454 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4455 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4456 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4457 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4458 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4459 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4460};
4461
600b828c
PM
4462/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4463 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4464 * table A7-13.
4465 */
4466#define NEON_2RM_VREV64 0
4467#define NEON_2RM_VREV32 1
4468#define NEON_2RM_VREV16 2
4469#define NEON_2RM_VPADDL 4
4470#define NEON_2RM_VPADDL_U 5
4471#define NEON_2RM_VCLS 8
4472#define NEON_2RM_VCLZ 9
4473#define NEON_2RM_VCNT 10
4474#define NEON_2RM_VMVN 11
4475#define NEON_2RM_VPADAL 12
4476#define NEON_2RM_VPADAL_U 13
4477#define NEON_2RM_VQABS 14
4478#define NEON_2RM_VQNEG 15
4479#define NEON_2RM_VCGT0 16
4480#define NEON_2RM_VCGE0 17
4481#define NEON_2RM_VCEQ0 18
4482#define NEON_2RM_VCLE0 19
4483#define NEON_2RM_VCLT0 20
4484#define NEON_2RM_VABS 22
4485#define NEON_2RM_VNEG 23
4486#define NEON_2RM_VCGT0_F 24
4487#define NEON_2RM_VCGE0_F 25
4488#define NEON_2RM_VCEQ0_F 26
4489#define NEON_2RM_VCLE0_F 27
4490#define NEON_2RM_VCLT0_F 28
4491#define NEON_2RM_VABS_F 30
4492#define NEON_2RM_VNEG_F 31
4493#define NEON_2RM_VSWP 32
4494#define NEON_2RM_VTRN 33
4495#define NEON_2RM_VUZP 34
4496#define NEON_2RM_VZIP 35
4497#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4498#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4499#define NEON_2RM_VSHLL 38
4500#define NEON_2RM_VCVT_F16_F32 44
4501#define NEON_2RM_VCVT_F32_F16 46
4502#define NEON_2RM_VRECPE 56
4503#define NEON_2RM_VRSQRTE 57
4504#define NEON_2RM_VRECPE_F 58
4505#define NEON_2RM_VRSQRTE_F 59
4506#define NEON_2RM_VCVT_FS 60
4507#define NEON_2RM_VCVT_FU 61
4508#define NEON_2RM_VCVT_SF 62
4509#define NEON_2RM_VCVT_UF 63
4510
4511static int neon_2rm_is_float_op(int op)
4512{
4513 /* Return true if this neon 2reg-misc op is float-to-float */
4514 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4515 op >= NEON_2RM_VRECPE_F);
4516}
4517
4518/* Each entry in this array has bit n set if the insn allows
4519 * size value n (otherwise it will UNDEF). Since unallocated
4520 * op values will have no bits set they always UNDEF.
4521 */
4522static const uint8_t neon_2rm_sizes[] = {
4523 [NEON_2RM_VREV64] = 0x7,
4524 [NEON_2RM_VREV32] = 0x3,
4525 [NEON_2RM_VREV16] = 0x1,
4526 [NEON_2RM_VPADDL] = 0x7,
4527 [NEON_2RM_VPADDL_U] = 0x7,
4528 [NEON_2RM_VCLS] = 0x7,
4529 [NEON_2RM_VCLZ] = 0x7,
4530 [NEON_2RM_VCNT] = 0x1,
4531 [NEON_2RM_VMVN] = 0x1,
4532 [NEON_2RM_VPADAL] = 0x7,
4533 [NEON_2RM_VPADAL_U] = 0x7,
4534 [NEON_2RM_VQABS] = 0x7,
4535 [NEON_2RM_VQNEG] = 0x7,
4536 [NEON_2RM_VCGT0] = 0x7,
4537 [NEON_2RM_VCGE0] = 0x7,
4538 [NEON_2RM_VCEQ0] = 0x7,
4539 [NEON_2RM_VCLE0] = 0x7,
4540 [NEON_2RM_VCLT0] = 0x7,
4541 [NEON_2RM_VABS] = 0x7,
4542 [NEON_2RM_VNEG] = 0x7,
4543 [NEON_2RM_VCGT0_F] = 0x4,
4544 [NEON_2RM_VCGE0_F] = 0x4,
4545 [NEON_2RM_VCEQ0_F] = 0x4,
4546 [NEON_2RM_VCLE0_F] = 0x4,
4547 [NEON_2RM_VCLT0_F] = 0x4,
4548 [NEON_2RM_VABS_F] = 0x4,
4549 [NEON_2RM_VNEG_F] = 0x4,
4550 [NEON_2RM_VSWP] = 0x1,
4551 [NEON_2RM_VTRN] = 0x7,
4552 [NEON_2RM_VUZP] = 0x7,
4553 [NEON_2RM_VZIP] = 0x7,
4554 [NEON_2RM_VMOVN] = 0x7,
4555 [NEON_2RM_VQMOVN] = 0x7,
4556 [NEON_2RM_VSHLL] = 0x7,
4557 [NEON_2RM_VCVT_F16_F32] = 0x2,
4558 [NEON_2RM_VCVT_F32_F16] = 0x2,
4559 [NEON_2RM_VRECPE] = 0x4,
4560 [NEON_2RM_VRSQRTE] = 0x4,
4561 [NEON_2RM_VRECPE_F] = 0x4,
4562 [NEON_2RM_VRSQRTE_F] = 0x4,
4563 [NEON_2RM_VCVT_FS] = 0x4,
4564 [NEON_2RM_VCVT_FU] = 0x4,
4565 [NEON_2RM_VCVT_SF] = 0x4,
4566 [NEON_2RM_VCVT_UF] = 0x4,
4567};
4568
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;
9ee6e8bb 4588
5df8bac1 4589 if (!s->vfp_enabled)
9ee6e8bb
PB
4590 return 1;
4591 q = (insn & (1 << 6)) != 0;
4592 u = (insn >> 24) & 1;
4593 VFP_DREG_D(rd, insn);
4594 VFP_DREG_N(rn, insn);
4595 VFP_DREG_M(rm, insn);
4596 size = (insn >> 20) & 3;
4597 if ((insn & (1 << 23)) == 0) {
4598 /* Three register same length. */
4599 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4600 /* Catch invalid op and bad size combinations: UNDEF */
4601 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4602 return 1;
4603 }
25f84f79
PM
4604 /* All insns of this form UNDEF for either this condition or the
4605 * superset of cases "Q==1"; we catch the latter later.
4606 */
4607 if (q && ((rd | rn | rm) & 1)) {
4608 return 1;
4609 }
62698be3
PM
4610 if (size == 3 && op != NEON_3R_LOGIC) {
4611 /* 64-bit element instructions. */
9ee6e8bb 4612 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4613 neon_load_reg64(cpu_V0, rn + pass);
4614 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4615 switch (op) {
62698be3 4616 case NEON_3R_VQADD:
9ee6e8bb 4617 if (u) {
02da0b2d
PM
4618 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4619 cpu_V0, cpu_V1);
2c0262af 4620 } else {
02da0b2d
PM
4621 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4622 cpu_V0, cpu_V1);
2c0262af 4623 }
9ee6e8bb 4624 break;
62698be3 4625 case NEON_3R_VQSUB:
9ee6e8bb 4626 if (u) {
02da0b2d
PM
4627 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4628 cpu_V0, cpu_V1);
ad69471c 4629 } else {
02da0b2d
PM
4630 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4631 cpu_V0, cpu_V1);
ad69471c
PB
4632 }
4633 break;
62698be3 4634 case NEON_3R_VSHL:
ad69471c
PB
4635 if (u) {
4636 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4637 } else {
4638 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4639 }
4640 break;
62698be3 4641 case NEON_3R_VQSHL:
ad69471c 4642 if (u) {
02da0b2d
PM
4643 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4644 cpu_V1, cpu_V0);
ad69471c 4645 } else {
02da0b2d
PM
4646 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4647 cpu_V1, cpu_V0);
ad69471c
PB
4648 }
4649 break;
62698be3 4650 case NEON_3R_VRSHL:
ad69471c
PB
4651 if (u) {
4652 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4653 } else {
ad69471c
PB
4654 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4655 }
4656 break;
62698be3 4657 case NEON_3R_VQRSHL:
ad69471c 4658 if (u) {
02da0b2d
PM
4659 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4660 cpu_V1, cpu_V0);
ad69471c 4661 } else {
02da0b2d
PM
4662 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4663 cpu_V1, cpu_V0);
1e8d4eec 4664 }
9ee6e8bb 4665 break;
62698be3 4666 case NEON_3R_VADD_VSUB:
9ee6e8bb 4667 if (u) {
ad69471c 4668 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4669 } else {
ad69471c 4670 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4671 }
4672 break;
4673 default:
4674 abort();
2c0262af 4675 }
ad69471c 4676 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4677 }
9ee6e8bb 4678 return 0;
2c0262af 4679 }
25f84f79 4680 pairwise = 0;
9ee6e8bb 4681 switch (op) {
62698be3
PM
4682 case NEON_3R_VSHL:
4683 case NEON_3R_VQSHL:
4684 case NEON_3R_VRSHL:
4685 case NEON_3R_VQRSHL:
9ee6e8bb 4686 {
ad69471c
PB
4687 int rtmp;
4688 /* Shift instruction operands are reversed. */
4689 rtmp = rn;
9ee6e8bb 4690 rn = rm;
ad69471c 4691 rm = rtmp;
9ee6e8bb 4692 }
2c0262af 4693 break;
25f84f79
PM
4694 case NEON_3R_VPADD:
4695 if (u) {
4696 return 1;
4697 }
4698 /* Fall through */
62698be3
PM
4699 case NEON_3R_VPMAX:
4700 case NEON_3R_VPMIN:
9ee6e8bb 4701 pairwise = 1;
2c0262af 4702 break;
25f84f79
PM
4703 case NEON_3R_FLOAT_ARITH:
4704 pairwise = (u && size < 2); /* if VPADD (float) */
4705 break;
4706 case NEON_3R_FLOAT_MINMAX:
4707 pairwise = u; /* if VPMIN/VPMAX (float) */
4708 break;
4709 case NEON_3R_FLOAT_CMP:
4710 if (!u && size) {
4711 /* no encoding for U=0 C=1x */
4712 return 1;
4713 }
4714 break;
4715 case NEON_3R_FLOAT_ACMP:
4716 if (!u) {
4717 return 1;
4718 }
4719 break;
4720 case NEON_3R_VRECPS_VRSQRTS:
4721 if (u) {
4722 return 1;
4723 }
2c0262af 4724 break;
25f84f79
PM
4725 case NEON_3R_VMUL:
4726 if (u && (size != 0)) {
4727 /* UNDEF on invalid size for polynomial subcase */
4728 return 1;
4729 }
2c0262af 4730 break;
9ee6e8bb 4731 default:
2c0262af 4732 break;
9ee6e8bb 4733 }
dd8fbd78 4734
25f84f79
PM
4735 if (pairwise && q) {
4736 /* All the pairwise insns UNDEF if Q is set */
4737 return 1;
4738 }
4739
9ee6e8bb
PB
4740 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4741
4742 if (pairwise) {
4743 /* Pairwise. */
a5a14945
JR
4744 if (pass < 1) {
4745 tmp = neon_load_reg(rn, 0);
4746 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4747 } else {
a5a14945
JR
4748 tmp = neon_load_reg(rm, 0);
4749 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4750 }
4751 } else {
4752 /* Elementwise. */
dd8fbd78
FN
4753 tmp = neon_load_reg(rn, pass);
4754 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4755 }
4756 switch (op) {
62698be3 4757 case NEON_3R_VHADD:
9ee6e8bb
PB
4758 GEN_NEON_INTEGER_OP(hadd);
4759 break;
62698be3 4760 case NEON_3R_VQADD:
02da0b2d 4761 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4762 break;
62698be3 4763 case NEON_3R_VRHADD:
9ee6e8bb 4764 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4765 break;
62698be3 4766 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4767 switch ((u << 2) | size) {
4768 case 0: /* VAND */
dd8fbd78 4769 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4770 break;
4771 case 1: /* BIC */
f669df27 4772 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4773 break;
4774 case 2: /* VORR */
dd8fbd78 4775 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4776 break;
4777 case 3: /* VORN */
f669df27 4778 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4779 break;
4780 case 4: /* VEOR */
dd8fbd78 4781 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4782 break;
4783 case 5: /* VBSL */
dd8fbd78
FN
4784 tmp3 = neon_load_reg(rd, pass);
4785 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4786 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4787 break;
4788 case 6: /* VBIT */
dd8fbd78
FN
4789 tmp3 = neon_load_reg(rd, pass);
4790 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4791 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4792 break;
4793 case 7: /* VBIF */
dd8fbd78
FN
4794 tmp3 = neon_load_reg(rd, pass);
4795 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4796 tcg_temp_free_i32(tmp3);
9ee6e8bb 4797 break;
2c0262af
FB
4798 }
4799 break;
62698be3 4800 case NEON_3R_VHSUB:
9ee6e8bb
PB
4801 GEN_NEON_INTEGER_OP(hsub);
4802 break;
62698be3 4803 case NEON_3R_VQSUB:
02da0b2d 4804 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4805 break;
62698be3 4806 case NEON_3R_VCGT:
9ee6e8bb
PB
4807 GEN_NEON_INTEGER_OP(cgt);
4808 break;
62698be3 4809 case NEON_3R_VCGE:
9ee6e8bb
PB
4810 GEN_NEON_INTEGER_OP(cge);
4811 break;
62698be3 4812 case NEON_3R_VSHL:
ad69471c 4813 GEN_NEON_INTEGER_OP(shl);
2c0262af 4814 break;
62698be3 4815 case NEON_3R_VQSHL:
02da0b2d 4816 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4817 break;
62698be3 4818 case NEON_3R_VRSHL:
ad69471c 4819 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4820 break;
62698be3 4821 case NEON_3R_VQRSHL:
02da0b2d 4822 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4823 break;
62698be3 4824 case NEON_3R_VMAX:
9ee6e8bb
PB
4825 GEN_NEON_INTEGER_OP(max);
4826 break;
62698be3 4827 case NEON_3R_VMIN:
9ee6e8bb
PB
4828 GEN_NEON_INTEGER_OP(min);
4829 break;
62698be3 4830 case NEON_3R_VABD:
9ee6e8bb
PB
4831 GEN_NEON_INTEGER_OP(abd);
4832 break;
62698be3 4833 case NEON_3R_VABA:
9ee6e8bb 4834 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4835 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4836 tmp2 = neon_load_reg(rd, pass);
4837 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4838 break;
62698be3 4839 case NEON_3R_VADD_VSUB:
9ee6e8bb 4840 if (!u) { /* VADD */
62698be3 4841 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4842 } else { /* VSUB */
4843 switch (size) {
dd8fbd78
FN
4844 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4845 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4846 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4847 default: abort();
9ee6e8bb
PB
4848 }
4849 }
4850 break;
62698be3 4851 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4852 if (!u) { /* VTST */
4853 switch (size) {
dd8fbd78
FN
4854 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4855 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4856 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4857 default: abort();
9ee6e8bb
PB
4858 }
4859 } else { /* VCEQ */
4860 switch (size) {
dd8fbd78
FN
4861 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4862 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4863 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4864 default: abort();
9ee6e8bb
PB
4865 }
4866 }
4867 break;
            case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4869 switch (size) {
dd8fbd78
FN
4870 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4871 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4872 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4873 default: abort();
9ee6e8bb 4874 }
7d1b0095 4875 tcg_temp_free_i32(tmp2);
dd8fbd78 4876 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4877 if (u) { /* VMLS */
dd8fbd78 4878 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4879 } else { /* VMLA */
dd8fbd78 4880 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4881 }
4882 break;
62698be3 4883 case NEON_3R_VMUL:
9ee6e8bb 4884 if (u) { /* polynomial */
dd8fbd78 4885 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4886 } else { /* Integer */
4887 switch (size) {
dd8fbd78
FN
4888 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4889 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4890 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4891 default: abort();
9ee6e8bb
PB
4892 }
4893 }
4894 break;
62698be3 4895 case NEON_3R_VPMAX:
9ee6e8bb
PB
4896 GEN_NEON_INTEGER_OP(pmax);
4897 break;
62698be3 4898 case NEON_3R_VPMIN:
9ee6e8bb
PB
4899 GEN_NEON_INTEGER_OP(pmin);
4900 break;
62698be3 4901 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4902 if (!u) { /* VQDMULH */
4903 switch (size) {
02da0b2d
PM
4904 case 1:
4905 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4906 break;
4907 case 2:
4908 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4909 break;
62698be3 4910 default: abort();
9ee6e8bb 4911 }
62698be3 4912 } else { /* VQRDMULH */
9ee6e8bb 4913 switch (size) {
02da0b2d
PM
4914 case 1:
4915 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4916 break;
4917 case 2:
4918 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4919 break;
62698be3 4920 default: abort();
9ee6e8bb
PB
4921 }
4922 }
4923 break;
62698be3 4924 case NEON_3R_VPADD:
9ee6e8bb 4925 switch (size) {
dd8fbd78
FN
4926 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4927 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4928 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4929 default: abort();
9ee6e8bb
PB
4930 }
4931 break;
62698be3 4932 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4933 {
4934 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4935 switch ((u << 2) | size) {
4936 case 0: /* VADD */
aa47cfdd
PM
4937 case 4: /* VPADD */
4938 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4939 break;
4940 case 2: /* VSUB */
aa47cfdd 4941 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4942 break;
4943 case 6: /* VABD */
aa47cfdd 4944 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4945 break;
4946 default:
62698be3 4947 abort();
9ee6e8bb 4948 }
aa47cfdd 4949 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4950 break;
aa47cfdd 4951 }
62698be3 4952 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4953 {
4954 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4955 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4956 if (!u) {
7d1b0095 4957 tcg_temp_free_i32(tmp2);
dd8fbd78 4958 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4959 if (size == 0) {
aa47cfdd 4960 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4961 } else {
aa47cfdd 4962 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4963 }
4964 }
aa47cfdd 4965 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4966 break;
aa47cfdd 4967 }
62698be3 4968 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4969 {
4970 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4971 if (!u) {
aa47cfdd 4972 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4973 } else {
aa47cfdd
PM
4974 if (size == 0) {
4975 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4976 } else {
4977 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4978 }
b5ff1b31 4979 }
aa47cfdd 4980 tcg_temp_free_ptr(fpstatus);
2c0262af 4981 break;
aa47cfdd 4982 }
62698be3 4983 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4984 {
4985 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4986 if (size == 0) {
4987 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4988 } else {
4989 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4990 }
4991 tcg_temp_free_ptr(fpstatus);
2c0262af 4992 break;
aa47cfdd 4993 }
62698be3 4994 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4995 {
4996 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4997 if (size == 0) {
4998 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4999 } else {
5000 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
5001 }
5002 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5003 break;
aa47cfdd 5004 }
62698be3 5005 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 5006 if (size == 0)
dd8fbd78 5007 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 5008 else
dd8fbd78 5009 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 5010 break;
9ee6e8bb
PB
5011 default:
5012 abort();
2c0262af 5013 }
7d1b0095 5014 tcg_temp_free_i32(tmp2);
dd8fbd78 5015
9ee6e8bb
PB
5016 /* Save the result. For elementwise operations we can put it
5017 straight into the destination register. For pairwise operations
5018 we have to be careful to avoid clobbering the source operands. */
5019 if (pairwise && rd == rm) {
dd8fbd78 5020 neon_store_scratch(pass, tmp);
9ee6e8bb 5021 } else {
dd8fbd78 5022 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5023 }
5024
5025 } /* for pass */
5026 if (pairwise && rd == rm) {
5027 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5028 tmp = neon_load_scratch(pass);
5029 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5030 }
5031 }
ad69471c 5032 /* End of 3 register same size operations. */
9ee6e8bb
PB
5033 } else if (insn & (1 << 4)) {
5034 if ((insn & 0x00380080) != 0) {
5035 /* Two registers and shift. */
5036 op = (insn >> 8) & 0xf;
5037 if (insn & (1 << 7)) {
cc13115b
PM
5038 /* 64-bit shift. */
5039 if (op > 7) {
5040 return 1;
5041 }
9ee6e8bb
PB
5042 size = 3;
5043 } else {
5044 size = 2;
5045 while ((insn & (1 << (size + 19))) == 0)
5046 size--;
5047 }
5048 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
5051 if (op < 8) {
5052 /* Shift by immediate:
5053 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5054 if (q && ((rd | rm) & 1)) {
5055 return 1;
5056 }
5057 if (!u && (op == 4 || op == 6)) {
5058 return 1;
5059 }
9ee6e8bb
PB
5060 /* Right shifts are encoded as N - shift, where N is the
5061 element size in bits. */
5062 if (op <= 4)
5063 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5064 if (size == 3) {
5065 count = q + 1;
5066 } else {
5067 count = q ? 4: 2;
5068 }
5069 switch (size) {
5070 case 0:
5071 imm = (uint8_t) shift;
5072 imm |= imm << 8;
5073 imm |= imm << 16;
5074 break;
5075 case 1:
5076 imm = (uint16_t) shift;
5077 imm |= imm << 16;
5078 break;
5079 case 2:
5080 case 3:
5081 imm = shift;
5082 break;
5083 default:
5084 abort();
5085 }
5086
5087 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5088 if (size == 3) {
5089 neon_load_reg64(cpu_V0, rm + pass);
5090 tcg_gen_movi_i64(cpu_V1, imm);
5091 switch (op) {
5092 case 0: /* VSHR */
5093 case 1: /* VSRA */
5094 if (u)
5095 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5096 else
ad69471c 5097 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5098 break;
ad69471c
PB
5099 case 2: /* VRSHR */
5100 case 3: /* VRSRA */
5101 if (u)
5102 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5103 else
ad69471c 5104 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5105 break;
ad69471c 5106 case 4: /* VSRI */
ad69471c
PB
5107 case 5: /* VSHL, VSLI */
5108 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5109 break;
0322b26e 5110 case 6: /* VQSHLU */
02da0b2d
PM
5111 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5112 cpu_V0, cpu_V1);
ad69471c 5113 break;
0322b26e
PM
5114 case 7: /* VQSHL */
5115 if (u) {
02da0b2d 5116 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5117 cpu_V0, cpu_V1);
5118 } else {
02da0b2d 5119 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5120 cpu_V0, cpu_V1);
5121 }
9ee6e8bb 5122 break;
9ee6e8bb 5123 }
ad69471c
PB
5124 if (op == 1 || op == 3) {
5125 /* Accumulate. */
5371cb81 5126 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5127 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5128 } else if (op == 4 || (op == 5 && u)) {
5129 /* Insert */
923e6509
CL
5130 neon_load_reg64(cpu_V1, rd + pass);
5131 uint64_t mask;
5132 if (shift < -63 || shift > 63) {
5133 mask = 0;
5134 } else {
5135 if (op == 4) {
5136 mask = 0xffffffffffffffffull >> -shift;
5137 } else {
5138 mask = 0xffffffffffffffffull << shift;
5139 }
5140 }
5141 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5142 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5143 }
5144 neon_store_reg64(cpu_V0, rd + pass);
5145 } else { /* size < 3 */
5146 /* Operands in T0 and T1. */
dd8fbd78 5147 tmp = neon_load_reg(rm, pass);
7d1b0095 5148 tmp2 = tcg_temp_new_i32();
dd8fbd78 5149 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5150 switch (op) {
5151 case 0: /* VSHR */
5152 case 1: /* VSRA */
5153 GEN_NEON_INTEGER_OP(shl);
5154 break;
5155 case 2: /* VRSHR */
5156 case 3: /* VRSRA */
5157 GEN_NEON_INTEGER_OP(rshl);
5158 break;
5159 case 4: /* VSRI */
ad69471c
PB
5160 case 5: /* VSHL, VSLI */
5161 switch (size) {
dd8fbd78
FN
5162 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5163 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5164 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5165 default: abort();
ad69471c
PB
5166 }
5167 break;
0322b26e 5168 case 6: /* VQSHLU */
ad69471c 5169 switch (size) {
0322b26e 5170 case 0:
02da0b2d
PM
5171 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5172 tmp, tmp2);
0322b26e
PM
5173 break;
5174 case 1:
02da0b2d
PM
5175 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5176 tmp, tmp2);
0322b26e
PM
5177 break;
5178 case 2:
02da0b2d
PM
5179 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5180 tmp, tmp2);
0322b26e
PM
5181 break;
5182 default:
cc13115b 5183 abort();
ad69471c
PB
5184 }
5185 break;
0322b26e 5186 case 7: /* VQSHL */
02da0b2d 5187 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5188 break;
ad69471c 5189 }
7d1b0095 5190 tcg_temp_free_i32(tmp2);
ad69471c
PB
5191
5192 if (op == 1 || op == 3) {
5193 /* Accumulate. */
dd8fbd78 5194 tmp2 = neon_load_reg(rd, pass);
5371cb81 5195 gen_neon_add(size, tmp, tmp2);
7d1b0095 5196 tcg_temp_free_i32(tmp2);
ad69471c
PB
5197 } else if (op == 4 || (op == 5 && u)) {
5198 /* Insert */
5199 switch (size) {
5200 case 0:
5201 if (op == 4)
ca9a32e4 5202 mask = 0xff >> -shift;
ad69471c 5203 else
ca9a32e4
JR
5204 mask = (uint8_t)(0xff << shift);
5205 mask |= mask << 8;
5206 mask |= mask << 16;
ad69471c
PB
5207 break;
5208 case 1:
5209 if (op == 4)
ca9a32e4 5210 mask = 0xffff >> -shift;
ad69471c 5211 else
ca9a32e4
JR
5212 mask = (uint16_t)(0xffff << shift);
5213 mask |= mask << 16;
ad69471c
PB
5214 break;
5215 case 2:
ca9a32e4
JR
5216 if (shift < -31 || shift > 31) {
5217 mask = 0;
5218 } else {
5219 if (op == 4)
5220 mask = 0xffffffffu >> -shift;
5221 else
5222 mask = 0xffffffffu << shift;
5223 }
ad69471c
PB
5224 break;
5225 default:
5226 abort();
5227 }
dd8fbd78 5228 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5229 tcg_gen_andi_i32(tmp, tmp, mask);
5230 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5231 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5232 tcg_temp_free_i32(tmp2);
ad69471c 5233 }
dd8fbd78 5234 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5235 }
5236 } /* for pass */
5237 } else if (op < 10) {
ad69471c 5238 /* Shift by immediate and narrow:
9ee6e8bb 5239 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5240 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5241 if (rm & 1) {
5242 return 1;
5243 }
9ee6e8bb
PB
5244 shift = shift - (1 << (size + 3));
5245 size++;
92cdfaeb 5246 if (size == 3) {
a7812ae4 5247 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5248 neon_load_reg64(cpu_V0, rm);
5249 neon_load_reg64(cpu_V1, rm + 1);
5250 for (pass = 0; pass < 2; pass++) {
5251 TCGv_i64 in;
5252 if (pass == 0) {
5253 in = cpu_V0;
5254 } else {
5255 in = cpu_V1;
5256 }
ad69471c 5257 if (q) {
0b36f4cd 5258 if (input_unsigned) {
92cdfaeb 5259 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5260 } else {
92cdfaeb 5261 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5262 }
ad69471c 5263 } else {
0b36f4cd 5264 if (input_unsigned) {
92cdfaeb 5265 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5266 } else {
92cdfaeb 5267 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5268 }
ad69471c 5269 }
7d1b0095 5270 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5271 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5272 neon_store_reg(rd, pass, tmp);
5273 } /* for pass */
5274 tcg_temp_free_i64(tmp64);
5275 } else {
5276 if (size == 1) {
5277 imm = (uint16_t)shift;
5278 imm |= imm << 16;
2c0262af 5279 } else {
92cdfaeb
PM
5280 /* size == 2 */
5281 imm = (uint32_t)shift;
5282 }
5283 tmp2 = tcg_const_i32(imm);
5284 tmp4 = neon_load_reg(rm + 1, 0);
5285 tmp5 = neon_load_reg(rm + 1, 1);
5286 for (pass = 0; pass < 2; pass++) {
5287 if (pass == 0) {
5288 tmp = neon_load_reg(rm, 0);
5289 } else {
5290 tmp = tmp4;
5291 }
0b36f4cd
CL
5292 gen_neon_shift_narrow(size, tmp, tmp2, q,
5293 input_unsigned);
92cdfaeb
PM
5294 if (pass == 0) {
5295 tmp3 = neon_load_reg(rm, 1);
5296 } else {
5297 tmp3 = tmp5;
5298 }
0b36f4cd
CL
5299 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5300 input_unsigned);
36aa55dc 5301 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5302 tcg_temp_free_i32(tmp);
5303 tcg_temp_free_i32(tmp3);
5304 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5305 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5306 neon_store_reg(rd, pass, tmp);
5307 } /* for pass */
c6067f04 5308 tcg_temp_free_i32(tmp2);
b75263d6 5309 }
9ee6e8bb 5310 } else if (op == 10) {
cc13115b
PM
5311 /* VSHLL, VMOVL */
5312 if (q || (rd & 1)) {
9ee6e8bb 5313 return 1;
cc13115b 5314 }
ad69471c
PB
5315 tmp = neon_load_reg(rm, 0);
5316 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5317 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5318 if (pass == 1)
5319 tmp = tmp2;
5320
5321 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5322
9ee6e8bb
PB
5323 if (shift != 0) {
5324 /* The shift is less than the width of the source
ad69471c
PB
5325 type, so we can just shift the whole register. */
5326 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
ad69471c
PB
5332 if (size < 2 || !u) {
5333 uint64_t imm64;
5334 if (size == 0) {
5335 imm = (0xffu >> (8 - shift));
5336 imm |= imm << 16;
acdf01ef 5337 } else if (size == 1) {
ad69471c 5338 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5339 } else {
5340 /* size == 2 */
5341 imm = 0xffffffff >> (32 - shift);
5342 }
5343 if (size < 2) {
5344 imm64 = imm | (((uint64_t)imm) << 32);
5345 } else {
5346 imm64 = imm;
9ee6e8bb 5347 }
acdf01ef 5348 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5349 }
5350 }
ad69471c 5351 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5352 }
f73534a5 5353 } else if (op >= 14) {
9ee6e8bb 5354 /* VCVT fixed-point. */
cc13115b
PM
5355 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5356 return 1;
5357 }
f73534a5
PM
5358 /* We have already masked out the must-be-1 top bit of imm6,
5359 * hence this 32-shift where the ARM ARM has 64-imm6.
5360 */
5361 shift = 32 - shift;
9ee6e8bb 5362 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5363 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5364 if (!(op & 1)) {
9ee6e8bb 5365 if (u)
5500b06c 5366 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5367 else
5500b06c 5368 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5369 } else {
5370 if (u)
5500b06c 5371 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5372 else
5500b06c 5373 gen_vfp_tosl(0, shift, 1);
2c0262af 5374 }
4373f3ce 5375 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5376 }
5377 } else {
9ee6e8bb
PB
5378 return 1;
5379 }
5380 } else { /* (insn & 0x00380080) == 0 */
5381 int invert;
7d80fee5
PM
5382 if (q && (rd & 1)) {
5383 return 1;
5384 }
9ee6e8bb
PB
5385
5386 op = (insn >> 8) & 0xf;
5387 /* One register and immediate. */
5388 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5389 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5390 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5391 * We choose to not special-case this and will behave as if a
5392 * valid constant encoding of 0 had been given.
5393 */
9ee6e8bb
PB
5394 switch (op) {
5395 case 0: case 1:
5396 /* no-op */
5397 break;
5398 case 2: case 3:
5399 imm <<= 8;
5400 break;
5401 case 4: case 5:
5402 imm <<= 16;
5403 break;
5404 case 6: case 7:
5405 imm <<= 24;
5406 break;
5407 case 8: case 9:
5408 imm |= imm << 16;
5409 break;
5410 case 10: case 11:
5411 imm = (imm << 8) | (imm << 24);
5412 break;
5413 case 12:
8e31209e 5414 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5415 break;
5416 case 13:
5417 imm = (imm << 16) | 0xffff;
5418 break;
5419 case 14:
5420 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5421 if (invert)
5422 imm = ~imm;
5423 break;
5424 case 15:
7d80fee5
PM
5425 if (invert) {
5426 return 1;
5427 }
9ee6e8bb
PB
5428 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5429 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5430 break;
5431 }
5432 if (invert)
5433 imm = ~imm;
5434
9ee6e8bb
PB
5435 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5436 if (op & 1 && op < 12) {
ad69471c 5437 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5438 if (invert) {
5439 /* The immediate value has already been inverted, so
5440 BIC becomes AND. */
ad69471c 5441 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5442 } else {
ad69471c 5443 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5444 }
9ee6e8bb 5445 } else {
ad69471c 5446 /* VMOV, VMVN. */
7d1b0095 5447 tmp = tcg_temp_new_i32();
9ee6e8bb 5448 if (op == 14 && invert) {
a5a14945 5449 int n;
ad69471c
PB
5450 uint32_t val;
5451 val = 0;
9ee6e8bb
PB
5452 for (n = 0; n < 4; n++) {
5453 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5454 val |= 0xff << (n * 8);
9ee6e8bb 5455 }
ad69471c
PB
5456 tcg_gen_movi_i32(tmp, val);
5457 } else {
5458 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5459 }
9ee6e8bb 5460 }
ad69471c 5461 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5462 }
5463 }
e4b3861d 5464 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5465 if (size != 3) {
5466 op = (insn >> 8) & 0xf;
5467 if ((insn & (1 << 6)) == 0) {
5468 /* Three registers of different lengths. */
5469 int src1_wide;
5470 int src2_wide;
5471 int prewiden;
695272dc
PM
5472 /* undefreq: bit 0 : UNDEF if size != 0
5473 * bit 1 : UNDEF if size == 0
5474 * bit 2 : UNDEF if U == 1
5475 * Note that [1:0] set implies 'always UNDEF'
5476 */
5477 int undefreq;
5478 /* prewiden, src1_wide, src2_wide, undefreq */
5479 static const int neon_3reg_wide[16][4] = {
5480 {1, 0, 0, 0}, /* VADDL */
5481 {1, 1, 0, 0}, /* VADDW */
5482 {1, 0, 0, 0}, /* VSUBL */
5483 {1, 1, 0, 0}, /* VSUBW */
5484 {0, 1, 1, 0}, /* VADDHN */
5485 {0, 0, 0, 0}, /* VABAL */
5486 {0, 1, 1, 0}, /* VSUBHN */
5487 {0, 0, 0, 0}, /* VABDL */
5488 {0, 0, 0, 0}, /* VMLAL */
5489 {0, 0, 0, 6}, /* VQDMLAL */
5490 {0, 0, 0, 0}, /* VMLSL */
5491 {0, 0, 0, 6}, /* VQDMLSL */
5492 {0, 0, 0, 0}, /* Integer VMULL */
5493 {0, 0, 0, 2}, /* VQDMULL */
5494 {0, 0, 0, 5}, /* Polynomial VMULL */
5495 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5496 };
5497
5498 prewiden = neon_3reg_wide[op][0];
5499 src1_wide = neon_3reg_wide[op][1];
5500 src2_wide = neon_3reg_wide[op][2];
695272dc 5501 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5502
695272dc
PM
5503 if (((undefreq & 1) && (size != 0)) ||
5504 ((undefreq & 2) && (size == 0)) ||
5505 ((undefreq & 4) && u)) {
5506 return 1;
5507 }
5508 if ((src1_wide && (rn & 1)) ||
5509 (src2_wide && (rm & 1)) ||
5510 (!src2_wide && (rd & 1))) {
ad69471c 5511 return 1;
695272dc 5512 }
ad69471c 5513
9ee6e8bb
PB
5514 /* Avoid overlapping operands. Wide source operands are
5515 always aligned so will never overlap with wide
5516 destinations in problematic ways. */
8f8e3aa4 5517 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5518 tmp = neon_load_reg(rm, 1);
5519 neon_store_scratch(2, tmp);
8f8e3aa4 5520 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5521 tmp = neon_load_reg(rn, 1);
5522 neon_store_scratch(2, tmp);
9ee6e8bb 5523 }
a50f5b91 5524 TCGV_UNUSED(tmp3);
9ee6e8bb 5525 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5526 if (src1_wide) {
5527 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5528 TCGV_UNUSED(tmp);
9ee6e8bb 5529 } else {
ad69471c 5530 if (pass == 1 && rd == rn) {
dd8fbd78 5531 tmp = neon_load_scratch(2);
9ee6e8bb 5532 } else {
ad69471c
PB
5533 tmp = neon_load_reg(rn, pass);
5534 }
5535 if (prewiden) {
5536 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5537 }
5538 }
ad69471c
PB
5539 if (src2_wide) {
5540 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5541 TCGV_UNUSED(tmp2);
9ee6e8bb 5542 } else {
ad69471c 5543 if (pass == 1 && rd == rm) {
dd8fbd78 5544 tmp2 = neon_load_scratch(2);
9ee6e8bb 5545 } else {
ad69471c
PB
5546 tmp2 = neon_load_reg(rm, pass);
5547 }
5548 if (prewiden) {
5549 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5550 }
9ee6e8bb
PB
5551 }
5552 switch (op) {
5553 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5554 gen_neon_addl(size);
9ee6e8bb 5555 break;
79b0e534 5556 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5557 gen_neon_subl(size);
9ee6e8bb
PB
5558 break;
5559 case 5: case 7: /* VABAL, VABDL */
5560 switch ((size << 1) | u) {
ad69471c
PB
5561 case 0:
5562 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5563 break;
5564 case 1:
5565 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5566 break;
5567 case 2:
5568 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5569 break;
5570 case 3:
5571 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5572 break;
5573 case 4:
5574 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5575 break;
5576 case 5:
5577 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5578 break;
9ee6e8bb
PB
5579 default: abort();
5580 }
7d1b0095
PM
5581 tcg_temp_free_i32(tmp2);
5582 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5583 break;
5584 case 8: case 9: case 10: case 11: case 12: case 13:
5585 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5586 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5587 break;
5588 case 14: /* Polynomial VMULL */
e5ca24cb 5589 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5590 tcg_temp_free_i32(tmp2);
5591 tcg_temp_free_i32(tmp);
e5ca24cb 5592 break;
695272dc
PM
5593 default: /* 15 is RESERVED: caught earlier */
5594 abort();
9ee6e8bb 5595 }
ebcd88ce
PM
5596 if (op == 13) {
5597 /* VQDMULL */
5598 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5599 neon_store_reg64(cpu_V0, rd + pass);
5600 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5601 /* Accumulate. */
ebcd88ce 5602 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5603 switch (op) {
4dc064e6
PM
5604 case 10: /* VMLSL */
5605 gen_neon_negl(cpu_V0, size);
5606 /* Fall through */
5607 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5608 gen_neon_addl(size);
9ee6e8bb
PB
5609 break;
5610 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5611 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5612 if (op == 11) {
5613 gen_neon_negl(cpu_V0, size);
5614 }
ad69471c
PB
5615 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5616 break;
9ee6e8bb
PB
5617 default:
5618 abort();
5619 }
ad69471c 5620 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5621 } else if (op == 4 || op == 6) {
5622 /* Narrowing operation. */
7d1b0095 5623 tmp = tcg_temp_new_i32();
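/* VADDHN/VSUBHN keep the high half of each 2n-bit result; the rounding
 * forms (U set) add half of the discarded low half first: the u8/u16
 * helpers do this internally, the 32-bit case adds 1 << 31 below.
 */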
79b0e534 5624 if (!u) {
9ee6e8bb 5625 switch (size) {
ad69471c
PB
5626 case 0:
5627 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5628 break;
5629 case 1:
5630 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5631 break;
5632 case 2:
5633 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5634 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5635 break;
9ee6e8bb
PB
5636 default: abort();
5637 }
5638 } else {
5639 switch (size) {
ad69471c
PB
5640 case 0:
5641 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5642 break;
5643 case 1:
5644 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5645 break;
5646 case 2:
5647 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5648 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5649 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5650 break;
9ee6e8bb
PB
5651 default: abort();
5652 }
5653 }
ad69471c
PB
5654 if (pass == 0) {
5655 tmp3 = tmp;
5656 } else {
5657 neon_store_reg(rd, 0, tmp3);
5658 neon_store_reg(rd, 1, tmp);
5659 }
9ee6e8bb
PB
5660 } else {
5661 /* Write back the result. */
ad69471c 5662 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5663 }
5664 }
5665 } else {
3e3326df
PM
5666 /* Two registers and a scalar. NB that for ops of this form
5667 * the ARM ARM labels bit 24 as Q, but it is in our variable
5668 * 'u', not 'q'.
5669 */
5670 if (size == 0) {
5671 return 1;
5672 }
9ee6e8bb 5673 switch (op) {
9ee6e8bb 5674 case 1: /* Float VMLA scalar */
9ee6e8bb 5675 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5676 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5677 if (size == 1) {
5678 return 1;
5679 }
5680 /* fall through */
5681 case 0: /* Integer VMLA scalar */
5682 case 4: /* Integer VMLS scalar */
5683 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5684 case 12: /* VQDMULH scalar */
5685 case 13: /* VQRDMULH scalar */
3e3326df
PM
5686 if (u && ((rd | rn) & 1)) {
5687 return 1;
5688 }
dd8fbd78
FN
5689 tmp = neon_get_scalar(size, rm);
5690 neon_store_scratch(0, tmp);
9ee6e8bb 5691 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5692 tmp = neon_load_scratch(0);
5693 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5694 if (op == 12) {
5695 if (size == 1) {
02da0b2d 5696 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5697 } else {
02da0b2d 5698 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5699 }
5700 } else if (op == 13) {
5701 if (size == 1) {
02da0b2d 5702 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5703 } else {
02da0b2d 5704 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5705 }
5706 } else if (op & 1) {
aa47cfdd
PM
5707 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5708 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5709 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5710 } else {
5711 switch (size) {
dd8fbd78
FN
5712 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5713 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5714 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5715 default: abort();
9ee6e8bb
PB
5716 }
5717 }
7d1b0095 5718 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5719 if (op < 8) {
5720 /* Accumulate. */
dd8fbd78 5721 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5722 switch (op) {
5723 case 0:
dd8fbd78 5724 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5725 break;
5726 case 1:
aa47cfdd
PM
5727 {
5728 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5729 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5730 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5731 break;
aa47cfdd 5732 }
9ee6e8bb 5733 case 4:
dd8fbd78 5734 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5735 break;
5736 case 5:
aa47cfdd
PM
5737 {
5738 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5739 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5740 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5741 break;
aa47cfdd 5742 }
9ee6e8bb
PB
5743 default:
5744 abort();
5745 }
7d1b0095 5746 tcg_temp_free_i32(tmp2);
9ee6e8bb 5747 }
dd8fbd78 5748 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5749 }
5750 break;
9ee6e8bb 5751 case 3: /* VQDMLAL scalar */
9ee6e8bb 5752 case 7: /* VQDMLSL scalar */
9ee6e8bb 5753 case 11: /* VQDMULL scalar */
3e3326df 5754 if (u == 1) {
ad69471c 5755 return 1;
3e3326df
PM
5756 }
5757 /* fall through */
5758 case 2: /* VMLAL scalar */
5759 case 6: /* VMLSL scalar */
5760 case 10: /* VMULL scalar */
5761 if (rd & 1) {
5762 return 1;
5763 }
dd8fbd78 5764 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5765 /* We need a copy of tmp2 because gen_neon_mull
5766 * deletes it during pass 0. */
7d1b0095 5767 tmp4 = tcg_temp_new_i32();
c6067f04 5768 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5769 tmp3 = neon_load_reg(rn, 1);
ad69471c 5770
9ee6e8bb 5771 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5772 if (pass == 0) {
5773 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5774 } else {
dd8fbd78 5775 tmp = tmp3;
c6067f04 5776 tmp2 = tmp4;
9ee6e8bb 5777 }
ad69471c 5778 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5779 if (op != 11) {
5780 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5781 }
9ee6e8bb 5782 switch (op) {
4dc064e6
PM
5783 case 6:
5784 gen_neon_negl(cpu_V0, size);
5785 /* Fall through */
5786 case 2:
ad69471c 5787 gen_neon_addl(size);
9ee6e8bb
PB
5788 break;
5789 case 3: case 7:
ad69471c 5790 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5791 if (op == 7) {
5792 gen_neon_negl(cpu_V0, size);
5793 }
ad69471c 5794 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5795 break;
5796 case 10:
5797 /* no-op */
5798 break;
5799 case 11:
ad69471c 5800 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5801 break;
5802 default:
5803 abort();
5804 }
ad69471c 5805 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5806 }
dd8fbd78 5807
dd8fbd78 5808
9ee6e8bb
PB
5809 break;
5810 default: /* 14 and 15 are RESERVED */
5811 return 1;
5812 }
5813 }
5814 } else { /* size == 3 */
5815 if (!u) {
5816 /* Extract. */
9ee6e8bb 5817 imm = (insn >> 8) & 0xf;
ad69471c
PB
5818
5819 if (imm > 7 && !q)
5820 return 1;
5821
52579ea1
PM
5822 if (q && ((rd | rn | rm) & 1)) {
5823 return 1;
5824 }
5825
ad69471c
PB
5826 if (imm == 0) {
5827 neon_load_reg64(cpu_V0, rn);
5828 if (q) {
5829 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5830 }
ad69471c
PB
5831 } else if (imm == 8) {
5832 neon_load_reg64(cpu_V0, rn + 1);
5833 if (q) {
5834 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5835 }
ad69471c 5836 } else if (q) {
a7812ae4 5837 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5838 if (imm < 8) {
5839 neon_load_reg64(cpu_V0, rn);
a7812ae4 5840 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5841 } else {
5842 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5843 neon_load_reg64(tmp64, rm);
ad69471c
PB
5844 }
5845 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5846 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5847 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5848 if (imm < 8) {
5849 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5850 } else {
ad69471c
PB
5851 neon_load_reg64(cpu_V1, rm + 1);
5852 imm -= 8;
9ee6e8bb 5853 }
ad69471c 5854 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5855 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5856 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5857 tcg_temp_free_i64(tmp64);
ad69471c 5858 } else {
a7812ae4 5859 /* BUGFIX */
ad69471c 5860 neon_load_reg64(cpu_V0, rn);
a7812ae4 5861 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5862 neon_load_reg64(cpu_V1, rm);
a7812ae4 5863 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5864 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5865 }
5866 neon_store_reg64(cpu_V0, rd);
5867 if (q) {
5868 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5869 }
5870 } else if ((insn & (1 << 11)) == 0) {
5871 /* Two register misc. */
5872 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5873 size = (insn >> 18) & 3;
600b828c
PM
5874 /* UNDEF for unknown op values and bad op-size combinations */
5875 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5876 return 1;
5877 }
fc2a9b37
PM
5878 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5879 q && ((rm | rd) & 1)) {
5880 return 1;
5881 }
9ee6e8bb 5882 switch (op) {
600b828c 5883 case NEON_2RM_VREV64:
9ee6e8bb 5884 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5885 tmp = neon_load_reg(rm, pass * 2);
5886 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5887 switch (size) {
dd8fbd78
FN
5888 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5889 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5890 case 2: /* no-op */ break;
5891 default: abort();
5892 }
dd8fbd78 5893 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5894 if (size == 2) {
dd8fbd78 5895 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5896 } else {
9ee6e8bb 5897 switch (size) {
dd8fbd78
FN
5898 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5899 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5900 default: abort();
5901 }
dd8fbd78 5902 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5903 }
5904 }
5905 break;
600b828c
PM
5906 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5907 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5908 for (pass = 0; pass < q + 1; pass++) {
5909 tmp = neon_load_reg(rm, pass * 2);
5910 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5911 tmp = neon_load_reg(rm, pass * 2 + 1);
5912 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5913 switch (size) {
5914 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5915 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5916 case 2: tcg_gen_add_i64(CPU_V001); break;
5917 default: abort();
5918 }
600b828c 5919 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5920 /* Accumulate. */
ad69471c
PB
5921 neon_load_reg64(cpu_V1, rd + pass);
5922 gen_neon_addl(size);
9ee6e8bb 5923 }
ad69471c 5924 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5925 }
5926 break;
600b828c 5927 case NEON_2RM_VTRN:
9ee6e8bb 5928 if (size == 2) {
a5a14945 5929 int n;
9ee6e8bb 5930 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5931 tmp = neon_load_reg(rm, n);
5932 tmp2 = neon_load_reg(rd, n + 1);
5933 neon_store_reg(rm, n, tmp2);
5934 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5935 }
5936 } else {
5937 goto elementwise;
5938 }
5939 break;
600b828c 5940 case NEON_2RM_VUZP:
02acedf9 5941 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5942 return 1;
9ee6e8bb
PB
5943 }
5944 break;
600b828c 5945 case NEON_2RM_VZIP:
d68a6f3a 5946 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5947 return 1;
9ee6e8bb
PB
5948 }
5949 break;
600b828c
PM
5950 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5951 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5952 if (rm & 1) {
5953 return 1;
5954 }
a50f5b91 5955 TCGV_UNUSED(tmp2);
9ee6e8bb 5956 for (pass = 0; pass < 2; pass++) {
ad69471c 5957 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5958 tmp = tcg_temp_new_i32();
600b828c
PM
5959 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5960 tmp, cpu_V0);
ad69471c
PB
5961 if (pass == 0) {
5962 tmp2 = tmp;
5963 } else {
5964 neon_store_reg(rd, 0, tmp2);
5965 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5966 }
9ee6e8bb
PB
5967 }
5968 break;
600b828c 5969 case NEON_2RM_VSHLL:
fc2a9b37 5970 if (q || (rd & 1)) {
9ee6e8bb 5971 return 1;
600b828c 5972 }
ad69471c
PB
5973 tmp = neon_load_reg(rm, 0);
5974 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5975 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5976 if (pass == 1)
5977 tmp = tmp2;
5978 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5979 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5980 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5981 }
5982 break;
600b828c 5983 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5984 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5985 q || (rm & 1)) {
5986 return 1;
5987 }
7d1b0095
PM
5988 tmp = tcg_temp_new_i32();
5989 tmp2 = tcg_temp_new_i32();
60011498 5990 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5991 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5992 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5993 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5994 tcg_gen_shli_i32(tmp2, tmp2, 16);
5995 tcg_gen_or_i32(tmp2, tmp2, tmp);
5996 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5997 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5998 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5999 neon_store_reg(rd, 0, tmp2);
7d1b0095 6000 tmp2 = tcg_temp_new_i32();
2d981da7 6001 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6002 tcg_gen_shli_i32(tmp2, tmp2, 16);
6003 tcg_gen_or_i32(tmp2, tmp2, tmp);
6004 neon_store_reg(rd, 1, tmp2);
7d1b0095 6005 tcg_temp_free_i32(tmp);
60011498 6006 break;
600b828c 6007 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6008 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6009 q || (rd & 1)) {
6010 return 1;
6011 }
7d1b0095 6012 tmp3 = tcg_temp_new_i32();
60011498
PB
6013 tmp = neon_load_reg(rm, 0);
6014 tmp2 = neon_load_reg(rm, 1);
6015 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6016 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6017 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6018 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6019 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6020 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6021 tcg_temp_free_i32(tmp);
60011498 6022 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6023 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6024 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6025 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6026 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6027 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6028 tcg_temp_free_i32(tmp2);
6029 tcg_temp_free_i32(tmp3);
60011498 6030 break;
9ee6e8bb
PB
6031 default:
6032 elementwise:
6033 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6034 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6035 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6036 neon_reg_offset(rm, pass));
dd8fbd78 6037 TCGV_UNUSED(tmp);
9ee6e8bb 6038 } else {
dd8fbd78 6039 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6040 }
6041 switch (op) {
600b828c 6042 case NEON_2RM_VREV32:
9ee6e8bb 6043 switch (size) {
dd8fbd78
FN
6044 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6045 case 1: gen_swap_half(tmp); break;
600b828c 6046 default: abort();
9ee6e8bb
PB
6047 }
6048 break;
600b828c 6049 case NEON_2RM_VREV16:
dd8fbd78 6050 gen_rev16(tmp);
9ee6e8bb 6051 break;
600b828c 6052 case NEON_2RM_VCLS:
9ee6e8bb 6053 switch (size) {
dd8fbd78
FN
6054 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6055 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6056 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6057 default: abort();
9ee6e8bb
PB
6058 }
6059 break;
600b828c 6060 case NEON_2RM_VCLZ:
9ee6e8bb 6061 switch (size) {
dd8fbd78
FN
6062 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6063 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6064 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6065 default: abort();
9ee6e8bb
PB
6066 }
6067 break;
600b828c 6068 case NEON_2RM_VCNT:
dd8fbd78 6069 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6070 break;
600b828c 6071 case NEON_2RM_VMVN:
dd8fbd78 6072 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6073 break;
600b828c 6074 case NEON_2RM_VQABS:
9ee6e8bb 6075 switch (size) {
02da0b2d
PM
6076 case 0:
6077 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6078 break;
6079 case 1:
6080 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6081 break;
6082 case 2:
6083 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6084 break;
600b828c 6085 default: abort();
9ee6e8bb
PB
6086 }
6087 break;
600b828c 6088 case NEON_2RM_VQNEG:
9ee6e8bb 6089 switch (size) {
02da0b2d
PM
6090 case 0:
6091 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6092 break;
6093 case 1:
6094 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6095 break;
6096 case 2:
6097 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6098 break;
600b828c 6099 default: abort();
9ee6e8bb
PB
6100 }
6101 break;
600b828c 6102 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6103 tmp2 = tcg_const_i32(0);
9ee6e8bb 6104 switch(size) {
dd8fbd78
FN
6105 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6106 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6107 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6108 default: abort();
9ee6e8bb 6109 }
dd8fbd78 6110 tcg_temp_free(tmp2);
600b828c 6111 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6112 tcg_gen_not_i32(tmp, tmp);
600b828c 6113 }
9ee6e8bb 6114 break;
600b828c 6115 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6116 tmp2 = tcg_const_i32(0);
9ee6e8bb 6117 switch(size) {
dd8fbd78
FN
6118 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6119 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6120 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6121 default: abort();
9ee6e8bb 6122 }
dd8fbd78 6123 tcg_temp_free(tmp2);
600b828c 6124 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6125 tcg_gen_not_i32(tmp, tmp);
600b828c 6126 }
9ee6e8bb 6127 break;
600b828c 6128 case NEON_2RM_VCEQ0:
dd8fbd78 6129 tmp2 = tcg_const_i32(0);
9ee6e8bb 6130 switch(size) {
dd8fbd78
FN
6131 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6132 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6133 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6134 default: abort();
9ee6e8bb 6135 }
dd8fbd78 6136 tcg_temp_free(tmp2);
9ee6e8bb 6137 break;
600b828c 6138 case NEON_2RM_VABS:
9ee6e8bb 6139 switch(size) {
dd8fbd78
FN
6140 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6141 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6142 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6143 default: abort();
9ee6e8bb
PB
6144 }
6145 break;
600b828c 6146 case NEON_2RM_VNEG:
dd8fbd78
FN
6147 tmp2 = tcg_const_i32(0);
6148 gen_neon_rsb(size, tmp, tmp2);
6149 tcg_temp_free(tmp2);
9ee6e8bb 6150 break;
600b828c 6151 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6152 {
6153 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6154 tmp2 = tcg_const_i32(0);
aa47cfdd 6155 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6156 tcg_temp_free(tmp2);
aa47cfdd 6157 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6158 break;
aa47cfdd 6159 }
600b828c 6160 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6161 {
6162 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6163 tmp2 = tcg_const_i32(0);
aa47cfdd 6164 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6165 tcg_temp_free(tmp2);
aa47cfdd 6166 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6167 break;
aa47cfdd 6168 }
600b828c 6169 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6170 {
6171 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6172 tmp2 = tcg_const_i32(0);
aa47cfdd 6173 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6174 tcg_temp_free(tmp2);
aa47cfdd 6175 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6176 break;
aa47cfdd 6177 }
600b828c 6178 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6179 {
6180 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6181 tmp2 = tcg_const_i32(0);
aa47cfdd 6182 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6183 tcg_temp_free(tmp2);
aa47cfdd 6184 tcg_temp_free_ptr(fpstatus);
0e326109 6185 break;
aa47cfdd 6186 }
600b828c 6187 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6188 {
6189 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6190 tmp2 = tcg_const_i32(0);
aa47cfdd 6191 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6192 tcg_temp_free(tmp2);
aa47cfdd 6193 tcg_temp_free_ptr(fpstatus);
0e326109 6194 break;
aa47cfdd 6195 }
600b828c 6196 case NEON_2RM_VABS_F:
4373f3ce 6197 gen_vfp_abs(0);
9ee6e8bb 6198 break;
600b828c 6199 case NEON_2RM_VNEG_F:
4373f3ce 6200 gen_vfp_neg(0);
9ee6e8bb 6201 break;
600b828c 6202 case NEON_2RM_VSWP:
dd8fbd78
FN
6203 tmp2 = neon_load_reg(rd, pass);
6204 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6205 break;
600b828c 6206 case NEON_2RM_VTRN:
dd8fbd78 6207 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6208 switch (size) {
dd8fbd78
FN
6209 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6210 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6211 default: abort();
9ee6e8bb 6212 }
dd8fbd78 6213 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6214 break;
600b828c 6215 case NEON_2RM_VRECPE:
dd8fbd78 6216 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6217 break;
600b828c 6218 case NEON_2RM_VRSQRTE:
dd8fbd78 6219 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6220 break;
600b828c 6221 case NEON_2RM_VRECPE_F:
4373f3ce 6222 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6223 break;
600b828c 6224 case NEON_2RM_VRSQRTE_F:
4373f3ce 6225 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6226 break;
600b828c 6227 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6228 gen_vfp_sito(0, 1);
9ee6e8bb 6229 break;
600b828c 6230 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6231 gen_vfp_uito(0, 1);
9ee6e8bb 6232 break;
600b828c 6233 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6234 gen_vfp_tosiz(0, 1);
9ee6e8bb 6235 break;
600b828c 6236 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6237 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6238 break;
6239 default:
600b828c
PM
6240 /* Reserved op values were caught by the
6241 * neon_2rm_sizes[] check earlier.
6242 */
6243 abort();
9ee6e8bb 6244 }
600b828c 6245 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6246 tcg_gen_st_f32(cpu_F0s, cpu_env,
6247 neon_reg_offset(rd, pass));
9ee6e8bb 6248 } else {
dd8fbd78 6249 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6250 }
6251 }
6252 break;
6253 }
6254 } else if ((insn & (1 << 10)) == 0) {
6255 /* VTBL, VTBX. */
56907d77
PM
6256 int n = ((insn >> 8) & 3) + 1;
6257 if ((rn + n) > 32) {
6258 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6259 * helper function running off the end of the register file.
6260 */
6261 return 1;
6262 }
6263 n <<= 3;
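/* n is now the table length in bytes (8 per list register); it is
 * passed to the tbl helper below. */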
9ee6e8bb 6264 if (insn & (1 << 6)) {
8f8e3aa4 6265 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6266 } else {
7d1b0095 6267 tmp = tcg_temp_new_i32();
8f8e3aa4 6268 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6269 }
8f8e3aa4 6270 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6271 tmp4 = tcg_const_i32(rn);
6272 tmp5 = tcg_const_i32(n);
6273 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6274 tcg_temp_free_i32(tmp);
9ee6e8bb 6275 if (insn & (1 << 6)) {
8f8e3aa4 6276 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6277 } else {
7d1b0095 6278 tmp = tcg_temp_new_i32();
8f8e3aa4 6279 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6280 }
8f8e3aa4 6281 tmp3 = neon_load_reg(rm, 1);
b75263d6 6282 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6283 tcg_temp_free_i32(tmp5);
6284 tcg_temp_free_i32(tmp4);
8f8e3aa4 6285 neon_store_reg(rd, 0, tmp2);
3018f259 6286 neon_store_reg(rd, 1, tmp3);
7d1b0095 6287 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6288 } else if ((insn & 0x380) == 0) {
6289 /* VDUP */
133da6aa
JR
6290 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6291 return 1;
6292 }
9ee6e8bb 6293 if (insn & (1 << 19)) {
dd8fbd78 6294 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6295 } else {
dd8fbd78 6296 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6297 }
6298 if (insn & (1 << 16)) {
dd8fbd78 6299 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6300 } else if (insn & (1 << 17)) {
6301 if ((insn >> 18) & 1)
dd8fbd78 6302 gen_neon_dup_high16(tmp);
9ee6e8bb 6303 else
dd8fbd78 6304 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6305 }
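/* tmp now holds the selected element replicated across a 32-bit word;
 * copy it into every word of the destination register(s). */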
6306 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6307 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6308 tcg_gen_mov_i32(tmp2, tmp);
6309 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6310 }
7d1b0095 6311 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6312 } else {
6313 return 1;
6314 }
6315 }
6316 }
6317 return 0;
6318}
6319
fe1479c3
PB
6320static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6321{
6322 int crn = (insn >> 16) & 0xf;
6323 int crm = insn & 0xf;
6324 int op1 = (insn >> 21) & 7;
6325 int op2 = (insn >> 5) & 7;
6326 int rt = (insn >> 12) & 0xf;
6327 TCGv tmp;
6328
ca27c052
PM
6329 /* Minimal set of debug registers, since we don't support debug */
6330 if (op1 == 0 && crn == 0 && op2 == 0) {
6331 switch (crm) {
6332 case 0:
6333 /* DBGDIDR: just RAZ. In particular this means the
6334 * "debug architecture version" bits will read as
6335 * a reserved value, which should cause Linux to
6336 * not try to use the debug hardware.
6337 */
6338 tmp = tcg_const_i32(0);
6339 store_reg(s, rt, tmp);
6340 return 0;
6341 case 1:
6342 case 2:
6343 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6344 * don't implement memory mapped debug components
6345 */
6346 if (ENABLE_ARCH_7) {
6347 tmp = tcg_const_i32(0);
6348 store_reg(s, rt, tmp);
6349 return 0;
6350 }
6351 break;
6352 default:
6353 break;
6354 }
6355 }
6356
fe1479c3
PB
6357 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6358 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6359 /* TEECR */
6360 if (IS_USER(s))
6361 return 1;
6362 tmp = load_cpu_field(teecr);
6363 store_reg(s, rt, tmp);
6364 return 0;
6365 }
6366 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6367 /* TEEHBR */
6368 if (IS_USER(s) && (env->teecr & 1))
6369 return 1;
6370 tmp = load_cpu_field(teehbr);
6371 store_reg(s, rt, tmp);
6372 return 0;
6373 }
6374 }
6375 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6376 op1, crn, crm, op2);
6377 return 1;
6378}
6379
6380static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6381{
6382 int crn = (insn >> 16) & 0xf;
6383 int crm = insn & 0xf;
6384 int op1 = (insn >> 21) & 7;
6385 int op2 = (insn >> 5) & 7;
6386 int rt = (insn >> 12) & 0xf;
6387 TCGv tmp;
6388
6389 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6390 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6391 /* TEECR */
6392 if (IS_USER(s))
6393 return 1;
6394 tmp = load_reg(s, rt);
6395 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6396 tcg_temp_free_i32(tmp);
fe1479c3
PB
6397 return 0;
6398 }
6399 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6400 /* TEEHBR */
6401 if (IS_USER(s) && (env->teecr & 1))
6402 return 1;
6403 tmp = load_reg(s, rt);
6404 store_cpu_field(tmp, teehbr);
6405 return 0;
6406 }
6407 }
6408 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6409 op1, crn, crm, op2);
6410 return 1;
6411}
6412
9ee6e8bb
PB
6413static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6414{
6415 int cpnum;
6416
6417 cpnum = (insn >> 8) & 0xf;
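 /* On XScale, access to each coprocessor is gated by the CPAR register;
  * a clear bit for this coprocessor makes the access undefined. */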
6418 if (arm_feature(env, ARM_FEATURE_XSCALE)
6419 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6420 return 1;
6421
6422 switch (cpnum) {
6423 case 0:
6424 case 1:
6425 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6426 return disas_iwmmxt_insn(env, s, insn);
6427 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6428 return disas_dsp_insn(env, s, insn);
6429 }
6430 return 1;
6431 case 10:
6432 case 11:
6433 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6434 case 14:
6435 /* Coprocessors 7-15 are architecturally reserved by ARM.
6436 Unfortunately Intel decided to ignore this. */
6437 if (arm_feature(env, ARM_FEATURE_XSCALE))
6438 goto board;
6439 if (insn & (1 << 20))
6440 return disas_cp14_read(env, s, insn);
6441 else
6442 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6443 case 15:
6444 return disas_cp15_insn (env, s, insn);
6445 default:
fe1479c3 6446 board:
9ee6e8bb
PB
6447 /* Unknown coprocessor. See if the board has hooked it. */
6448 return disas_cp_insn (env, s, insn);
6449 }
6450}
6451
5e3f878a
PB
6452
6453/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6454static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6455{
6456 TCGv tmp;
7d1b0095 6457 tmp = tcg_temp_new_i32();
5e3f878a
PB
6458 tcg_gen_trunc_i64_i32(tmp, val);
6459 store_reg(s, rlow, tmp);
7d1b0095 6460 tmp = tcg_temp_new_i32();
5e3f878a
PB
6461 tcg_gen_shri_i64(val, val, 32);
6462 tcg_gen_trunc_i64_i32(tmp, val);
6463 store_reg(s, rhigh, tmp);
6464}
6465
6466/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6467static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6468{
a7812ae4 6469 TCGv_i64 tmp;
5e3f878a
PB
6470 TCGv tmp2;
6471
36aa55dc 6472 /* Load value and extend to 64 bits. */
a7812ae4 6473 tmp = tcg_temp_new_i64();
5e3f878a
PB
6474 tmp2 = load_reg(s, rlow);
6475 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6476 tcg_temp_free_i32(tmp2);
5e3f878a 6477 tcg_gen_add_i64(val, val, tmp);
b75263d6 6478 tcg_temp_free_i64(tmp);
5e3f878a
PB
6479}
6480
6481/* load and add a 64-bit value from a register pair. */
a7812ae4 6482static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6483{
a7812ae4 6484 TCGv_i64 tmp;
36aa55dc
PB
6485 TCGv tmpl;
6486 TCGv tmph;
5e3f878a
PB
6487
6488 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6489 tmpl = load_reg(s, rlow);
6490 tmph = load_reg(s, rhigh);
a7812ae4 6491 tmp = tcg_temp_new_i64();
36aa55dc 6492 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6493 tcg_temp_free_i32(tmpl);
6494 tcg_temp_free_i32(tmph);
5e3f878a 6495 tcg_gen_add_i64(val, val, tmp);
b75263d6 6496 tcg_temp_free_i64(tmp);
5e3f878a
PB
6497}
6498
6499/* Set N and Z flags from a 64-bit value. */
a7812ae4 6500static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6501{
7d1b0095 6502 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6503 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6504 gen_logic_CC(tmp);
7d1b0095 6505 tcg_temp_free_i32(tmp);
5e3f878a
PB
6506}
6507
426f5abc
PB
6508/* Load/Store exclusive instructions are implemented by remembering
6509 the value/address loaded, and seeing if these are the same
6510 when the store is performed. This should be sufficient to implement
6511 the architecturally mandated semantics, and avoids having to monitor
6512 regular stores.
6513
6514 In system emulation mode only one CPU will be running at once, so
6515 this sequence is effectively atomic. In user emulation mode we
6516 throw an exception and handle the atomic operation elsewhere. */
6517static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6518 TCGv addr, int size)
6519{
6520 TCGv tmp;
6521
6522 switch (size) {
6523 case 0:
6524 tmp = gen_ld8u(addr, IS_USER(s));
6525 break;
6526 case 1:
6527 tmp = gen_ld16u(addr, IS_USER(s));
6528 break;
6529 case 2:
6530 case 3:
6531 tmp = gen_ld32(addr, IS_USER(s));
6532 break;
6533 default:
6534 abort();
6535 }
6536 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6537 store_reg(s, rt, tmp);
6538 if (size == 3) {
7d1b0095 6539 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6540 tcg_gen_addi_i32(tmp2, addr, 4);
6541 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6542 tcg_temp_free_i32(tmp2);
426f5abc
PB
6543 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6544 store_reg(s, rt2, tmp);
6545 }
6546 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6547}
6548
6549static void gen_clrex(DisasContext *s)
6550{
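 /* -1 is used as the "no outstanding exclusive access" marker; see the
  * address comparison in gen_store_exclusive. */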
6551 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6552}
6553
6554#ifdef CONFIG_USER_ONLY
6555static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6556 TCGv addr, int size)
6557{
6558 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6559 tcg_gen_movi_i32(cpu_exclusive_info,
6560 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6561 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6562}
6563#else
6564static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6565 TCGv addr, int size)
6566{
6567 TCGv tmp;
6568 int done_label;
6569 int fail_label;
6570
6571 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6572 [addr] = {Rt};
6573 {Rd} = 0;
6574 } else {
6575 {Rd} = 1;
6576 } */
6577 fail_label = gen_new_label();
6578 done_label = gen_new_label();
6579 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6580 switch (size) {
6581 case 0:
6582 tmp = gen_ld8u(addr, IS_USER(s));
6583 break;
6584 case 1:
6585 tmp = gen_ld16u(addr, IS_USER(s));
6586 break;
6587 case 2:
6588 case 3:
6589 tmp = gen_ld32(addr, IS_USER(s));
6590 break;
6591 default:
6592 abort();
6593 }
6594 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6595 tcg_temp_free_i32(tmp);
426f5abc 6596 if (size == 3) {
7d1b0095 6597 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6598 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6599 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6600 tcg_temp_free_i32(tmp2);
426f5abc 6601 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6602 tcg_temp_free_i32(tmp);
426f5abc
PB
6603 }
6604 tmp = load_reg(s, rt);
6605 switch (size) {
6606 case 0:
6607 gen_st8(tmp, addr, IS_USER(s));
6608 break;
6609 case 1:
6610 gen_st16(tmp, addr, IS_USER(s));
6611 break;
6612 case 2:
6613 case 3:
6614 gen_st32(tmp, addr, IS_USER(s));
6615 break;
6616 default:
6617 abort();
6618 }
6619 if (size == 3) {
6620 tcg_gen_addi_i32(addr, addr, 4);
6621 tmp = load_reg(s, rt2);
6622 gen_st32(tmp, addr, IS_USER(s));
6623 }
6624 tcg_gen_movi_i32(cpu_R[rd], 0);
6625 tcg_gen_br(done_label);
6626 gen_set_label(fail_label);
6627 tcg_gen_movi_i32(cpu_R[rd], 1);
6628 gen_set_label(done_label);
6629 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6630}
6631#endif
6632
9ee6e8bb
PB
6633static void disas_arm_insn(CPUState * env, DisasContext *s)
6634{
6635 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6636 TCGv tmp;
3670669c 6637 TCGv tmp2;
6ddbc6e4 6638 TCGv tmp3;
b0109805 6639 TCGv addr;
a7812ae4 6640 TCGv_i64 tmp64;
9ee6e8bb
PB
6641
6642 insn = ldl_code(s->pc);
6643 s->pc += 4;
6644
6645 /* M variants do not implement ARM mode. */
6646 if (IS_M(env))
6647 goto illegal_op;
6648 cond = insn >> 28;
6649 if (cond == 0xf){
be5e7a76
DES
6650 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6651 * choose to UNDEF. In ARMv5 and above the space is used
6652 * for miscellaneous unconditional instructions.
6653 */
6654 ARCH(5);
6655
9ee6e8bb
PB
6656 /* Unconditional instructions. */
6657 if (((insn >> 25) & 7) == 1) {
6658 /* NEON Data processing. */
6659 if (!arm_feature(env, ARM_FEATURE_NEON))
6660 goto illegal_op;
6661
6662 if (disas_neon_data_insn(env, s, insn))
6663 goto illegal_op;
6664 return;
6665 }
6666 if ((insn & 0x0f100000) == 0x04000000) {
6667 /* NEON load/store. */
6668 if (!arm_feature(env, ARM_FEATURE_NEON))
6669 goto illegal_op;
6670
6671 if (disas_neon_ls_insn(env, s, insn))
6672 goto illegal_op;
6673 return;
6674 }
3d185e5d
PM
6675 if (((insn & 0x0f30f000) == 0x0510f000) ||
6676 ((insn & 0x0f30f010) == 0x0710f000)) {
6677 if ((insn & (1 << 22)) == 0) {
6678 /* PLDW; v7MP */
6679 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6680 goto illegal_op;
6681 }
6682 }
6683 /* Otherwise PLD; v5TE+ */
be5e7a76 6684 ARCH(5TE);
3d185e5d
PM
6685 return;
6686 }
6687 if (((insn & 0x0f70f000) == 0x0450f000) ||
6688 ((insn & 0x0f70f010) == 0x0650f000)) {
6689 ARCH(7);
6690 return; /* PLI; V7 */
6691 }
6692 if (((insn & 0x0f700000) == 0x04100000) ||
6693 ((insn & 0x0f700010) == 0x06100000)) {
6694 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6695 goto illegal_op;
6696 }
6697 return; /* v7MP: Unallocated memory hint: must NOP */
6698 }
6699
6700 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6701 ARCH(6);
6702 /* setend */
6703 if (insn & (1 << 9)) {
6704 /* BE8 mode not implemented. */
6705 goto illegal_op;
6706 }
6707 return;
6708 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6709 switch ((insn >> 4) & 0xf) {
6710 case 1: /* clrex */
6711 ARCH(6K);
426f5abc 6712 gen_clrex(s);
9ee6e8bb
PB
6713 return;
6714 case 4: /* dsb */
6715 case 5: /* dmb */
6716 case 6: /* isb */
6717 ARCH(7);
6718 /* We don't emulate caches so these are a no-op. */
6719 return;
6720 default:
6721 goto illegal_op;
6722 }
6723 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6724 /* srs */
c67b6b71 6725 int32_t offset;
9ee6e8bb
PB
6726 if (IS_USER(s))
6727 goto illegal_op;
6728 ARCH(6);
6729 op1 = (insn & 0x1f);
7d1b0095 6730 addr = tcg_temp_new_i32();
39ea3d4e
PM
6731 tmp = tcg_const_i32(op1);
6732 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6733 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6734 i = (insn >> 23) & 3;
6735 switch (i) {
6736 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6737 case 1: offset = 0; break; /* IA */
6738 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6739 case 3: offset = 4; break; /* IB */
6740 default: abort();
6741 }
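/* offset moves addr to the lower of the two words to be written:
 * LR is stored there and the SPSR in the word above it. */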
6742 if (offset)
b0109805
PB
6743 tcg_gen_addi_i32(addr, addr, offset);
6744 tmp = load_reg(s, 14);
6745 gen_st32(tmp, addr, 0);
c67b6b71 6746 tmp = load_cpu_field(spsr);
b0109805
PB
6747 tcg_gen_addi_i32(addr, addr, 4);
6748 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6749 if (insn & (1 << 21)) {
6750 /* Base writeback. */
6751 switch (i) {
6752 case 0: offset = -8; break;
c67b6b71
FN
6753 case 1: offset = 4; break;
6754 case 2: offset = -4; break;
9ee6e8bb
PB
6755 case 3: offset = 0; break;
6756 default: abort();
6757 }
6758 if (offset)
c67b6b71 6759 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6760 tmp = tcg_const_i32(op1);
6761 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6762 tcg_temp_free_i32(tmp);
7d1b0095 6763 tcg_temp_free_i32(addr);
b0109805 6764 } else {
7d1b0095 6765 tcg_temp_free_i32(addr);
9ee6e8bb 6766 }
a990f58f 6767 return;
ea825eee 6768 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6769 /* rfe */
c67b6b71 6770 int32_t offset;
9ee6e8bb
PB
6771 if (IS_USER(s))
6772 goto illegal_op;
6773 ARCH(6);
6774 rn = (insn >> 16) & 0xf;
b0109805 6775 addr = load_reg(s, rn);
9ee6e8bb
PB
6776 i = (insn >> 23) & 3;
6777 switch (i) {
b0109805 6778 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6779 case 1: offset = 0; break; /* IA */
6780 case 2: offset = -8; break; /* DB */
b0109805 6781 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6782 default: abort();
6783 }
6784 if (offset)
b0109805
PB
6785 tcg_gen_addi_i32(addr, addr, offset);
6786 /* Load PC into tmp and CPSR into tmp2. */
6787 tmp = gen_ld32(addr, 0);
6788 tcg_gen_addi_i32(addr, addr, 4);
6789 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6790 if (insn & (1 << 21)) {
6791 /* Base writeback. */
6792 switch (i) {
b0109805 6793 case 0: offset = -8; break;
c67b6b71
FN
6794 case 1: offset = 4; break;
6795 case 2: offset = -4; break;
b0109805 6796 case 3: offset = 0; break;
9ee6e8bb
PB
6797 default: abort();
6798 }
6799 if (offset)
b0109805
PB
6800 tcg_gen_addi_i32(addr, addr, offset);
6801 store_reg(s, rn, addr);
6802 } else {
7d1b0095 6803 tcg_temp_free_i32(addr);
9ee6e8bb 6804 }
b0109805 6805 gen_rfe(s, tmp, tmp2);
c67b6b71 6806 return;
9ee6e8bb
PB
6807 } else if ((insn & 0x0e000000) == 0x0a000000) {
6808 /* branch link and change to thumb (blx <offset>) */
6809 int32_t offset;
6810
6811 val = (uint32_t)s->pc;
7d1b0095 6812 tmp = tcg_temp_new_i32();
d9ba4830
PB
6813 tcg_gen_movi_i32(tmp, val);
6814 store_reg(s, 14, tmp);
9ee6e8bb
PB
6815 /* Sign-extend the 24-bit offset */
6816 offset = (((int32_t)insn) << 8) >> 8;
6817 /* offset * 4 + bit24 * 2 + (thumb bit) */
6818 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6819 /* pipeline offset */
6820 val += 4;
be5e7a76 6821 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6822 gen_bx_im(s, val);
9ee6e8bb
PB
6823 return;
6824 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6825 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6826 /* iWMMXt register transfer. */
6827 if (env->cp15.c15_cpar & (1 << 1))
6828 if (!disas_iwmmxt_insn(env, s, insn))
6829 return;
6830 }
6831 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6832 /* Coprocessor double register transfer. */
be5e7a76 6833 ARCH(5TE);
9ee6e8bb
PB
6834 } else if ((insn & 0x0f000010) == 0x0e000010) {
6835 /* Additional coprocessor register transfer. */
7997d92f 6836 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6837 uint32_t mask;
6838 uint32_t val;
6839 /* cps (privileged) */
6840 if (IS_USER(s))
6841 return;
6842 mask = val = 0;
6843 if (insn & (1 << 19)) {
6844 if (insn & (1 << 8))
6845 mask |= CPSR_A;
6846 if (insn & (1 << 7))
6847 mask |= CPSR_I;
6848 if (insn & (1 << 6))
6849 mask |= CPSR_F;
6850 if (insn & (1 << 18))
6851 val |= mask;
6852 }
7997d92f 6853 if (insn & (1 << 17)) {
9ee6e8bb
PB
6854 mask |= CPSR_M;
6855 val |= (insn & 0x1f);
6856 }
6857 if (mask) {
2fbac54b 6858 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6859 }
6860 return;
6861 }
6862 goto illegal_op;
6863 }
6864 if (cond != 0xe) {
6865 /* if the instruction is not always executed, generate a conditional
6866 jump to the next instruction */
6867 s->condlabel = gen_new_label();
d9ba4830 6868 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6869 s->condjmp = 1;
6870 }
6871 if ((insn & 0x0f900000) == 0x03000000) {
6872 if ((insn & (1 << 21)) == 0) {
6873 ARCH(6T2);
6874 rd = (insn >> 12) & 0xf;
6875 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6876 if ((insn & (1 << 22)) == 0) {
6877 /* MOVW */
7d1b0095 6878 tmp = tcg_temp_new_i32();
5e3f878a 6879 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6880 } else {
6881 /* MOVT */
5e3f878a 6882 tmp = load_reg(s, rd);
86831435 6883 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6884 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6885 }
5e3f878a 6886 store_reg(s, rd, tmp);
9ee6e8bb
PB
6887 } else {
6888 if (((insn >> 12) & 0xf) != 0xf)
6889 goto illegal_op;
6890 if (((insn >> 16) & 0xf) == 0) {
6891 gen_nop_hint(s, insn & 0xff);
6892 } else {
6893 /* CPSR = immediate */
6894 val = insn & 0xff;
6895 shift = ((insn >> 8) & 0xf) * 2;
6896 if (shift)
6897 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6898 i = ((insn & (1 << 22)) != 0);
2fbac54b 6899 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6900 goto illegal_op;
6901 }
6902 }
6903 } else if ((insn & 0x0f900000) == 0x01000000
6904 && (insn & 0x00000090) != 0x00000090) {
6905 /* miscellaneous instructions */
6906 op1 = (insn >> 21) & 3;
6907 sh = (insn >> 4) & 0xf;
6908 rm = insn & 0xf;
6909 switch (sh) {
6910 case 0x0: /* move program status register */
6911 if (op1 & 1) {
6912 /* PSR = reg */
2fbac54b 6913 tmp = load_reg(s, rm);
9ee6e8bb 6914 i = ((op1 & 2) != 0);
2fbac54b 6915 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6916 goto illegal_op;
6917 } else {
6918 /* reg = PSR */
6919 rd = (insn >> 12) & 0xf;
6920 if (op1 & 2) {
6921 if (IS_USER(s))
6922 goto illegal_op;
d9ba4830 6923 tmp = load_cpu_field(spsr);
9ee6e8bb 6924 } else {
7d1b0095 6925 tmp = tcg_temp_new_i32();
d9ba4830 6926 gen_helper_cpsr_read(tmp);
9ee6e8bb 6927 }
d9ba4830 6928 store_reg(s, rd, tmp);
9ee6e8bb
PB
6929 }
6930 break;
6931 case 0x1:
6932 if (op1 == 1) {
6933 /* branch/exchange thumb (bx). */
be5e7a76 6934 ARCH(4T);
d9ba4830
PB
6935 tmp = load_reg(s, rm);
6936 gen_bx(s, tmp);
9ee6e8bb
PB
6937 } else if (op1 == 3) {
6938 /* clz */
be5e7a76 6939 ARCH(5);
9ee6e8bb 6940 rd = (insn >> 12) & 0xf;
1497c961
PB
6941 tmp = load_reg(s, rm);
6942 gen_helper_clz(tmp, tmp);
6943 store_reg(s, rd, tmp);
9ee6e8bb
PB
6944 } else {
6945 goto illegal_op;
6946 }
6947 break;
6948 case 0x2:
6949 if (op1 == 1) {
6950 ARCH(5J); /* bxj */
6951 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6952 tmp = load_reg(s, rm);
6953 gen_bx(s, tmp);
9ee6e8bb
PB
6954 } else {
6955 goto illegal_op;
6956 }
6957 break;
6958 case 0x3:
6959 if (op1 != 1)
6960 goto illegal_op;
6961
be5e7a76 6962 ARCH(5);
9ee6e8bb 6963 /* branch link/exchange thumb (blx) */
d9ba4830 6964 tmp = load_reg(s, rm);
7d1b0095 6965 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6966 tcg_gen_movi_i32(tmp2, s->pc);
6967 store_reg(s, 14, tmp2);
6968 gen_bx(s, tmp);
9ee6e8bb
PB
6969 break;
6970 case 0x5: /* saturating add/subtract */
be5e7a76 6971 ARCH(5TE);
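/* QADD/QSUB/QDADD/QDSUB: op1 bit 1 selects the doubling forms (the
 * second operand is saturating-doubled first), bit 0 selects subtract. */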
9ee6e8bb
PB
6972 rd = (insn >> 12) & 0xf;
6973 rn = (insn >> 16) & 0xf;
b40d0353 6974 tmp = load_reg(s, rm);
5e3f878a 6975 tmp2 = load_reg(s, rn);
9ee6e8bb 6976 if (op1 & 2)
5e3f878a 6977 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6978 if (op1 & 1)
5e3f878a 6979 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6980 else
5e3f878a 6981 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6982 tcg_temp_free_i32(tmp2);
5e3f878a 6983 store_reg(s, rd, tmp);
9ee6e8bb 6984 break;
49e14940
AL
6985 case 7:
6986 /* SMC instruction (op1 == 3)
6987 and undefined instructions (op1 == 0 || op1 == 2)
6988 will trap */
6989 if (op1 != 1) {
6990 goto illegal_op;
6991 }
6992 /* bkpt */
be5e7a76 6993 ARCH(5);
bc4a0de0 6994 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6995 break;
6996 case 0x8: /* signed multiply */
6997 case 0xa:
6998 case 0xc:
6999 case 0xe:
be5e7a76 7000 ARCH(5TE);
9ee6e8bb
PB
7001 rs = (insn >> 8) & 0xf;
7002 rn = (insn >> 12) & 0xf;
7003 rd = (insn >> 16) & 0xf;
7004 if (op1 == 1) {
7005 /* (32 * 16) >> 16 */
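/* SMULW<y>/SMLAW<y>: multiply Rm by the chosen halfword of Rs and keep
 * the top 32 bits of the 48-bit product; the accumulating form then adds
 * Rn, setting the Q flag on overflow. */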
5e3f878a
PB
7006 tmp = load_reg(s, rm);
7007 tmp2 = load_reg(s, rs);
9ee6e8bb 7008 if (sh & 4)
5e3f878a 7009 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7010 else
5e3f878a 7011 gen_sxth(tmp2);
a7812ae4
PB
7012 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7013 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7014 tmp = tcg_temp_new_i32();
a7812ae4 7015 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7016 tcg_temp_free_i64(tmp64);
9ee6e8bb 7017 if ((sh & 2) == 0) {
5e3f878a
PB
7018 tmp2 = load_reg(s, rn);
7019 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7020 tcg_temp_free_i32(tmp2);
9ee6e8bb 7021 }
5e3f878a 7022 store_reg(s, rd, tmp);
9ee6e8bb
PB
7023 } else {
7024 /* 16 * 16 */
5e3f878a
PB
7025 tmp = load_reg(s, rm);
7026 tmp2 = load_reg(s, rs);
7027 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7028 tcg_temp_free_i32(tmp2);
9ee6e8bb 7029 if (op1 == 2) {
a7812ae4
PB
7030 tmp64 = tcg_temp_new_i64();
7031 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7032 tcg_temp_free_i32(tmp);
a7812ae4
PB
7033 gen_addq(s, tmp64, rn, rd);
7034 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7035 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7036 } else {
7037 if (op1 == 0) {
5e3f878a
PB
7038 tmp2 = load_reg(s, rn);
7039 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7040 tcg_temp_free_i32(tmp2);
9ee6e8bb 7041 }
5e3f878a 7042 store_reg(s, rd, tmp);
9ee6e8bb
PB
7043 }
7044 }
7045 break;
7046 default:
7047 goto illegal_op;
7048 }
7049 } else if (((insn & 0x0e000000) == 0 &&
7050 (insn & 0x00000090) != 0x90) ||
7051 ((insn & 0x0e000000) == (1 << 25))) {
7052 int set_cc, logic_cc, shiftop;
7053
7054 op1 = (insn >> 21) & 0xf;
7055 set_cc = (insn >> 20) & 1;
7056 logic_cc = table_logic_cc[op1] & set_cc;
7057
7058 /* data processing instruction */
7059 if (insn & (1 << 25)) {
7060 /* immediate operand */
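/* 8-bit immediate rotated right by twice the 4-bit rotate field; for
 * flag-setting logical ops a nonzero rotation also updates the carry
 * flag from bit 31 of the rotated value. */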
7061 val = insn & 0xff;
7062 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7063 if (shift) {
9ee6e8bb 7064 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7065 }
7d1b0095 7066 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7067 tcg_gen_movi_i32(tmp2, val);
7068 if (logic_cc && shift) {
7069 gen_set_CF_bit31(tmp2);
7070 }
9ee6e8bb
PB
7071 } else {
7072 /* register */
7073 rm = (insn) & 0xf;
e9bb4aa9 7074 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7075 shiftop = (insn >> 5) & 3;
7076 if (!(insn & (1 << 4))) {
7077 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7078 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7079 } else {
7080 rs = (insn >> 8) & 0xf;
8984bd2e 7081 tmp = load_reg(s, rs);
e9bb4aa9 7082 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7083 }
7084 }
7085 if (op1 != 0x0f && op1 != 0x0d) {
7086 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7087 tmp = load_reg(s, rn);
7088 } else {
7089 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7090 }
7091 rd = (insn >> 12) & 0xf;
7092 switch(op1) {
7093 case 0x00:
e9bb4aa9
JR
7094 tcg_gen_and_i32(tmp, tmp, tmp2);
7095 if (logic_cc) {
7096 gen_logic_CC(tmp);
7097 }
21aeb343 7098 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7099 break;
7100 case 0x01:
e9bb4aa9
JR
7101 tcg_gen_xor_i32(tmp, tmp, tmp2);
7102 if (logic_cc) {
7103 gen_logic_CC(tmp);
7104 }
21aeb343 7105 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7106 break;
7107 case 0x02:
7108 if (set_cc && rd == 15) {
7109 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7110 if (IS_USER(s)) {
9ee6e8bb 7111 goto illegal_op;
e9bb4aa9
JR
7112 }
7113 gen_helper_sub_cc(tmp, tmp, tmp2);
7114 gen_exception_return(s, tmp);
9ee6e8bb 7115 } else {
e9bb4aa9
JR
7116 if (set_cc) {
7117 gen_helper_sub_cc(tmp, tmp, tmp2);
7118 } else {
7119 tcg_gen_sub_i32(tmp, tmp, tmp2);
7120 }
21aeb343 7121 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7122 }
7123 break;
7124 case 0x03:
e9bb4aa9
JR
7125 if (set_cc) {
7126 gen_helper_sub_cc(tmp, tmp2, tmp);
7127 } else {
7128 tcg_gen_sub_i32(tmp, tmp2, tmp);
7129 }
21aeb343 7130 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7131 break;
7132 case 0x04:
e9bb4aa9
JR
7133 if (set_cc) {
7134 gen_helper_add_cc(tmp, tmp, tmp2);
7135 } else {
7136 tcg_gen_add_i32(tmp, tmp, tmp2);
7137 }
21aeb343 7138 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7139 break;
7140 case 0x05:
e9bb4aa9
JR
7141 if (set_cc) {
7142 gen_helper_adc_cc(tmp, tmp, tmp2);
7143 } else {
7144 gen_add_carry(tmp, tmp, tmp2);
7145 }
21aeb343 7146 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7147 break;
7148 case 0x06:
e9bb4aa9
JR
7149 if (set_cc) {
7150 gen_helper_sbc_cc(tmp, tmp, tmp2);
7151 } else {
7152 gen_sub_carry(tmp, tmp, tmp2);
7153 }
21aeb343 7154 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7155 break;
7156 case 0x07:
e9bb4aa9
JR
7157 if (set_cc) {
7158 gen_helper_sbc_cc(tmp, tmp2, tmp);
7159 } else {
7160 gen_sub_carry(tmp, tmp2, tmp);
7161 }
21aeb343 7162 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7163 break;
7164 case 0x08:
7165 if (set_cc) {
e9bb4aa9
JR
7166 tcg_gen_and_i32(tmp, tmp, tmp2);
7167 gen_logic_CC(tmp);
9ee6e8bb 7168 }
7d1b0095 7169 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7170 break;
7171 case 0x09:
7172 if (set_cc) {
e9bb4aa9
JR
7173 tcg_gen_xor_i32(tmp, tmp, tmp2);
7174 gen_logic_CC(tmp);
9ee6e8bb 7175 }
7d1b0095 7176 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7177 break;
7178 case 0x0a:
7179 if (set_cc) {
e9bb4aa9 7180 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7181 }
7d1b0095 7182 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7183 break;
7184 case 0x0b:
7185 if (set_cc) {
e9bb4aa9 7186 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7187 }
7d1b0095 7188 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7189 break;
7190 case 0x0c:
e9bb4aa9
JR
7191 tcg_gen_or_i32(tmp, tmp, tmp2);
7192 if (logic_cc) {
7193 gen_logic_CC(tmp);
7194 }
21aeb343 7195 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7196 break;
7197 case 0x0d:
7198 if (logic_cc && rd == 15) {
7199 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7200 if (IS_USER(s)) {
9ee6e8bb 7201 goto illegal_op;
e9bb4aa9
JR
7202 }
7203 gen_exception_return(s, tmp2);
9ee6e8bb 7204 } else {
e9bb4aa9
JR
7205 if (logic_cc) {
7206 gen_logic_CC(tmp2);
7207 }
21aeb343 7208 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7209 }
7210 break;
7211 case 0x0e:
f669df27 7212 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7213 if (logic_cc) {
7214 gen_logic_CC(tmp);
7215 }
21aeb343 7216 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7217 break;
7218 default:
7219 case 0x0f:
e9bb4aa9
JR
7220 tcg_gen_not_i32(tmp2, tmp2);
7221 if (logic_cc) {
7222 gen_logic_CC(tmp2);
7223 }
21aeb343 7224 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7225 break;
7226 }
e9bb4aa9 7227 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7228 tcg_temp_free_i32(tmp2);
e9bb4aa9 7229 }
9ee6e8bb
PB
7230 } else {
7231 /* other instructions */
7232 op1 = (insn >> 24) & 0xf;
7233 switch(op1) {
7234 case 0x0:
7235 case 0x1:
7236 /* multiplies, extra load/stores */
7237 sh = (insn >> 5) & 3;
7238 if (sh == 0) {
7239 if (op1 == 0x0) {
7240 rd = (insn >> 16) & 0xf;
7241 rn = (insn >> 12) & 0xf;
7242 rs = (insn >> 8) & 0xf;
7243 rm = (insn) & 0xf;
7244 op1 = (insn >> 20) & 0xf;
7245 switch (op1) {
7246 case 0: case 1: case 2: case 3: case 6:
7247 /* 32 bit mul */
5e3f878a
PB
7248 tmp = load_reg(s, rs);
7249 tmp2 = load_reg(s, rm);
7250 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7251 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7252 if (insn & (1 << 22)) {
7253 /* Subtract (mls) */
7254 ARCH(6T2);
5e3f878a
PB
7255 tmp2 = load_reg(s, rn);
7256 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7257 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7258 } else if (insn & (1 << 21)) {
7259 /* Add */
5e3f878a
PB
7260 tmp2 = load_reg(s, rn);
7261 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7262 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7263 }
7264 if (insn & (1 << 20))
5e3f878a
PB
7265 gen_logic_CC(tmp);
7266 store_reg(s, rd, tmp);
9ee6e8bb 7267 break;
8aac08b1
AJ
7268 case 4:
7269 /* 64 bit mul double accumulate (UMAAL) */
7270 ARCH(6);
7271 tmp = load_reg(s, rs);
7272 tmp2 = load_reg(s, rm);
7273 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7274 gen_addq_lo(s, tmp64, rn);
7275 gen_addq_lo(s, tmp64, rd);
7276 gen_storeq_reg(s, rn, rd, tmp64);
7277 tcg_temp_free_i64(tmp64);
7278 break;
7279 case 8: case 9: case 10: case 11:
7280 case 12: case 13: case 14: case 15:
7281 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7282 tmp = load_reg(s, rs);
7283 tmp2 = load_reg(s, rm);
8aac08b1 7284 if (insn & (1 << 22)) {
a7812ae4 7285 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7286 } else {
a7812ae4 7287 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7288 }
7289 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7290 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7291 }
8aac08b1 7292 if (insn & (1 << 20)) {
a7812ae4 7293 gen_logicq_cc(tmp64);
8aac08b1 7294 }
a7812ae4 7295 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7296 tcg_temp_free_i64(tmp64);
9ee6e8bb 7297 break;
8aac08b1
AJ
7298 default:
7299 goto illegal_op;
9ee6e8bb
PB
7300 }
7301 } else {
7302 rn = (insn >> 16) & 0xf;
7303 rd = (insn >> 12) & 0xf;
7304 if (insn & (1 << 23)) {
7305 /* load/store exclusive */
86753403
PB
7306 op1 = (insn >> 21) & 0x3;
7307 if (op1)
a47f43d2 7308 ARCH(6K);
86753403
PB
7309 else
7310 ARCH(6);
3174f8e9 7311 addr = tcg_temp_local_new_i32();
98a46317 7312 load_reg_var(s, addr, rn);
9ee6e8bb 7313 if (insn & (1 << 20)) {
86753403
PB
7314 switch (op1) {
7315 case 0: /* ldrex */
426f5abc 7316 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7317 break;
7318 case 1: /* ldrexd */
426f5abc 7319 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7320 break;
7321 case 2: /* ldrexb */
426f5abc 7322 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7323 break;
7324 case 3: /* ldrexh */
426f5abc 7325 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7326 break;
7327 default:
7328 abort();
7329 }
9ee6e8bb
PB
7330 } else {
7331 rm = insn & 0xf;
86753403
PB
7332 switch (op1) {
7333 case 0: /* strex */
426f5abc 7334 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7335 break;
7336 case 1: /* strexd */
502e64fe 7337 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7338 break;
7339 case 2: /* strexb */
426f5abc 7340 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7341 break;
7342 case 3: /* strexh */
426f5abc 7343 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7344 break;
7345 default:
7346 abort();
7347 }
9ee6e8bb 7348 }
3174f8e9 7349 tcg_temp_free(addr);
9ee6e8bb
PB
7350 } else {
7351 /* SWP instruction */
7352 rm = (insn) & 0xf;
7353
8984bd2e
PB
7354 /* ??? This is not really atomic. However we know
7355 we never have multiple CPUs running in parallel,
7356 so it is good enough. */
7357 addr = load_reg(s, rn);
7358 tmp = load_reg(s, rm);
9ee6e8bb 7359 if (insn & (1 << 22)) {
8984bd2e
PB
7360 tmp2 = gen_ld8u(addr, IS_USER(s));
7361 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7362 } else {
8984bd2e
PB
7363 tmp2 = gen_ld32(addr, IS_USER(s));
7364 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7365 }
7d1b0095 7366 tcg_temp_free_i32(addr);
8984bd2e 7367 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7368 }
7369 }
7370 } else {
7371 int address_offset;
7372 int load;
7373 /* Misc load/store */
7374 rn = (insn >> 16) & 0xf;
7375 rd = (insn >> 12) & 0xf;
b0109805 7376 addr = load_reg(s, rn);
9ee6e8bb 7377 if (insn & (1 << 24))
b0109805 7378 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7379 address_offset = 0;
7380 if (insn & (1 << 20)) {
7381 /* load */
7382 switch(sh) {
7383 case 1:
b0109805 7384 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7385 break;
7386 case 2:
b0109805 7387 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7388 break;
7389 default:
7390 case 3:
b0109805 7391 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7392 break;
7393 }
7394 load = 1;
7395 } else if (sh & 2) {
be5e7a76 7396 ARCH(5TE);
9ee6e8bb
PB
7397 /* doubleword */
7398 if (sh & 1) {
7399 /* store */
b0109805
PB
7400 tmp = load_reg(s, rd);
7401 gen_st32(tmp, addr, IS_USER(s));
7402 tcg_gen_addi_i32(addr, addr, 4);
7403 tmp = load_reg(s, rd + 1);
7404 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7405 load = 0;
7406 } else {
7407 /* load */
b0109805
PB
7408 tmp = gen_ld32(addr, IS_USER(s));
7409 store_reg(s, rd, tmp);
7410 tcg_gen_addi_i32(addr, addr, 4);
7411 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7412 rd++;
7413 load = 1;
7414 }
7415 address_offset = -4;
7416 } else {
7417 /* store */
b0109805
PB
7418 tmp = load_reg(s, rd);
7419 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7420 load = 0;
7421 }
7422 /* Perform base writeback before the loaded value to
7423 ensure correct behavior with overlapping index registers.
7424 ldrd with base writeback is undefined if the
7425 destination and index registers overlap. */
7426 if (!(insn & (1 << 24))) {
b0109805
PB
7427 gen_add_datah_offset(s, insn, address_offset, addr);
7428 store_reg(s, rn, addr);
9ee6e8bb
PB
7429 } else if (insn & (1 << 21)) {
7430 if (address_offset)
b0109805
PB
7431 tcg_gen_addi_i32(addr, addr, address_offset);
7432 store_reg(s, rn, addr);
7433 } else {
7d1b0095 7434 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7435 }
7436 if (load) {
7437 /* Complete the load. */
b0109805 7438 store_reg(s, rd, tmp);
9ee6e8bb
PB
7439 }
7440 }
7441 break;
7442 case 0x4:
7443 case 0x5:
7444 goto do_ldst;
7445 case 0x6:
7446 case 0x7:
7447 if (insn & (1 << 4)) {
7448 ARCH(6);
7449 /* ARMv6 Media instructions. */
7450 rm = insn & 0xf;
7451 rn = (insn >> 16) & 0xf;
2c0262af 7452 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7453 rs = (insn >> 8) & 0xf;
7454 switch ((insn >> 23) & 3) {
7455 case 0: /* Parallel add/subtract. */
7456 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7457 tmp = load_reg(s, rn);
7458 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7459 sh = (insn >> 5) & 7;
7460 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7461 goto illegal_op;
6ddbc6e4 7462 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7463 tcg_temp_free_i32(tmp2);
6ddbc6e4 7464 store_reg(s, rd, tmp);
9ee6e8bb
PB
7465 break;
7466 case 1:
7467 if ((insn & 0x00700020) == 0) {
6c95676b 7468 /* Halfword pack. */
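                /* PKHBT keeps the bottom half of Rn and takes the top half from
                   Rm shifted left; PKHTB keeps the top half of Rn and takes the
                   bottom half from Rm shifted right.  E.g. PKHBT r0, r1, r2, LSL #8
                   with r1 = 0x11112222, r2 = 0x00ABCD00 gives r0 = 0xABCD2222. */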
3670669c
PB
7469 tmp = load_reg(s, rn);
7470 tmp2 = load_reg(s, rm);
9ee6e8bb 7471 shift = (insn >> 7) & 0x1f;
3670669c
PB
7472 if (insn & (1 << 6)) {
7473 /* pkhtb */
22478e79
AZ
7474 if (shift == 0)
7475 shift = 31;
7476 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7477 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7478 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7479 } else {
7480 /* pkhbt */
22478e79
AZ
7481 if (shift)
7482 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7483 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7484 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7485 }
7486 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7487 tcg_temp_free_i32(tmp2);
3670669c 7488 store_reg(s, rd, tmp);
9ee6e8bb
PB
7489 } else if ((insn & 0x00200020) == 0x00200000) {
7490 /* [us]sat */
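                /* Saturation semantics: USAT clamps the (shifted) source to the
                   unsigned range [0, 2^N - 1] and SSAT to the signed range
                   [-2^(N-1), 2^(N-1) - 1] for a saturation width of N bits,
                   setting CPSR.Q whenever clamping actually occurred. */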
6ddbc6e4 7491 tmp = load_reg(s, rm);
9ee6e8bb
PB
7492 shift = (insn >> 7) & 0x1f;
7493 if (insn & (1 << 6)) {
7494 if (shift == 0)
7495 shift = 31;
6ddbc6e4 7496 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7497 } else {
6ddbc6e4 7498 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7499 }
7500 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7501 tmp2 = tcg_const_i32(sh);
7502 if (insn & (1 << 22))
7503 gen_helper_usat(tmp, tmp, tmp2);
7504 else
7505 gen_helper_ssat(tmp, tmp, tmp2);
7506 tcg_temp_free_i32(tmp2);
6ddbc6e4 7507 store_reg(s, rd, tmp);
9ee6e8bb
PB
7508 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7509 /* [us]sat16 */
6ddbc6e4 7510 tmp = load_reg(s, rm);
9ee6e8bb 7511 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7512 tmp2 = tcg_const_i32(sh);
7513 if (insn & (1 << 22))
7514 gen_helper_usat16(tmp, tmp, tmp2);
7515 else
7516 gen_helper_ssat16(tmp, tmp, tmp2);
7517 tcg_temp_free_i32(tmp2);
6ddbc6e4 7518 store_reg(s, rd, tmp);
9ee6e8bb
PB
7519 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7520 /* Select bytes. */
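                /* SEL chooses each byte of the result from Rn when the
                   corresponding GE flag is set and from Rm otherwise; the GE
                   flags come from the parallel add/subtract instructions
                   handled above. */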
6ddbc6e4
PB
7521 tmp = load_reg(s, rn);
7522 tmp2 = load_reg(s, rm);
7d1b0095 7523 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7524 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7525 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7526 tcg_temp_free_i32(tmp3);
7527 tcg_temp_free_i32(tmp2);
6ddbc6e4 7528 store_reg(s, rd, tmp);
9ee6e8bb 7529 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7530 tmp = load_reg(s, rm);
9ee6e8bb 7531 shift = (insn >> 10) & 3;
1301f322 7532 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7533 rotate; a shift is sufficient. */
7534 if (shift != 0)
f669df27 7535 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7536 op1 = (insn >> 20) & 7;
7537 switch (op1) {
5e3f878a
PB
7538 case 0: gen_sxtb16(tmp); break;
7539 case 2: gen_sxtb(tmp); break;
7540 case 3: gen_sxth(tmp); break;
7541 case 4: gen_uxtb16(tmp); break;
7542 case 6: gen_uxtb(tmp); break;
7543 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7544 default: goto illegal_op;
7545 }
7546 if (rn != 15) {
5e3f878a 7547 tmp2 = load_reg(s, rn);
9ee6e8bb 7548 if ((op1 & 3) == 0) {
5e3f878a 7549 gen_add16(tmp, tmp2);
9ee6e8bb 7550 } else {
5e3f878a 7551 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7552 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7553 }
7554 }
6c95676b 7555 store_reg(s, rd, tmp);
9ee6e8bb
PB
7556 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7557 /* rev */
b0109805 7558 tmp = load_reg(s, rm);
9ee6e8bb
PB
7559 if (insn & (1 << 22)) {
7560 if (insn & (1 << 7)) {
b0109805 7561 gen_revsh(tmp);
9ee6e8bb
PB
7562 } else {
7563 ARCH(6T2);
b0109805 7564 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7565 }
7566 } else {
7567 if (insn & (1 << 7))
b0109805 7568 gen_rev16(tmp);
9ee6e8bb 7569 else
66896cb8 7570 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7571 }
b0109805 7572 store_reg(s, rd, tmp);
9ee6e8bb
PB
7573 } else {
7574 goto illegal_op;
7575 }
7576 break;
7577 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7578 tmp = load_reg(s, rm);
7579 tmp2 = load_reg(s, rs);
9ee6e8bb 7580 if (insn & (1 << 20)) {
838fa72d
AJ
7581 /* Signed multiply most significant [accumulate].
7582 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7583 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7584
955a7dd5 7585 if (rd != 15) {
838fa72d 7586 tmp = load_reg(s, rd);
9ee6e8bb 7587 if (insn & (1 << 6)) {
838fa72d 7588 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7589 } else {
838fa72d 7590 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7591 }
7592 }
838fa72d
AJ
7593 if (insn & (1 << 5)) {
7594 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7595 }
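                        /* Insn bit 5 is the R suffix (SMMULR/SMMLAR/SMMLSR): adding
                           0x80000000 before dropping the low word below rounds the
                           result to nearest instead of truncating, e.g. a 64-bit
                           value of 0x1_8000_0000 yields 2 rather than 1. */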
7596 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7597 tmp = tcg_temp_new_i32();
838fa72d
AJ
7598 tcg_gen_trunc_i64_i32(tmp, tmp64);
7599 tcg_temp_free_i64(tmp64);
955a7dd5 7600 store_reg(s, rn, tmp);
9ee6e8bb
PB
7601 } else {
7602 if (insn & (1 << 5))
5e3f878a
PB
7603 gen_swap_half(tmp2);
7604 gen_smul_dual(tmp, tmp2);
5e3f878a 7605 if (insn & (1 << 6)) {
e1d177b9 7606 /* This subtraction cannot overflow. */
5e3f878a
PB
7607 tcg_gen_sub_i32(tmp, tmp, tmp2);
7608 } else {
e1d177b9
PM
7609 /* This addition cannot overflow 32 bits;
7610 * however it may overflow considered as a signed
7611 * operation, in which case we must set the Q flag.
7612 */
7613 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7614 }
7d1b0095 7615 tcg_temp_free_i32(tmp2);
9ee6e8bb 7616 if (insn & (1 << 22)) {
5e3f878a 7617 /* smlald, smlsld */
a7812ae4
PB
7618 tmp64 = tcg_temp_new_i64();
7619 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7620 tcg_temp_free_i32(tmp);
a7812ae4
PB
7621 gen_addq(s, tmp64, rd, rn);
7622 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7623 tcg_temp_free_i64(tmp64);
9ee6e8bb 7624 } else {
5e3f878a 7625 /* smuad, smusd, smlad, smlsd */
22478e79 7626 if (rd != 15)
9ee6e8bb 7627 {
22478e79 7628 tmp2 = load_reg(s, rd);
5e3f878a 7629 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7630 tcg_temp_free_i32(tmp2);
9ee6e8bb 7631 }
22478e79 7632 store_reg(s, rn, tmp);
9ee6e8bb
PB
7633 }
7634 }
7635 break;
7636 case 3:
7637 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7638 switch (op1) {
7639 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7640 ARCH(6);
7641 tmp = load_reg(s, rm);
7642 tmp2 = load_reg(s, rs);
7643 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7644 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7645 if (rd != 15) {
7646 tmp2 = load_reg(s, rd);
6ddbc6e4 7647 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7648 tcg_temp_free_i32(tmp2);
9ee6e8bb 7649 }
ded9d295 7650 store_reg(s, rn, tmp);
9ee6e8bb
PB
7651 break;
7652 case 0x20: case 0x24: case 0x28: case 0x2c:
7653 /* Bitfield insert/clear. */
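                /* BFI Rd, Rm, #lsb, #width copies Rm[width-1:0] into
                   Rd[lsb+width-1:lsb] and leaves the other bits of Rd unchanged;
                   when Rm is r15 the encoding is BFC, handled here by using a
                   zero source.  E.g. BFI r0, r1, #8, #4 replaces r0[11:8]
                   with r1[3:0]. */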
7654 ARCH(6T2);
7655 shift = (insn >> 7) & 0x1f;
7656 i = (insn >> 16) & 0x1f;
7657 i = i + 1 - shift;
7658 if (rm == 15) {
7d1b0095 7659 tmp = tcg_temp_new_i32();
5e3f878a 7660 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7661 } else {
5e3f878a 7662 tmp = load_reg(s, rm);
9ee6e8bb
PB
7663 }
7664 if (i != 32) {
5e3f878a 7665 tmp2 = load_reg(s, rd);
8f8e3aa4 7666 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7667 tcg_temp_free_i32(tmp2);
9ee6e8bb 7668 }
5e3f878a 7669 store_reg(s, rd, tmp);
9ee6e8bb
PB
7670 break;
7671 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7672 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7673 ARCH(6T2);
5e3f878a 7674 tmp = load_reg(s, rm);
9ee6e8bb
PB
7675 shift = (insn >> 7) & 0x1f;
7676 i = ((insn >> 16) & 0x1f) + 1;
7677 if (shift + i > 32)
7678 goto illegal_op;
7679 if (i < 32) {
7680 if (op1 & 0x20) {
5e3f878a 7681 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7682 } else {
5e3f878a 7683 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7684 }
7685 }
5e3f878a 7686 store_reg(s, rd, tmp);
9ee6e8bb
PB
7687 break;
7688 default:
7689 goto illegal_op;
7690 }
7691 break;
7692 }
7693 break;
7694 }
7695 do_ldst:
7696 /* Check for undefined extension instructions
7697 * per the ARM Bible, i.e.:
7698 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7699 */
7700 sh = (0xf << 20) | (0xf << 4);
7701 if (op1 == 0x7 && ((insn & sh) == sh))
7702 {
7703 goto illegal_op;
7704 }
7705 /* load/store byte/word */
7706 rn = (insn >> 16) & 0xf;
7707 rd = (insn >> 12) & 0xf;
b0109805 7708 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7709 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7710 if (insn & (1 << 24))
b0109805 7711 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7712 if (insn & (1 << 20)) {
7713 /* load */
9ee6e8bb 7714 if (insn & (1 << 22)) {
b0109805 7715 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7716 } else {
b0109805 7717 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7718 }
9ee6e8bb
PB
7719 } else {
7720 /* store */
b0109805 7721 tmp = load_reg(s, rd);
9ee6e8bb 7722 if (insn & (1 << 22))
b0109805 7723 gen_st8(tmp, tmp2, i);
9ee6e8bb 7724 else
b0109805 7725 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7726 }
7727 if (!(insn & (1 << 24))) {
b0109805
PB
7728 gen_add_data_offset(s, insn, tmp2);
7729 store_reg(s, rn, tmp2);
7730 } else if (insn & (1 << 21)) {
7731 store_reg(s, rn, tmp2);
7732 } else {
7d1b0095 7733 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7734 }
7735 if (insn & (1 << 20)) {
7736 /* Complete the load. */
be5e7a76 7737 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7738 }
7739 break;
7740 case 0x08:
7741 case 0x09:
7742 {
7743 int j, n, user, loaded_base;
b0109805 7744 TCGv loaded_var;
9ee6e8bb
PB
7745 /* load/store multiple words */
7746 /* XXX: store correct base if write back */
7747 user = 0;
7748 if (insn & (1 << 22)) {
7749 if (IS_USER(s))
7750 goto illegal_op; /* only usable in supervisor mode */
7751
7752 if ((insn & (1 << 15)) == 0)
7753 user = 1;
7754 }
7755 rn = (insn >> 16) & 0xf;
b0109805 7756 addr = load_reg(s, rn);
9ee6e8bb
PB
7757
7758 /* compute total size */
7759 loaded_base = 0;
a50f5b91 7760 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7761 n = 0;
7762 for(i=0;i<16;i++) {
7763 if (insn & (1 << i))
7764 n++;
7765 }
7766 /* XXX: test invalid n == 0 case ? */
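                /* All four addressing modes are normalised to an ascending walk:
                   the base is pre-adjusted below (-n*4 for pre-decrement,
                   -(n-1)*4 for post-decrement, +4 for pre-increment) so the
                   transfer loop can always step upward by 4, keeping the
                   lowest-numbered register at the lowest address as the
                   architecture requires. */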
7767 if (insn & (1 << 23)) {
7768 if (insn & (1 << 24)) {
7769 /* pre increment */
b0109805 7770 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7771 } else {
7772 /* post increment */
7773 }
7774 } else {
7775 if (insn & (1 << 24)) {
7776 /* pre decrement */
b0109805 7777 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7778 } else {
7779 /* post decrement */
7780 if (n != 1)
b0109805 7781 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7782 }
7783 }
7784 j = 0;
7785 for(i=0;i<16;i++) {
7786 if (insn & (1 << i)) {
7787 if (insn & (1 << 20)) {
7788 /* load */
b0109805 7789 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7790 if (user) {
b75263d6
JR
7791 tmp2 = tcg_const_i32(i);
7792 gen_helper_set_user_reg(tmp2, tmp);
7793 tcg_temp_free_i32(tmp2);
7d1b0095 7794 tcg_temp_free_i32(tmp);
9ee6e8bb 7795 } else if (i == rn) {
b0109805 7796 loaded_var = tmp;
9ee6e8bb
PB
7797 loaded_base = 1;
7798 } else {
be5e7a76 7799 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7800 }
7801 } else {
7802 /* store */
7803 if (i == 15) {
7804 /* special case: r15 = PC + 8 */
7805 val = (long)s->pc + 4;
7d1b0095 7806 tmp = tcg_temp_new_i32();
b0109805 7807 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7808 } else if (user) {
7d1b0095 7809 tmp = tcg_temp_new_i32();
b75263d6
JR
7810 tmp2 = tcg_const_i32(i);
7811 gen_helper_get_user_reg(tmp, tmp2);
7812 tcg_temp_free_i32(tmp2);
9ee6e8bb 7813 } else {
b0109805 7814 tmp = load_reg(s, i);
9ee6e8bb 7815 }
b0109805 7816 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7817 }
7818 j++;
7819 /* no need to add after the last transfer */
7820 if (j != n)
b0109805 7821 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7822 }
7823 }
7824 if (insn & (1 << 21)) {
7825 /* write back */
7826 if (insn & (1 << 23)) {
7827 if (insn & (1 << 24)) {
7828 /* pre increment */
7829 } else {
7830 /* post increment */
b0109805 7831 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7832 }
7833 } else {
7834 if (insn & (1 << 24)) {
7835 /* pre decrement */
7836 if (n != 1)
b0109805 7837 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7838 } else {
7839 /* post decrement */
b0109805 7840 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7841 }
7842 }
b0109805
PB
7843 store_reg(s, rn, addr);
7844 } else {
7d1b0095 7845 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7846 }
7847 if (loaded_base) {
b0109805 7848 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7849 }
7850 if ((insn & (1 << 22)) && !user) {
7851 /* Restore CPSR from SPSR. */
d9ba4830
PB
7852 tmp = load_cpu_field(spsr);
7853 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7854 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7855 s->is_jmp = DISAS_UPDATE;
7856 }
7857 }
7858 break;
7859 case 0xa:
7860 case 0xb:
7861 {
7862 int32_t offset;
7863
7864 /* branch (and link) */
7865 val = (int32_t)s->pc;
7866 if (insn & (1 << 24)) {
7d1b0095 7867 tmp = tcg_temp_new_i32();
5e3f878a
PB
7868 tcg_gen_movi_i32(tmp, val);
7869 store_reg(s, 14, tmp);
9ee6e8bb
PB
7870 }
7871 offset = (((int32_t)insn << 8) >> 8);
7872 val += (offset << 2) + 4;
7873 gen_jmp(s, val);
7874 }
7875 break;
7876 case 0xc:
7877 case 0xd:
7878 case 0xe:
7879 /* Coprocessor. */
7880 if (disas_coproc_insn(env, s, insn))
7881 goto illegal_op;
7882 break;
7883 case 0xf:
7884 /* swi */
5e3f878a 7885 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7886 s->is_jmp = DISAS_SWI;
7887 break;
7888 default:
7889 illegal_op:
bc4a0de0 7890 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7891 break;
7892 }
7893 }
7894}
7895
7896/* Return true if this is a Thumb-2 logical op. */
7897static int
7898thumb2_logic_op(int op)
7899{
7900 return (op < 8);
7901}
7902
7903/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7904 then set condition code flags based on the result of the operation.
7905 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7906 to the high bit of T1.
7907 Returns zero if the opcode is valid. */
7908
7909static int
396e467c 7910gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7911{
7912 int logic_cc;
7913
7914 logic_cc = 0;
7915 switch (op) {
7916 case 0: /* and */
396e467c 7917 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7918 logic_cc = conds;
7919 break;
7920 case 1: /* bic */
f669df27 7921 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7922 logic_cc = conds;
7923 break;
7924 case 2: /* orr */
396e467c 7925 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7926 logic_cc = conds;
7927 break;
7928 case 3: /* orn */
29501f1b 7929 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7930 logic_cc = conds;
7931 break;
7932 case 4: /* eor */
396e467c 7933 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7934 logic_cc = conds;
7935 break;
7936 case 8: /* add */
7937 if (conds)
396e467c 7938 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7939 else
396e467c 7940 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7941 break;
7942 case 10: /* adc */
7943 if (conds)
396e467c 7944 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7945 else
396e467c 7946 gen_adc(t0, t1);
9ee6e8bb
PB
7947 break;
7948 case 11: /* sbc */
7949 if (conds)
396e467c 7950 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7951 else
396e467c 7952 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7953 break;
7954 case 13: /* sub */
7955 if (conds)
396e467c 7956 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7957 else
396e467c 7958 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7959 break;
7960 case 14: /* rsb */
7961 if (conds)
396e467c 7962 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7963 else
396e467c 7964 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7965 break;
7966 default: /* 5, 6, 7, 9, 12, 15. */
7967 return 1;
7968 }
7969 if (logic_cc) {
396e467c 7970 gen_logic_CC(t0);
9ee6e8bb 7971 if (shifter_out)
396e467c 7972 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7973 }
7974 return 0;
7975}
7976
7977/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7978 is not legal. */
7979static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7980{
b0109805 7981 uint32_t insn, imm, shift, offset;
9ee6e8bb 7982 uint32_t rd, rn, rm, rs;
b26eefb6 7983 TCGv tmp;
6ddbc6e4
PB
7984 TCGv tmp2;
7985 TCGv tmp3;
b0109805 7986 TCGv addr;
a7812ae4 7987 TCGv_i64 tmp64;
9ee6e8bb
PB
7988 int op;
7989 int shiftop;
7990 int conds;
7991 int logic_cc;
7992
7993 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7994 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7995 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7996 16-bit instructions to get correct prefetch abort behavior. */
7997 insn = insn_hw1;
7998 if ((insn & (1 << 12)) == 0) {
be5e7a76 7999 ARCH(5);
9ee6e8bb
PB
8000 /* Second half of blx. */
8001 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8002 tmp = load_reg(s, 14);
8003 tcg_gen_addi_i32(tmp, tmp, offset);
8004 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8005
7d1b0095 8006 tmp2 = tcg_temp_new_i32();
b0109805 8007 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8008 store_reg(s, 14, tmp2);
8009 gen_bx(s, tmp);
9ee6e8bb
PB
8010 return 0;
8011 }
8012 if (insn & (1 << 11)) {
8013 /* Second half of bl. */
8014 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8015 tmp = load_reg(s, 14);
6a0d8a1d 8016 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8017
7d1b0095 8018 tmp2 = tcg_temp_new_i32();
b0109805 8019 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8020 store_reg(s, 14, tmp2);
8021 gen_bx(s, tmp);
9ee6e8bb
PB
8022 return 0;
8023 }
8024 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8025 /* Instruction spans a page boundary. Implement it as two
8026 16-bit instructions in case the second half causes a
8027 prefetch abort. */
8028 offset = ((int32_t)insn << 21) >> 9;
396e467c 8029 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8030 return 0;
8031 }
8032 /* Fall through to 32-bit decode. */
8033 }
8034
8035 insn = lduw_code(s->pc);
8036 s->pc += 2;
8037 insn |= (uint32_t)insn_hw1 << 16;
8038
8039 if ((insn & 0xf800e800) != 0xf000e800) {
8040 ARCH(6T2);
8041 }
8042
8043 rn = (insn >> 16) & 0xf;
8044 rs = (insn >> 12) & 0xf;
8045 rd = (insn >> 8) & 0xf;
8046 rm = insn & 0xf;
8047 switch ((insn >> 25) & 0xf) {
8048 case 0: case 1: case 2: case 3:
8049 /* 16-bit instructions. Should never happen. */
8050 abort();
8051 case 4:
8052 if (insn & (1 << 22)) {
8053 /* Other load/store, table branch. */
8054 if (insn & 0x01200000) {
8055 /* Load/store doubleword. */
8056 if (rn == 15) {
7d1b0095 8057 addr = tcg_temp_new_i32();
b0109805 8058 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8059 } else {
b0109805 8060 addr = load_reg(s, rn);
9ee6e8bb
PB
8061 }
8062 offset = (insn & 0xff) * 4;
8063 if ((insn & (1 << 23)) == 0)
8064 offset = -offset;
8065 if (insn & (1 << 24)) {
b0109805 8066 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8067 offset = 0;
8068 }
8069 if (insn & (1 << 20)) {
8070 /* ldrd */
b0109805
PB
8071 tmp = gen_ld32(addr, IS_USER(s));
8072 store_reg(s, rs, tmp);
8073 tcg_gen_addi_i32(addr, addr, 4);
8074 tmp = gen_ld32(addr, IS_USER(s));
8075 store_reg(s, rd, tmp);
9ee6e8bb
PB
8076 } else {
8077 /* strd */
b0109805
PB
8078 tmp = load_reg(s, rs);
8079 gen_st32(tmp, addr, IS_USER(s));
8080 tcg_gen_addi_i32(addr, addr, 4);
8081 tmp = load_reg(s, rd);
8082 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8083 }
8084 if (insn & (1 << 21)) {
8085 /* Base writeback. */
8086 if (rn == 15)
8087 goto illegal_op;
b0109805
PB
8088 tcg_gen_addi_i32(addr, addr, offset - 4);
8089 store_reg(s, rn, addr);
8090 } else {
7d1b0095 8091 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8092 }
8093 } else if ((insn & (1 << 23)) == 0) {
8094 /* Load/store exclusive word. */
3174f8e9 8095 addr = tcg_temp_local_new();
98a46317 8096 load_reg_var(s, addr, rn);
426f5abc 8097 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8098 if (insn & (1 << 20)) {
426f5abc 8099 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8100 } else {
426f5abc 8101 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8102 }
3174f8e9 8103 tcg_temp_free(addr);
9ee6e8bb
PB
8104 } else if ((insn & (1 << 6)) == 0) {
8105 /* Table Branch. */
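                /* TBB/TBH branch forward by twice the unsigned byte/halfword read
                   from a table at Rn + Rm (Rn + 2*Rm for TBH), i.e. the new PC is
                   the address of this instruction + 4 + 2 * table entry. */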
8106 if (rn == 15) {
7d1b0095 8107 addr = tcg_temp_new_i32();
b0109805 8108 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8109 } else {
b0109805 8110 addr = load_reg(s, rn);
9ee6e8bb 8111 }
b26eefb6 8112 tmp = load_reg(s, rm);
b0109805 8113 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8114 if (insn & (1 << 4)) {
8115 /* tbh */
b0109805 8116 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8117 tcg_temp_free_i32(tmp);
b0109805 8118 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8119 } else { /* tbb */
7d1b0095 8120 tcg_temp_free_i32(tmp);
b0109805 8121 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8122 }
7d1b0095 8123 tcg_temp_free_i32(addr);
b0109805
PB
8124 tcg_gen_shli_i32(tmp, tmp, 1);
8125 tcg_gen_addi_i32(tmp, tmp, s->pc);
8126 store_reg(s, 15, tmp);
9ee6e8bb
PB
8127 } else {
8128 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8129 ARCH(7);
9ee6e8bb 8130 op = (insn >> 4) & 0x3;
426f5abc
PB
8131 if (op == 2) {
8132 goto illegal_op;
8133 }
3174f8e9 8134 addr = tcg_temp_local_new();
98a46317 8135 load_reg_var(s, addr, rn);
9ee6e8bb 8136 if (insn & (1 << 20)) {
426f5abc 8137 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8138 } else {
426f5abc 8139 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8140 }
3174f8e9 8141 tcg_temp_free(addr);
9ee6e8bb
PB
8142 }
8143 } else {
8144 /* Load/store multiple, RFE, SRS. */
8145 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8146 /* Not available in user mode. */
b0109805 8147 if (IS_USER(s))
9ee6e8bb
PB
8148 goto illegal_op;
8149 if (insn & (1 << 20)) {
8150 /* rfe */
b0109805
PB
8151 addr = load_reg(s, rn);
8152 if ((insn & (1 << 24)) == 0)
8153 tcg_gen_addi_i32(addr, addr, -8);
8154 /* Load PC into tmp and CPSR into tmp2. */
8155 tmp = gen_ld32(addr, 0);
8156 tcg_gen_addi_i32(addr, addr, 4);
8157 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8158 if (insn & (1 << 21)) {
8159 /* Base writeback. */
b0109805
PB
8160 if (insn & (1 << 24)) {
8161 tcg_gen_addi_i32(addr, addr, 4);
8162 } else {
8163 tcg_gen_addi_i32(addr, addr, -4);
8164 }
8165 store_reg(s, rn, addr);
8166 } else {
7d1b0095 8167 tcg_temp_free_i32(addr);
9ee6e8bb 8168 }
b0109805 8169 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8170 } else {
8171 /* srs */
8172 op = (insn & 0x1f);
7d1b0095 8173 addr = tcg_temp_new_i32();
39ea3d4e
PM
8174 tmp = tcg_const_i32(op);
8175 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8176 tcg_temp_free_i32(tmp);
9ee6e8bb 8177 if ((insn & (1 << 24)) == 0) {
b0109805 8178 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8179 }
b0109805
PB
8180 tmp = load_reg(s, 14);
8181 gen_st32(tmp, addr, 0);
8182 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8183 tmp = tcg_temp_new_i32();
b0109805
PB
8184 gen_helper_cpsr_read(tmp);
8185 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8186 if (insn & (1 << 21)) {
8187 if ((insn & (1 << 24)) == 0) {
b0109805 8188 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8189 } else {
b0109805 8190 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8191 }
39ea3d4e
PM
8192 tmp = tcg_const_i32(op);
8193 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8194 tcg_temp_free_i32(tmp);
b0109805 8195 } else {
7d1b0095 8196 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8197 }
8198 }
8199 } else {
5856d44e
YO
8200 int i, loaded_base = 0;
8201 TCGv loaded_var;
9ee6e8bb 8202 /* Load/store multiple. */
b0109805 8203 addr = load_reg(s, rn);
9ee6e8bb
PB
8204 offset = 0;
8205 for (i = 0; i < 16; i++) {
8206 if (insn & (1 << i))
8207 offset += 4;
8208 }
8209 if (insn & (1 << 24)) {
b0109805 8210 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8211 }
8212
5856d44e 8213 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8214 for (i = 0; i < 16; i++) {
8215 if ((insn & (1 << i)) == 0)
8216 continue;
8217 if (insn & (1 << 20)) {
8218 /* Load. */
b0109805 8219 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8220 if (i == 15) {
b0109805 8221 gen_bx(s, tmp);
5856d44e
YO
8222 } else if (i == rn) {
8223 loaded_var = tmp;
8224 loaded_base = 1;
9ee6e8bb 8225 } else {
b0109805 8226 store_reg(s, i, tmp);
9ee6e8bb
PB
8227 }
8228 } else {
8229 /* Store. */
b0109805
PB
8230 tmp = load_reg(s, i);
8231 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8232 }
b0109805 8233 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8234 }
5856d44e
YO
8235 if (loaded_base) {
8236 store_reg(s, rn, loaded_var);
8237 }
9ee6e8bb
PB
8238 if (insn & (1 << 21)) {
8239 /* Base register writeback. */
8240 if (insn & (1 << 24)) {
b0109805 8241 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8242 }
8243 /* Fault if writeback register is in register list. */
8244 if (insn & (1 << rn))
8245 goto illegal_op;
b0109805
PB
8246 store_reg(s, rn, addr);
8247 } else {
7d1b0095 8248 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8249 }
8250 }
8251 }
8252 break;
2af9ab77
JB
8253 case 5:
8254
9ee6e8bb 8255 op = (insn >> 21) & 0xf;
2af9ab77
JB
8256 if (op == 6) {
8257 /* Halfword pack. */
8258 tmp = load_reg(s, rn);
8259 tmp2 = load_reg(s, rm);
8260 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8261 if (insn & (1 << 5)) {
8262 /* pkhtb */
8263 if (shift == 0)
8264 shift = 31;
8265 tcg_gen_sari_i32(tmp2, tmp2, shift);
8266 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8267 tcg_gen_ext16u_i32(tmp2, tmp2);
8268 } else {
8269 /* pkhbt */
8270 if (shift)
8271 tcg_gen_shli_i32(tmp2, tmp2, shift);
8272 tcg_gen_ext16u_i32(tmp, tmp);
8273 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8274 }
8275 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8276 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8277 store_reg(s, rd, tmp);
8278 } else {
2af9ab77
JB
8279 /* Data processing register constant shift. */
8280 if (rn == 15) {
7d1b0095 8281 tmp = tcg_temp_new_i32();
2af9ab77
JB
8282 tcg_gen_movi_i32(tmp, 0);
8283 } else {
8284 tmp = load_reg(s, rn);
8285 }
8286 tmp2 = load_reg(s, rm);
8287
8288 shiftop = (insn >> 4) & 3;
8289 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8290 conds = (insn & (1 << 20)) != 0;
8291 logic_cc = (conds && thumb2_logic_op(op));
8292 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8293 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8294 goto illegal_op;
7d1b0095 8295 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8296 if (rd != 15) {
8297 store_reg(s, rd, tmp);
8298 } else {
7d1b0095 8299 tcg_temp_free_i32(tmp);
2af9ab77 8300 }
3174f8e9 8301 }
9ee6e8bb
PB
8302 break;
8303 case 13: /* Misc data processing. */
8304 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8305 if (op < 4 && (insn & 0xf000) != 0xf000)
8306 goto illegal_op;
8307 switch (op) {
8308 case 0: /* Register controlled shift. */
8984bd2e
PB
8309 tmp = load_reg(s, rn);
8310 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8311 if ((insn & 0x70) != 0)
8312 goto illegal_op;
8313 op = (insn >> 21) & 3;
8984bd2e
PB
8314 logic_cc = (insn & (1 << 20)) != 0;
8315 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8316 if (logic_cc)
8317 gen_logic_CC(tmp);
21aeb343 8318 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8319 break;
8320 case 1: /* Sign/zero extend. */
5e3f878a 8321 tmp = load_reg(s, rm);
9ee6e8bb 8322 shift = (insn >> 4) & 3;
1301f322 8323 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8324 rotate; a shift is sufficient. */
8325 if (shift != 0)
f669df27 8326 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8327 op = (insn >> 20) & 7;
8328 switch (op) {
5e3f878a
PB
8329 case 0: gen_sxth(tmp); break;
8330 case 1: gen_uxth(tmp); break;
8331 case 2: gen_sxtb16(tmp); break;
8332 case 3: gen_uxtb16(tmp); break;
8333 case 4: gen_sxtb(tmp); break;
8334 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8335 default: goto illegal_op;
8336 }
8337 if (rn != 15) {
5e3f878a 8338 tmp2 = load_reg(s, rn);
9ee6e8bb 8339 if ((op >> 1) == 1) {
5e3f878a 8340 gen_add16(tmp, tmp2);
9ee6e8bb 8341 } else {
5e3f878a 8342 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8343 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8344 }
8345 }
5e3f878a 8346 store_reg(s, rd, tmp);
9ee6e8bb
PB
8347 break;
8348 case 2: /* SIMD add/subtract. */
8349 op = (insn >> 20) & 7;
8350 shift = (insn >> 4) & 7;
8351 if ((op & 3) == 3 || (shift & 3) == 3)
8352 goto illegal_op;
6ddbc6e4
PB
8353 tmp = load_reg(s, rn);
8354 tmp2 = load_reg(s, rm);
8355 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8356 tcg_temp_free_i32(tmp2);
6ddbc6e4 8357 store_reg(s, rd, tmp);
9ee6e8bb
PB
8358 break;
8359 case 3: /* Other data processing. */
8360 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8361 if (op < 4) {
8362 /* Saturating add/subtract. */
d9ba4830
PB
8363 tmp = load_reg(s, rn);
8364 tmp2 = load_reg(s, rm);
9ee6e8bb 8365 if (op & 1)
4809c612
JB
8366 gen_helper_double_saturate(tmp, tmp);
8367 if (op & 2)
d9ba4830 8368 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8369 else
d9ba4830 8370 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8371 tcg_temp_free_i32(tmp2);
9ee6e8bb 8372 } else {
d9ba4830 8373 tmp = load_reg(s, rn);
9ee6e8bb
PB
8374 switch (op) {
8375 case 0x0a: /* rbit */
d9ba4830 8376 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8377 break;
8378 case 0x08: /* rev */
66896cb8 8379 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8380 break;
8381 case 0x09: /* rev16 */
d9ba4830 8382 gen_rev16(tmp);
9ee6e8bb
PB
8383 break;
8384 case 0x0b: /* revsh */
d9ba4830 8385 gen_revsh(tmp);
9ee6e8bb
PB
8386 break;
8387 case 0x10: /* sel */
d9ba4830 8388 tmp2 = load_reg(s, rm);
7d1b0095 8389 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8390 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8391 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8392 tcg_temp_free_i32(tmp3);
8393 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8394 break;
8395 case 0x18: /* clz */
d9ba4830 8396 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8397 break;
8398 default:
8399 goto illegal_op;
8400 }
8401 }
d9ba4830 8402 store_reg(s, rd, tmp);
9ee6e8bb
PB
8403 break;
8404 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8405 op = (insn >> 4) & 0xf;
d9ba4830
PB
8406 tmp = load_reg(s, rn);
8407 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8408 switch ((insn >> 20) & 7) {
8409 case 0: /* 32 x 32 -> 32 */
d9ba4830 8410 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8411 tcg_temp_free_i32(tmp2);
9ee6e8bb 8412 if (rs != 15) {
d9ba4830 8413 tmp2 = load_reg(s, rs);
9ee6e8bb 8414 if (op)
d9ba4830 8415 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8416 else
d9ba4830 8417 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8418 tcg_temp_free_i32(tmp2);
9ee6e8bb 8419 }
9ee6e8bb
PB
8420 break;
8421 case 1: /* 16 x 16 -> 32 */
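                    /* SMUL<x><y>/SMLA<x><y>: the two low bits of op pick the top
                       or bottom halfword of each operand for the 16x16 multiply;
                       the optional accumulate below goes through add_setq so that
                       signed overflow of the accumulate sets the Q flag. */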
d9ba4830 8422 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8423 tcg_temp_free_i32(tmp2);
9ee6e8bb 8424 if (rs != 15) {
d9ba4830
PB
8425 tmp2 = load_reg(s, rs);
8426 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8427 tcg_temp_free_i32(tmp2);
9ee6e8bb 8428 }
9ee6e8bb
PB
8429 break;
8430 case 2: /* Dual multiply add. */
8431 case 4: /* Dual multiply subtract. */
8432 if (op)
d9ba4830
PB
8433 gen_swap_half(tmp2);
8434 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8435 if (insn & (1 << 22)) {
e1d177b9 8436 /* This subtraction cannot overflow. */
d9ba4830 8437 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8438 } else {
e1d177b9
PM
8439 /* This addition cannot overflow 32 bits;
8440 * however it may overflow considered as a signed
8441 * operation, in which case we must set the Q flag.
8442 */
8443 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8444 }
7d1b0095 8445 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8446 if (rs != 15)
8447 {
d9ba4830
PB
8448 tmp2 = load_reg(s, rs);
8449 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8450 tcg_temp_free_i32(tmp2);
9ee6e8bb 8451 }
9ee6e8bb
PB
8452 break;
8453 case 3: /* 32 * 16 -> 32msb */
8454 if (op)
d9ba4830 8455 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8456 else
d9ba4830 8457 gen_sxth(tmp2);
a7812ae4
PB
8458 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8459 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8460 tmp = tcg_temp_new_i32();
a7812ae4 8461 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8462 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8463 if (rs != 15)
8464 {
d9ba4830
PB
8465 tmp2 = load_reg(s, rs);
8466 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8467 tcg_temp_free_i32(tmp2);
9ee6e8bb 8468 }
9ee6e8bb 8469 break;
838fa72d
AJ
8470 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8471 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8472 if (rs != 15) {
838fa72d
AJ
8473 tmp = load_reg(s, rs);
8474 if (insn & (1 << 20)) {
8475 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8476 } else {
838fa72d 8477 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8478 }
2c0262af 8479 }
838fa72d
AJ
8480 if (insn & (1 << 4)) {
8481 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8482 }
8483 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8484 tmp = tcg_temp_new_i32();
838fa72d
AJ
8485 tcg_gen_trunc_i64_i32(tmp, tmp64);
8486 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8487 break;
8488 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8489 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8490 tcg_temp_free_i32(tmp2);
9ee6e8bb 8491 if (rs != 15) {
d9ba4830
PB
8492 tmp2 = load_reg(s, rs);
8493 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8494 tcg_temp_free_i32(tmp2);
5fd46862 8495 }
9ee6e8bb 8496 break;
2c0262af 8497 }
d9ba4830 8498 store_reg(s, rd, tmp);
2c0262af 8499 break;
9ee6e8bb
PB
8500 case 6: case 7: /* 64-bit multiply, Divide. */
8501 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8502 tmp = load_reg(s, rn);
8503 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8504 if ((op & 0x50) == 0x10) {
8505 /* sdiv, udiv */
8506 if (!arm_feature(env, ARM_FEATURE_DIV))
8507 goto illegal_op;
8508 if (op & 0x20)
5e3f878a 8509 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8510 else
5e3f878a 8511 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8512 tcg_temp_free_i32(tmp2);
5e3f878a 8513 store_reg(s, rd, tmp);
9ee6e8bb
PB
8514 } else if ((op & 0xe) == 0xc) {
8515 /* Dual multiply accumulate long. */
8516 if (op & 1)
5e3f878a
PB
8517 gen_swap_half(tmp2);
8518 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8519 if (op & 0x10) {
5e3f878a 8520 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8521 } else {
5e3f878a 8522 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8523 }
7d1b0095 8524 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8525 /* BUGFIX */
8526 tmp64 = tcg_temp_new_i64();
8527 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8528 tcg_temp_free_i32(tmp);
a7812ae4
PB
8529 gen_addq(s, tmp64, rs, rd);
8530 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8531 tcg_temp_free_i64(tmp64);
2c0262af 8532 } else {
9ee6e8bb
PB
8533 if (op & 0x20) {
8534 /* Unsigned 64-bit multiply */
a7812ae4 8535 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8536 } else {
9ee6e8bb
PB
8537 if (op & 8) {
8538 /* smlalxy */
5e3f878a 8539 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8540 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8541 tmp64 = tcg_temp_new_i64();
8542 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8543 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8544 } else {
8545 /* Signed 64-bit multiply */
a7812ae4 8546 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8547 }
b5ff1b31 8548 }
9ee6e8bb
PB
8549 if (op & 4) {
8550 /* umaal */
a7812ae4
PB
8551 gen_addq_lo(s, tmp64, rs);
8552 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8553 } else if (op & 0x40) {
8554 /* 64-bit accumulate. */
a7812ae4 8555 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8556 }
a7812ae4 8557 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8558 tcg_temp_free_i64(tmp64);
5fd46862 8559 }
2c0262af 8560 break;
9ee6e8bb
PB
8561 }
8562 break;
8563 case 6: case 7: case 14: case 15:
8564 /* Coprocessor. */
8565 if (((insn >> 24) & 3) == 3) {
8566 /* Translate into the equivalent ARM encoding. */
f06053e3 8567 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8568 if (disas_neon_data_insn(env, s, insn))
8569 goto illegal_op;
8570 } else {
8571 if (insn & (1 << 28))
8572 goto illegal_op;
8573 if (disas_coproc_insn (env, s, insn))
8574 goto illegal_op;
8575 }
8576 break;
8577 case 8: case 9: case 10: case 11:
8578 if (insn & (1 << 15)) {
8579 /* Branches, misc control. */
8580 if (insn & 0x5000) {
8581 /* Unconditional branch. */
8582 /* signextend(hw1[10:0]) -> offset[31:12]. */
8583 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8584 /* hw1[10:0] -> offset[11:1]. */
8585 offset |= (insn & 0x7ff) << 1;
8586 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8587 offset[24:22] already have the same value because of the
8588 sign extension above. */
8589 offset ^= ((~insn) & (1 << 13)) << 10;
8590 offset ^= ((~insn) & (1 << 11)) << 11;
8591
9ee6e8bb
PB
8592 if (insn & (1 << 14)) {
8593 /* Branch and link. */
3174f8e9 8594 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8595 }
3b46e624 8596
b0109805 8597 offset += s->pc;
9ee6e8bb
PB
8598 if (insn & (1 << 12)) {
8599 /* b/bl */
b0109805 8600 gen_jmp(s, offset);
9ee6e8bb
PB
8601 } else {
8602 /* blx */
b0109805 8603 offset &= ~(uint32_t)2;
be5e7a76 8604 /* thumb2 bx, no need to check */
b0109805 8605 gen_bx_im(s, offset);
2c0262af 8606 }
9ee6e8bb
PB
8607 } else if (((insn >> 23) & 7) == 7) {
8608 /* Misc control */
8609 if (insn & (1 << 13))
8610 goto illegal_op;
8611
8612 if (insn & (1 << 26)) {
8613 /* Secure monitor call (v6Z) */
8614 goto illegal_op; /* not implemented. */
2c0262af 8615 } else {
9ee6e8bb
PB
8616 op = (insn >> 20) & 7;
8617 switch (op) {
8618 case 0: /* msr cpsr. */
8619 if (IS_M(env)) {
8984bd2e
PB
8620 tmp = load_reg(s, rn);
8621 addr = tcg_const_i32(insn & 0xff);
8622 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8623 tcg_temp_free_i32(addr);
7d1b0095 8624 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8625 gen_lookup_tb(s);
8626 break;
8627 }
8628 /* fall through */
8629 case 1: /* msr spsr. */
8630 if (IS_M(env))
8631 goto illegal_op;
2fbac54b
FN
8632 tmp = load_reg(s, rn);
8633 if (gen_set_psr(s,
9ee6e8bb 8634 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8635 op == 1, tmp))
9ee6e8bb
PB
8636 goto illegal_op;
8637 break;
8638 case 2: /* cps, nop-hint. */
8639 if (((insn >> 8) & 7) == 0) {
8640 gen_nop_hint(s, insn & 0xff);
8641 }
8642 /* Implemented as NOP in user mode. */
8643 if (IS_USER(s))
8644 break;
8645 offset = 0;
8646 imm = 0;
8647 if (insn & (1 << 10)) {
8648 if (insn & (1 << 7))
8649 offset |= CPSR_A;
8650 if (insn & (1 << 6))
8651 offset |= CPSR_I;
8652 if (insn & (1 << 5))
8653 offset |= CPSR_F;
8654 if (insn & (1 << 9))
8655 imm = CPSR_A | CPSR_I | CPSR_F;
8656 }
8657 if (insn & (1 << 8)) {
8658 offset |= 0x1f;
8659 imm |= (insn & 0x1f);
8660 }
8661 if (offset) {
2fbac54b 8662 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8663 }
8664 break;
8665 case 3: /* Special control operations. */
426f5abc 8666 ARCH(7);
9ee6e8bb
PB
8667 op = (insn >> 4) & 0xf;
8668 switch (op) {
8669 case 2: /* clrex */
426f5abc 8670 gen_clrex(s);
9ee6e8bb
PB
8671 break;
8672 case 4: /* dsb */
8673 case 5: /* dmb */
8674 case 6: /* isb */
8675 /* These execute as NOPs. */
9ee6e8bb
PB
8676 break;
8677 default:
8678 goto illegal_op;
8679 }
8680 break;
8681 case 4: /* bxj */
8682 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8683 tmp = load_reg(s, rn);
8684 gen_bx(s, tmp);
9ee6e8bb
PB
8685 break;
8686 case 5: /* Exception return. */
b8b45b68
RV
8687 if (IS_USER(s)) {
8688 goto illegal_op;
8689 }
8690 if (rn != 14 || rd != 15) {
8691 goto illegal_op;
8692 }
8693 tmp = load_reg(s, rn);
8694 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8695 gen_exception_return(s, tmp);
8696 break;
9ee6e8bb 8697 case 6: /* mrs cpsr. */
7d1b0095 8698 tmp = tcg_temp_new_i32();
9ee6e8bb 8699 if (IS_M(env)) {
8984bd2e
PB
8700 addr = tcg_const_i32(insn & 0xff);
8701 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8702 tcg_temp_free_i32(addr);
9ee6e8bb 8703 } else {
8984bd2e 8704 gen_helper_cpsr_read(tmp);
9ee6e8bb 8705 }
8984bd2e 8706 store_reg(s, rd, tmp);
9ee6e8bb
PB
8707 break;
8708 case 7: /* mrs spsr. */
8709 /* Not accessible in user mode. */
8710 if (IS_USER(s) || IS_M(env))
8711 goto illegal_op;
d9ba4830
PB
8712 tmp = load_cpu_field(spsr);
8713 store_reg(s, rd, tmp);
9ee6e8bb 8714 break;
2c0262af
FB
8715 }
8716 }
9ee6e8bb
PB
8717 } else {
8718 /* Conditional branch. */
8719 op = (insn >> 22) & 0xf;
8720 /* Generate a conditional jump to next instruction. */
8721 s->condlabel = gen_new_label();
d9ba4830 8722 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8723 s->condjmp = 1;
8724
8725 /* offset[11:1] = insn[10:0] */
8726 offset = (insn & 0x7ff) << 1;
8727 /* offset[17:12] = insn[21:16]. */
8728 offset |= (insn & 0x003f0000) >> 4;
8729 /* offset[31:20] = insn[26]. */
8730 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8731 /* offset[18] = insn[13]. */
8732 offset |= (insn & (1 << 13)) << 5;
8733 /* offset[19] = insn[11]. */
8734 offset |= (insn & (1 << 11)) << 8;
8735
8736 /* jump to the offset */
b0109805 8737 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8738 }
8739 } else {
8740 /* Data processing immediate. */
8741 if (insn & (1 << 25)) {
8742 if (insn & (1 << 24)) {
8743 if (insn & (1 << 20))
8744 goto illegal_op;
8745 /* Bitfield/Saturate. */
8746 op = (insn >> 21) & 7;
8747 imm = insn & 0x1f;
8748 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8749 if (rn == 15) {
7d1b0095 8750 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8751 tcg_gen_movi_i32(tmp, 0);
8752 } else {
8753 tmp = load_reg(s, rn);
8754 }
9ee6e8bb
PB
8755 switch (op) {
8756 case 2: /* Signed bitfield extract. */
8757 imm++;
8758 if (shift + imm > 32)
8759 goto illegal_op;
8760 if (imm < 32)
6ddbc6e4 8761 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8762 break;
8763 case 6: /* Unsigned bitfield extract. */
8764 imm++;
8765 if (shift + imm > 32)
8766 goto illegal_op;
8767 if (imm < 32)
6ddbc6e4 8768 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8769 break;
8770 case 3: /* Bitfield insert/clear. */
8771 if (imm < shift)
8772 goto illegal_op;
8773 imm = imm + 1 - shift;
8774 if (imm != 32) {
6ddbc6e4 8775 tmp2 = load_reg(s, rd);
8f8e3aa4 8776 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8777 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8778 }
8779 break;
8780 case 7:
8781 goto illegal_op;
8782 default: /* Saturate. */
9ee6e8bb
PB
8783 if (shift) {
8784 if (op & 1)
6ddbc6e4 8785 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8786 else
6ddbc6e4 8787 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8788 }
6ddbc6e4 8789 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8790 if (op & 4) {
8791 /* Unsigned. */
9ee6e8bb 8792 if ((op & 1) && shift == 0)
6ddbc6e4 8793 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8794 else
6ddbc6e4 8795 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8796 } else {
9ee6e8bb 8797 /* Signed. */
9ee6e8bb 8798 if ((op & 1) && shift == 0)
6ddbc6e4 8799 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8800 else
6ddbc6e4 8801 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8802 }
b75263d6 8803 tcg_temp_free_i32(tmp2);
9ee6e8bb 8804 break;
2c0262af 8805 }
6ddbc6e4 8806 store_reg(s, rd, tmp);
9ee6e8bb
PB
8807 } else {
8808 imm = ((insn & 0x04000000) >> 15)
8809 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8810 if (insn & (1 << 22)) {
8811 /* 16-bit immediate. */
8812 imm |= (insn >> 4) & 0xf000;
8813 if (insn & (1 << 23)) {
8814 /* movt */
5e3f878a 8815 tmp = load_reg(s, rd);
86831435 8816 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8817 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8818 } else {
9ee6e8bb 8819 /* movw */
7d1b0095 8820 tmp = tcg_temp_new_i32();
5e3f878a 8821 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8822 }
8823 } else {
9ee6e8bb
PB
8824 /* Add/sub 12-bit immediate. */
8825 if (rn == 15) {
b0109805 8826 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8827 if (insn & (1 << 23))
b0109805 8828 offset -= imm;
9ee6e8bb 8829 else
b0109805 8830 offset += imm;
7d1b0095 8831 tmp = tcg_temp_new_i32();
5e3f878a 8832 tcg_gen_movi_i32(tmp, offset);
2c0262af 8833 } else {
5e3f878a 8834 tmp = load_reg(s, rn);
9ee6e8bb 8835 if (insn & (1 << 23))
5e3f878a 8836 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8837 else
5e3f878a 8838 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8839 }
9ee6e8bb 8840 }
5e3f878a 8841 store_reg(s, rd, tmp);
191abaa2 8842 }
9ee6e8bb
PB
8843 } else {
8844 int shifter_out = 0;
8845 /* modified 12-bit immediate. */
8846 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8847 imm = (insn & 0xff);
8848 switch (shift) {
8849 case 0: /* XY */
8850 /* Nothing to do. */
8851 break;
8852 case 1: /* 00XY00XY */
8853 imm |= imm << 16;
8854 break;
8855 case 2: /* XY00XY00 */
8856 imm |= imm << 16;
8857 imm <<= 8;
8858 break;
8859 case 3: /* XYXYXYXY */
8860 imm |= imm << 16;
8861 imm |= imm << 8;
8862 break;
8863 default: /* Rotated constant. */
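                    /* ThumbExpandImm, rotated-constant range: the 5-bit rotation
                       count is i:imm3:a (rebuilt below from the shift field and
                       bit 7 of imm8) and the constant is imm8 with bit 7 forced
                       to 1, rotated right by that count.  E.g. a count of 8 with
                       imm8 = 0x2A gives (0x80 | 0x2A) ROR 8 = 0xAA000000. */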
8864 shift = (shift << 1) | (imm >> 7);
8865 imm |= 0x80;
8866 imm = imm << (32 - shift);
8867 shifter_out = 1;
8868 break;
b5ff1b31 8869 }
7d1b0095 8870 tmp2 = tcg_temp_new_i32();
3174f8e9 8871 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8872 rn = (insn >> 16) & 0xf;
3174f8e9 8873 if (rn == 15) {
7d1b0095 8874 tmp = tcg_temp_new_i32();
3174f8e9
FN
8875 tcg_gen_movi_i32(tmp, 0);
8876 } else {
8877 tmp = load_reg(s, rn);
8878 }
9ee6e8bb
PB
8879 op = (insn >> 21) & 0xf;
8880 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8881 shifter_out, tmp, tmp2))
9ee6e8bb 8882 goto illegal_op;
7d1b0095 8883 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8884 rd = (insn >> 8) & 0xf;
8885 if (rd != 15) {
3174f8e9
FN
8886 store_reg(s, rd, tmp);
8887 } else {
7d1b0095 8888 tcg_temp_free_i32(tmp);
2c0262af 8889 }
2c0262af 8890 }
9ee6e8bb
PB
8891 }
8892 break;
8893 case 12: /* Load/store single data item. */
8894 {
8895 int postinc = 0;
8896 int writeback = 0;
b0109805 8897 int user;
9ee6e8bb
PB
8898 if ((insn & 0x01100000) == 0x01000000) {
8899 if (disas_neon_ls_insn(env, s, insn))
c1713132 8900 goto illegal_op;
9ee6e8bb
PB
8901 break;
8902 }
a2fdc890
PM
8903 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8904 if (rs == 15) {
8905 if (!(insn & (1 << 20))) {
8906 goto illegal_op;
8907 }
8908 if (op != 2) {
8909 /* Byte or halfword load space with dest == r15: memory hints.
8910 * Catch them early so we don't emit pointless addressing code.
8911 * This space is a mix of:
8912 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8913 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8914 * cores)
8915 * unallocated hints, which must be treated as NOPs
8916 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8917 * which is easiest for the decoding logic
8918 * Some space which must UNDEF
8919 */
8920 int op1 = (insn >> 23) & 3;
8921 int op2 = (insn >> 6) & 0x3f;
8922 if (op & 2) {
8923 goto illegal_op;
8924 }
8925 if (rn == 15) {
8926 /* UNPREDICTABLE or unallocated hint */
8927 return 0;
8928 }
8929 if (op1 & 1) {
8930 return 0; /* PLD* or unallocated hint */
8931 }
8932 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8933 return 0; /* PLD* or unallocated hint */
8934 }
8935 /* UNDEF space, or an UNPREDICTABLE */
8936 return 1;
8937 }
8938 }
b0109805 8939 user = IS_USER(s);
9ee6e8bb 8940 if (rn == 15) {
7d1b0095 8941 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8942 /* PC relative. */
8943 /* s->pc has already been incremented by 4. */
8944 imm = s->pc & 0xfffffffc;
8945 if (insn & (1 << 23))
8946 imm += insn & 0xfff;
8947 else
8948 imm -= insn & 0xfff;
b0109805 8949 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8950 } else {
b0109805 8951 addr = load_reg(s, rn);
9ee6e8bb
PB
8952 if (insn & (1 << 23)) {
8953 /* Positive offset. */
8954 imm = insn & 0xfff;
b0109805 8955 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8956 } else {
9ee6e8bb 8957 imm = insn & 0xff;
2a0308c5
PM
8958 switch ((insn >> 8) & 0xf) {
8959 case 0x0: /* Shifted Register. */
9ee6e8bb 8960 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8961 if (shift > 3) {
8962 tcg_temp_free_i32(addr);
18c9b560 8963 goto illegal_op;
2a0308c5 8964 }
b26eefb6 8965 tmp = load_reg(s, rm);
9ee6e8bb 8966 if (shift)
b26eefb6 8967 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8968 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8969 tcg_temp_free_i32(tmp);
9ee6e8bb 8970 break;
2a0308c5 8971 case 0xc: /* Negative offset. */
b0109805 8972 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8973 break;
2a0308c5 8974 case 0xe: /* User privilege. */
b0109805
PB
8975 tcg_gen_addi_i32(addr, addr, imm);
8976 user = 1;
9ee6e8bb 8977 break;
2a0308c5 8978 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8979 imm = -imm;
8980 /* Fall through. */
2a0308c5 8981 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8982 postinc = 1;
8983 writeback = 1;
8984 break;
2a0308c5 8985 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8986 imm = -imm;
8987 /* Fall through. */
2a0308c5 8988 case 0xf: /* Pre-increment. */
b0109805 8989 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8990 writeback = 1;
8991 break;
8992 default:
2a0308c5 8993 tcg_temp_free_i32(addr);
b7bcbe95 8994 goto illegal_op;
9ee6e8bb
PB
8995 }
8996 }
8997 }
9ee6e8bb
PB
8998 if (insn & (1 << 20)) {
8999 /* Load. */
a2fdc890
PM
9000 switch (op) {
9001 case 0: tmp = gen_ld8u(addr, user); break;
9002 case 4: tmp = gen_ld8s(addr, user); break;
9003 case 1: tmp = gen_ld16u(addr, user); break;
9004 case 5: tmp = gen_ld16s(addr, user); break;
9005 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
9006 default:
9007 tcg_temp_free_i32(addr);
9008 goto illegal_op;
a2fdc890
PM
9009 }
9010 if (rs == 15) {
9011 gen_bx(s, tmp);
9ee6e8bb 9012 } else {
a2fdc890 9013 store_reg(s, rs, tmp);
9ee6e8bb
PB
9014 }
9015 } else {
9016 /* Store. */
b0109805 9017 tmp = load_reg(s, rs);
9ee6e8bb 9018 switch (op) {
b0109805
PB
9019 case 0: gen_st8(tmp, addr, user); break;
9020 case 1: gen_st16(tmp, addr, user); break;
9021 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
9022 default:
9023 tcg_temp_free_i32(addr);
9024 goto illegal_op;
b7bcbe95 9025 }
2c0262af 9026 }
9ee6e8bb 9027 if (postinc)
b0109805
PB
9028 tcg_gen_addi_i32(addr, addr, imm);
9029 if (writeback) {
9030 store_reg(s, rn, addr);
9031 } else {
7d1b0095 9032 tcg_temp_free_i32(addr);
b0109805 9033 }
9ee6e8bb
PB
9034 }
9035 break;
9036 default:
9037 goto illegal_op;
2c0262af 9038 }
9ee6e8bb
PB
9039 return 0;
9040illegal_op:
9041 return 1;
2c0262af
FB
9042}
9043
9ee6e8bb 9044static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
9045{
9046 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9047 int32_t offset;
9048 int i;
b26eefb6 9049 TCGv tmp;
d9ba4830 9050 TCGv tmp2;
b0109805 9051 TCGv addr;
99c475ab 9052
9ee6e8bb
PB
9053 if (s->condexec_mask) {
9054 cond = s->condexec_cond;
bedd2912
JB
9055 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9056 s->condlabel = gen_new_label();
9057 gen_test_cc(cond ^ 1, s->condlabel);
9058 s->condjmp = 1;
9059 }
9ee6e8bb
PB
9060 }
9061
b5ff1b31 9062 insn = lduw_code(s->pc);
99c475ab 9063 s->pc += 2;
b5ff1b31 9064
99c475ab
FB
9065 switch (insn >> 12) {
9066 case 0: case 1:
396e467c 9067
99c475ab
FB
9068 rd = insn & 7;
9069 op = (insn >> 11) & 3;
9070 if (op == 3) {
9071 /* add/subtract */
9072 rn = (insn >> 3) & 7;
396e467c 9073 tmp = load_reg(s, rn);
99c475ab
FB
9074 if (insn & (1 << 10)) {
9075 /* immediate */
7d1b0095 9076 tmp2 = tcg_temp_new_i32();
396e467c 9077 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9078 } else {
9079 /* reg */
9080 rm = (insn >> 6) & 7;
396e467c 9081 tmp2 = load_reg(s, rm);
99c475ab 9082 }
9ee6e8bb
PB
9083 if (insn & (1 << 9)) {
9084 if (s->condexec_mask)
396e467c 9085 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9086 else
396e467c 9087 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
9088 } else {
9089 if (s->condexec_mask)
396e467c 9090 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9091 else
396e467c 9092 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 9093 }
7d1b0095 9094 tcg_temp_free_i32(tmp2);
396e467c 9095 store_reg(s, rd, tmp);
99c475ab
FB
9096 } else {
9097 /* shift immediate */
9098 rm = (insn >> 3) & 7;
9099 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9100 tmp = load_reg(s, rm);
9101 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9102 if (!s->condexec_mask)
9103 gen_logic_CC(tmp);
9104 store_reg(s, rd, tmp);
99c475ab
FB
9105 }
9106 break;
9107 case 2: case 3:
9108 /* arithmetic large immediate */
9109 op = (insn >> 11) & 3;
9110 rd = (insn >> 8) & 0x7;
396e467c 9111 if (op == 0) { /* mov */
7d1b0095 9112 tmp = tcg_temp_new_i32();
396e467c 9113 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9114 if (!s->condexec_mask)
396e467c
FN
9115 gen_logic_CC(tmp);
9116 store_reg(s, rd, tmp);
9117 } else {
9118 tmp = load_reg(s, rd);
7d1b0095 9119 tmp2 = tcg_temp_new_i32();
396e467c
FN
9120 tcg_gen_movi_i32(tmp2, insn & 0xff);
9121 switch (op) {
9122 case 1: /* cmp */
9123 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9124 tcg_temp_free_i32(tmp);
9125 tcg_temp_free_i32(tmp2);
396e467c
FN
9126 break;
9127 case 2: /* add */
9128 if (s->condexec_mask)
9129 tcg_gen_add_i32(tmp, tmp, tmp2);
9130 else
9131 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 9132 tcg_temp_free_i32(tmp2);
396e467c
FN
9133 store_reg(s, rd, tmp);
9134 break;
9135 case 3: /* sub */
9136 if (s->condexec_mask)
9137 tcg_gen_sub_i32(tmp, tmp, tmp2);
9138 else
9139 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 9140 tcg_temp_free_i32(tmp2);
396e467c
FN
9141 store_reg(s, rd, tmp);
9142 break;
9143 }
99c475ab 9144 }
99c475ab
FB
9145 break;
9146 case 4:
9147 if (insn & (1 << 11)) {
9148 rd = (insn >> 8) & 7;
5899f386
FB
9149 /* load pc-relative. Bit 1 of PC is ignored. */
9150 val = s->pc + 2 + ((insn & 0xff) * 4);
9151 val &= ~(uint32_t)2;
7d1b0095 9152 addr = tcg_temp_new_i32();
b0109805
PB
9153 tcg_gen_movi_i32(addr, val);
9154 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9155 tcg_temp_free_i32(addr);
b0109805 9156 store_reg(s, rd, tmp);
99c475ab
FB
9157 break;
9158 }
9159 if (insn & (1 << 10)) {
9160 /* data processing extended or blx */
9161 rd = (insn & 7) | ((insn >> 4) & 8);
9162 rm = (insn >> 3) & 0xf;
9163 op = (insn >> 8) & 3;
9164 switch (op) {
9165 case 0: /* add */
396e467c
FN
9166 tmp = load_reg(s, rd);
9167 tmp2 = load_reg(s, rm);
9168 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9169 tcg_temp_free_i32(tmp2);
396e467c 9170 store_reg(s, rd, tmp);
99c475ab
FB
9171 break;
9172 case 1: /* cmp */
396e467c
FN
9173 tmp = load_reg(s, rd);
9174 tmp2 = load_reg(s, rm);
9175 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9176 tcg_temp_free_i32(tmp2);
9177 tcg_temp_free_i32(tmp);
99c475ab
FB
9178 break;
9179 case 2: /* mov/cpy */
396e467c
FN
9180 tmp = load_reg(s, rm);
9181 store_reg(s, rd, tmp);
99c475ab
FB
9182 break;
9183 case 3:/* branch [and link] exchange thumb register */
b0109805 9184 tmp = load_reg(s, rm);
99c475ab 9185 if (insn & (1 << 7)) {
be5e7a76 9186 ARCH(5);
99c475ab 9187 val = (uint32_t)s->pc | 1;
7d1b0095 9188 tmp2 = tcg_temp_new_i32();
b0109805
PB
9189 tcg_gen_movi_i32(tmp2, val);
9190 store_reg(s, 14, tmp2);
99c475ab 9191 }
be5e7a76 9192 /* already thumb, no need to check */
d9ba4830 9193 gen_bx(s, tmp);
99c475ab
FB
9194 break;
9195 }
9196 break;
9197 }
9198
9199 /* data processing register */
9200 rd = insn & 7;
9201 rm = (insn >> 3) & 7;
9202 op = (insn >> 6) & 0xf;
9203 if (op == 2 || op == 3 || op == 4 || op == 7) {
9204 /* the shift/rotate ops want the operands backwards */
9205 val = rm;
9206 rm = rd;
9207 rd = val;
9208 val = 1;
9209 } else {
9210 val = 0;
9211 }
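        /* For the by-register shifts and rotates (ops 2, 3, 4 and 7) rd and
           rm are swapped above so that tmp2 ends up holding the value to
           shift and tmp the shift amount; val == 1 records that the result
           must be written back through rm (the original rd) after the
           switch below. */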
9212
396e467c 9213 if (op == 9) { /* neg */
7d1b0095 9214 tmp = tcg_temp_new_i32();
396e467c
FN
9215 tcg_gen_movi_i32(tmp, 0);
9216 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9217 tmp = load_reg(s, rd);
9218 } else {
9219 TCGV_UNUSED(tmp);
9220 }
99c475ab 9221
396e467c 9222 tmp2 = load_reg(s, rm);
5899f386 9223 switch (op) {
99c475ab 9224 case 0x0: /* and */
396e467c 9225 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9226 if (!s->condexec_mask)
396e467c 9227 gen_logic_CC(tmp);
99c475ab
FB
9228 break;
9229 case 0x1: /* eor */
396e467c 9230 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9231 if (!s->condexec_mask)
396e467c 9232 gen_logic_CC(tmp);
99c475ab
FB
9233 break;
9234 case 0x2: /* lsl */
9ee6e8bb 9235 if (s->condexec_mask) {
396e467c 9236 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9237 } else {
396e467c
FN
9238 gen_helper_shl_cc(tmp2, tmp2, tmp);
9239 gen_logic_CC(tmp2);
9ee6e8bb 9240 }
99c475ab
FB
9241 break;
9242 case 0x3: /* lsr */
9ee6e8bb 9243 if (s->condexec_mask) {
396e467c 9244 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9245 } else {
396e467c
FN
9246 gen_helper_shr_cc(tmp2, tmp2, tmp);
9247 gen_logic_CC(tmp2);
9ee6e8bb 9248 }
99c475ab
FB
9249 break;
9250 case 0x4: /* asr */
9ee6e8bb 9251 if (s->condexec_mask) {
396e467c 9252 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9253 } else {
396e467c
FN
9254 gen_helper_sar_cc(tmp2, tmp2, tmp);
9255 gen_logic_CC(tmp2);
9ee6e8bb 9256 }
99c475ab
FB
9257 break;
9258 case 0x5: /* adc */
9ee6e8bb 9259 if (s->condexec_mask)
396e467c 9260 gen_adc(tmp, tmp2);
9ee6e8bb 9261 else
396e467c 9262 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9263 break;
9264 case 0x6: /* sbc */
9ee6e8bb 9265 if (s->condexec_mask)
396e467c 9266 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9267 else
396e467c 9268 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9269 break;
9270 case 0x7: /* ror */
9ee6e8bb 9271 if (s->condexec_mask) {
f669df27
AJ
9272 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9273 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9274 } else {
396e467c
FN
9275 gen_helper_ror_cc(tmp2, tmp2, tmp);
9276 gen_logic_CC(tmp2);
9ee6e8bb 9277 }
99c475ab
FB
9278 break;
9279 case 0x8: /* tst */
396e467c
FN
9280 tcg_gen_and_i32(tmp, tmp, tmp2);
9281 gen_logic_CC(tmp);
99c475ab 9282 rd = 16;
5899f386 9283 break;
99c475ab 9284 case 0x9: /* neg */
9ee6e8bb 9285 if (s->condexec_mask)
396e467c 9286 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9287 else
396e467c 9288 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9289 break;
9290 case 0xa: /* cmp */
396e467c 9291 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9292 rd = 16;
9293 break;
9294 case 0xb: /* cmn */
396e467c 9295 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9296 rd = 16;
9297 break;
9298 case 0xc: /* orr */
396e467c 9299 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9300 if (!s->condexec_mask)
396e467c 9301 gen_logic_CC(tmp);
99c475ab
FB
9302 break;
9303 case 0xd: /* mul */
7b2919a0 9304 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9305 if (!s->condexec_mask)
396e467c 9306 gen_logic_CC(tmp);
99c475ab
FB
9307 break;
9308 case 0xe: /* bic */
f669df27 9309 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9310 if (!s->condexec_mask)
396e467c 9311 gen_logic_CC(tmp);
99c475ab
FB
9312 break;
9313 case 0xf: /* mvn */
396e467c 9314 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9315 if (!s->condexec_mask)
396e467c 9316 gen_logic_CC(tmp2);
99c475ab 9317 val = 1;
5899f386 9318 rm = rd;
99c475ab
FB
9319 break;
9320 }
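        /* Result write-back convention for the switch above: rd == 16 marks
           the compare-style ops (tst/cmp/cmn) whose result is discarded;
           otherwise val selects whether the result lives in tmp2 (shifts,
           mvn) or in tmp (everything else), and the other temporary is
           freed. */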
9321 if (rd != 16) {
396e467c
FN
9322 if (val) {
9323 store_reg(s, rm, tmp2);
9324 if (op != 0xf)
7d1b0095 9325 tcg_temp_free_i32(tmp);
396e467c
FN
9326 } else {
9327 store_reg(s, rd, tmp);
7d1b0095 9328 tcg_temp_free_i32(tmp2);
396e467c
FN
9329 }
9330 } else {
7d1b0095
PM
9331 tcg_temp_free_i32(tmp);
9332 tcg_temp_free_i32(tmp2);
99c475ab
FB
9333 }
9334 break;
9335
9336 case 5:
9337 /* load/store register offset. */
9338 rd = insn & 7;
9339 rn = (insn >> 3) & 7;
9340 rm = (insn >> 6) & 7;
9341 op = (insn >> 9) & 7;
b0109805 9342 addr = load_reg(s, rn);
b26eefb6 9343 tmp = load_reg(s, rm);
b0109805 9344 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9345 tcg_temp_free_i32(tmp);
99c475ab
FB
9346
9347 if (op < 3) /* store */
b0109805 9348 tmp = load_reg(s, rd);
99c475ab
FB
9349
9350 switch (op) {
9351 case 0: /* str */
b0109805 9352 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9353 break;
9354 case 1: /* strh */
b0109805 9355 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9356 break;
9357 case 2: /* strb */
b0109805 9358 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9359 break;
9360 case 3: /* ldrsb */
b0109805 9361 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9362 break;
9363 case 4: /* ldr */
b0109805 9364 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9365 break;
9366 case 5: /* ldrh */
b0109805 9367 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9368 break;
9369 case 6: /* ldrb */
b0109805 9370 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9371 break;
9372 case 7: /* ldrsh */
b0109805 9373 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9374 break;
9375 }
9376 if (op >= 3) /* load */
b0109805 9377 store_reg(s, rd, tmp);
7d1b0095 9378 tcg_temp_free_i32(addr);
99c475ab
FB
9379 break;
9380
9381 case 6:
9382 /* load/store word immediate offset */
9383 rd = insn & 7;
9384 rn = (insn >> 3) & 7;
b0109805 9385 addr = load_reg(s, rn);
99c475ab 9386 val = (insn >> 4) & 0x7c;
b0109805 9387 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9388
9389 if (insn & (1 << 11)) {
9390 /* load */
b0109805
PB
9391 tmp = gen_ld32(addr, IS_USER(s));
9392 store_reg(s, rd, tmp);
99c475ab
FB
9393 } else {
9394 /* store */
b0109805
PB
9395 tmp = load_reg(s, rd);
9396 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9397 }
7d1b0095 9398 tcg_temp_free_i32(addr);
99c475ab
FB
9399 break;
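        /* The mask-and-shift above extracts the already-scaled offset in one
           step: imm5 sits in bits 10:6, so (insn >> 4) & 0x7c is imm5 * 4 for
           word accesses; the byte and halfword cases below use
           (insn >> 6) & 0x1f (imm5) and (insn >> 5) & 0x3e (imm5 * 2). */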
9400
9401 case 7:
9402 /* load/store byte immediate offset */
9403 rd = insn & 7;
9404 rn = (insn >> 3) & 7;
b0109805 9405 addr = load_reg(s, rn);
99c475ab 9406 val = (insn >> 6) & 0x1f;
b0109805 9407 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9408
9409 if (insn & (1 << 11)) {
9410 /* load */
b0109805
PB
9411 tmp = gen_ld8u(addr, IS_USER(s));
9412 store_reg(s, rd, tmp);
99c475ab
FB
9413 } else {
9414 /* store */
b0109805
PB
9415 tmp = load_reg(s, rd);
9416 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9417 }
7d1b0095 9418 tcg_temp_free_i32(addr);
99c475ab
FB
9419 break;
9420
9421 case 8:
9422 /* load/store halfword immediate offset */
9423 rd = insn & 7;
9424 rn = (insn >> 3) & 7;
b0109805 9425 addr = load_reg(s, rn);
99c475ab 9426 val = (insn >> 5) & 0x3e;
b0109805 9427 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9428
9429 if (insn & (1 << 11)) {
9430 /* load */
b0109805
PB
9431 tmp = gen_ld16u(addr, IS_USER(s));
9432 store_reg(s, rd, tmp);
99c475ab
FB
9433 } else {
9434 /* store */
b0109805
PB
9435 tmp = load_reg(s, rd);
9436 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9437 }
7d1b0095 9438 tcg_temp_free_i32(addr);
99c475ab
FB
9439 break;
9440
9441 case 9:
9442 /* load/store from stack */
9443 rd = (insn >> 8) & 7;
b0109805 9444 addr = load_reg(s, 13);
99c475ab 9445 val = (insn & 0xff) * 4;
b0109805 9446 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9447
9448 if (insn & (1 << 11)) {
9449 /* load */
b0109805
PB
9450 tmp = gen_ld32(addr, IS_USER(s));
9451 store_reg(s, rd, tmp);
99c475ab
FB
9452 } else {
9453 /* store */
b0109805
PB
9454 tmp = load_reg(s, rd);
9455 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9456 }
7d1b0095 9457 tcg_temp_free_i32(addr);
99c475ab
FB
9458 break;
9459
9460 case 10:
9461 /* add to high reg */
9462 rd = (insn >> 8) & 7;
5899f386
FB
9463 if (insn & (1 << 11)) {
9464 /* SP */
5e3f878a 9465 tmp = load_reg(s, 13);
5899f386
FB
9466 } else {
9467 /* PC. bit 1 is ignored. */
7d1b0095 9468 tmp = tcg_temp_new_i32();
5e3f878a 9469 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9470 }
99c475ab 9471 val = (insn & 0xff) * 4;
5e3f878a
PB
9472 tcg_gen_addi_i32(tmp, tmp, val);
9473 store_reg(s, rd, tmp);
99c475ab
FB
9474 break;
9475
9476 case 11:
9477 /* misc */
9478 op = (insn >> 8) & 0xf;
9479 switch (op) {
9480 case 0:
9481 /* adjust stack pointer */
b26eefb6 9482 tmp = load_reg(s, 13);
99c475ab
FB
9483 val = (insn & 0x7f) * 4;
9484 if (insn & (1 << 7))
6a0d8a1d 9485 val = -(int32_t)val;
b26eefb6
PB
9486 tcg_gen_addi_i32(tmp, tmp, val);
9487 store_reg(s, 13, tmp);
99c475ab
FB
9488 break;
9489
9ee6e8bb
PB
9490 case 2: /* sign/zero extend. */
9491 ARCH(6);
9492 rd = insn & 7;
9493 rm = (insn >> 3) & 7;
b0109805 9494 tmp = load_reg(s, rm);
9ee6e8bb 9495 switch ((insn >> 6) & 3) {
b0109805
PB
9496 case 0: gen_sxth(tmp); break;
9497 case 1: gen_sxtb(tmp); break;
9498 case 2: gen_uxth(tmp); break;
9499 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9500 }
b0109805 9501 store_reg(s, rd, tmp);
9ee6e8bb 9502 break;
99c475ab
FB
9503 case 4: case 5: case 0xc: case 0xd:
9504 /* push/pop */
b0109805 9505 addr = load_reg(s, 13);
5899f386
FB
9506 if (insn & (1 << 8))
9507 offset = 4;
99c475ab 9508 else
5899f386
FB
9509 offset = 0;
9510 for (i = 0; i < 8; i++) {
9511 if (insn & (1 << i))
9512 offset += 4;
9513 }
9514 if ((insn & (1 << 11)) == 0) {
b0109805 9515 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9516 }
99c475ab
FB
9517 for (i = 0; i < 8; i++) {
9518 if (insn & (1 << i)) {
9519 if (insn & (1 << 11)) {
9520 /* pop */
b0109805
PB
9521 tmp = gen_ld32(addr, IS_USER(s));
9522 store_reg(s, i, tmp);
99c475ab
FB
9523 } else {
9524 /* push */
b0109805
PB
9525 tmp = load_reg(s, i);
9526 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9527 }
5899f386 9528 /* advance to the next address. */
b0109805 9529 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9530 }
9531 }
a50f5b91 9532 TCGV_UNUSED(tmp);
99c475ab
FB
9533 if (insn & (1 << 8)) {
9534 if (insn & (1 << 11)) {
9535 /* pop pc */
b0109805 9536 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9537 /* don't set the pc until the rest of the instruction
9538 has completed */
9539 } else {
9540 /* push lr */
b0109805
PB
9541 tmp = load_reg(s, 14);
9542 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9543 }
b0109805 9544 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9545 }
5899f386 9546 if ((insn & (1 << 11)) == 0) {
b0109805 9547 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9548 }
99c475ab 9549 /* write back the new stack pointer */
b0109805 9550 store_reg(s, 13, addr);
99c475ab 9551 /* set the new PC value */
be5e7a76
DES
9552 if ((insn & 0x0900) == 0x0900) {
9553 store_reg_from_load(env, s, 15, tmp);
9554 }
99c475ab
FB
9555 break;
9556
9ee6e8bb
PB
9557 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9558 rm = insn & 7;
d9ba4830 9559 tmp = load_reg(s, rm);
9ee6e8bb
PB
9560 s->condlabel = gen_new_label();
9561 s->condjmp = 1;
9562 if (insn & (1 << 11))
cb63669a 9563 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9564 else
cb63669a 9565 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9566 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9567 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9568 val = (uint32_t)s->pc + 2;
9569 val += offset;
9570 gen_jmp(s, val);
9571 break;
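            /* cbz/cbnz offset reconstruction above: imm5 lives in bits 7:3
               and the i bit in bit 9, so (insn & 0xf8) >> 2 gives imm5 * 2 and
               (insn & 0x200) >> 3 contributes i << 6; the target is that
               offset added to the architectural PC (s->pc + 2). */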
9572
9573 case 15: /* IT, nop-hint. */
9574 if ((insn & 0xf) == 0) {
9575 gen_nop_hint(s, (insn >> 4) & 0xf);
9576 break;
9577 }
9578 /* If Then. */
9579 s->condexec_cond = (insn >> 4) & 0xe;
9580 s->condexec_mask = insn & 0x1f;
9581 /* No actual code generated for this insn, just setup state. */
9582 break;
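            /* Internal IT-state layout: condexec_cond keeps firstcond with
               its low bit cleared, while the 5-bit condexec_mask holds
               firstcond[0]:mask. The per-insn advance in
               gen_intermediate_code_internal() folds the top bit of the mask
               into the condition and then shifts the mask left, which
               realises the then/else pattern. */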
9583
06c949e6 9584 case 0xe: /* bkpt */
be5e7a76 9585 ARCH(5);
bc4a0de0 9586 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9587 break;
9588
9ee6e8bb
PB
9589 case 0xa: /* rev */
9590 ARCH(6);
9591 rn = (insn >> 3) & 0x7;
9592 rd = insn & 0x7;
b0109805 9593 tmp = load_reg(s, rn);
9ee6e8bb 9594 switch ((insn >> 6) & 3) {
66896cb8 9595 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9596 case 1: gen_rev16(tmp); break;
9597 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9598 default: goto illegal_op;
9599 }
b0109805 9600 store_reg(s, rd, tmp);
9ee6e8bb
PB
9601 break;
9602
9603 case 6: /* cps */
9604 ARCH(6);
9605 if (IS_USER(s))
9606 break;
9607 if (IS_M(env)) {
8984bd2e 9608 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9609 /* PRIMASK */
8984bd2e
PB
9610 if (insn & 1) {
9611 addr = tcg_const_i32(16);
9612 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9613 tcg_temp_free_i32(addr);
8984bd2e 9614 }
9ee6e8bb 9615 /* FAULTMASK */
8984bd2e
PB
9616 if (insn & 2) {
9617 addr = tcg_const_i32(17);
9618 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9619 tcg_temp_free_i32(addr);
8984bd2e 9620 }
b75263d6 9621 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9622 gen_lookup_tb(s);
9623 } else {
9624 if (insn & (1 << 4))
9625 shift = CPSR_A | CPSR_I | CPSR_F;
9626 else
9627 shift = 0;
fa26df03 9628 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9629 }
9630 break;
9631
99c475ab
FB
9632 default:
9633 goto undef;
9634 }
9635 break;
9636
9637 case 12:
a7d3970d 9638 {
99c475ab 9639 /* load/store multiple */
a7d3970d
PM
9640 TCGv loaded_var;
9641 TCGV_UNUSED(loaded_var);
99c475ab 9642 rn = (insn >> 8) & 0x7;
b0109805 9643 addr = load_reg(s, rn);
99c475ab
FB
9644 for (i = 0; i < 8; i++) {
9645 if (insn & (1 << i)) {
99c475ab
FB
9646 if (insn & (1 << 11)) {
9647 /* load */
b0109805 9648 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9649 if (i == rn) {
9650 loaded_var = tmp;
9651 } else {
9652 store_reg(s, i, tmp);
9653 }
99c475ab
FB
9654 } else {
9655 /* store */
b0109805
PB
9656 tmp = load_reg(s, i);
9657 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9658 }
5899f386 9659 /* advance to the next address */
b0109805 9660 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9661 }
9662 }
b0109805 9663 if ((insn & (1 << rn)) == 0) {
a7d3970d 9664 /* base reg not in list: base register writeback */
b0109805
PB
9665 store_reg(s, rn, addr);
9666 } else {
a7d3970d
PM
9667 /* base reg in list: if load, complete it now */
9668 if (insn & (1 << 11)) {
9669 store_reg(s, rn, loaded_var);
9670 }
7d1b0095 9671 tcg_temp_free_i32(addr);
b0109805 9672 }
99c475ab 9673 break;
a7d3970d 9674 }
99c475ab
FB
9675 case 13:
9676 /* conditional branch or swi */
9677 cond = (insn >> 8) & 0xf;
9678 if (cond == 0xe)
9679 goto undef;
9680
9681 if (cond == 0xf) {
9682 /* swi */
422ebf69 9683 gen_set_pc_im(s->pc);
9ee6e8bb 9684 s->is_jmp = DISAS_SWI;
99c475ab
FB
9685 break;
9686 }
9687 /* generate a conditional jump to next instruction */
e50e6a20 9688 s->condlabel = gen_new_label();
d9ba4830 9689 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9690 s->condjmp = 1;
99c475ab
FB
9691
9692 /* jump to the offset */
5899f386 9693 val = (uint32_t)s->pc + 2;
99c475ab 9694 offset = ((int32_t)insn << 24) >> 24;
5899f386 9695 val += offset << 1;
8aaca4c0 9696 gen_jmp(s, val);
99c475ab
FB
9697 break;
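            /* Branch target above: the architectural PC (s->pc + 2) plus the
               sign-extended imm8 doubled. E.g. imm8 == 0xfe (-2) branches
               back to the insn itself. */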
9698
9699 case 14:
358bf29e 9700 if (insn & (1 << 11)) {
9ee6e8bb
PB
9701 if (disas_thumb2_insn(env, s, insn))
9702 goto undef32;
358bf29e
PB
9703 break;
9704 }
9ee6e8bb 9705 /* unconditional branch */
99c475ab
FB
9706 val = (uint32_t)s->pc;
9707 offset = ((int32_t)insn << 21) >> 21;
9708 val += (offset << 1) + 2;
8aaca4c0 9709 gen_jmp(s, val);
99c475ab
FB
9710 break;
9711
9712 case 15:
9ee6e8bb 9713 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9714 goto undef32;
9ee6e8bb 9715 break;
99c475ab
FB
9716 }
9717 return;
9ee6e8bb 9718undef32:
bc4a0de0 9719 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9720 return;
9721illegal_op:
99c475ab 9722undef:
bc4a0de0 9723 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9724}
9725
2c0262af
FB
9726/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9727 basic block 'tb'. If search_pc is TRUE, also generate PC
9728 information for each intermediate instruction. */
2cfc5f17
TS
9729static inline void gen_intermediate_code_internal(CPUState *env,
9730 TranslationBlock *tb,
9731 int search_pc)
2c0262af
FB
9732{
9733 DisasContext dc1, *dc = &dc1;
a1d1bb31 9734 CPUBreakpoint *bp;
2c0262af
FB
9735 uint16_t *gen_opc_end;
9736 int j, lj;
0fa85d43 9737 target_ulong pc_start;
b5ff1b31 9738 uint32_t next_page_start;
2e70f6ef
PB
9739 int num_insns;
9740 int max_insns;
3b46e624 9741
2c0262af 9742 /* generate intermediate code */
0fa85d43 9743 pc_start = tb->pc;
3b46e624 9744
2c0262af
FB
9745 dc->tb = tb;
9746
2c0262af 9747 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9748
9749 dc->is_jmp = DISAS_NEXT;
9750 dc->pc = pc_start;
8aaca4c0 9751 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9752 dc->condjmp = 0;
7204ab88 9753 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9754 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9755 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9756#if !defined(CONFIG_USER_ONLY)
61f74d6a 9757 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9758#endif
5df8bac1 9759 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9760 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9761 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9762 cpu_F0s = tcg_temp_new_i32();
9763 cpu_F1s = tcg_temp_new_i32();
9764 cpu_F0d = tcg_temp_new_i64();
9765 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9766 cpu_V0 = cpu_F0d;
9767 cpu_V1 = cpu_F1d;
e677137d 9768 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9769 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9770 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9771 lj = -1;
2e70f6ef
PB
9772 num_insns = 0;
9773 max_insns = tb->cflags & CF_COUNT_MASK;
9774 if (max_insns == 0)
9775 max_insns = CF_COUNT_MASK;
9776
9777 gen_icount_start();
e12ce78d 9778
3849902c
PM
9779 tcg_clear_temp_count();
9780
e12ce78d
PM
9781 /* A note on handling of the condexec (IT) bits:
9782 *
9783 * We want to avoid the overhead of having to write the updated condexec
9784 * bits back to the CPUState for every instruction in an IT block. So:
9785 * (1) if the condexec bits are not already zero then we write
9786 * zero back into the CPUState now. This avoids complications trying
9787 * to do it at the end of the block. (For example if we don't do this
9788 * it's hard to identify whether we can safely skip writing condexec
9789 * at the end of the TB, which we definitely want to do for the case
9790 * where a TB doesn't do anything with the IT state at all.)
9791 * (2) if we are going to leave the TB then we call gen_set_condexec()
9792 * which will write the correct value into CPUState if zero is wrong.
9793 * This is done both for leaving the TB at the end, and for leaving
9794 * it because of an exception we know will happen, which is done in
9795 * gen_exception_insn(). The latter is necessary because we need to
9796 * leave the TB with the PC/IT state just prior to execution of the
9797 * instruction which caused the exception.
9798 * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
9799 * then the CPUState will be wrong and we need to reset it.
9800 * This is handled in the same way as restoration of the
9801 * PC in these situations: we will be called again with search_pc=1
9802 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9803 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9804 * this to restore the condexec bits.
e12ce78d
PM
9805 *
9806 * Note that there are no instructions which can read the condexec
9807 * bits, and none which can write non-static values to them, so
9808 * we don't need to care about whether CPUState is correct in the
9809 * middle of a TB.
9810 */
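/* The per-PC value recorded in gen_opc_condexec_bits[] below is
 * (condexec_cond << 4) | (condexec_mask >> 1), i.e. the same cond:mask
 * layout that restore_state_to_opc() copies straight back into
 * env->condexec_bits.
 */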
9811
9ee6e8bb
PB
9812 /* Reset the conditional execution bits immediately. This avoids
9813 complications trying to do it at the end of the block. */
98eac7ca 9814 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9815 {
7d1b0095 9816 TCGv tmp = tcg_temp_new_i32();
8f01245e 9817 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9818 store_cpu_field(tmp, condexec_bits);
8f01245e 9819 }
2c0262af 9820 do {
fbb4a2e3
PB
9821#ifdef CONFIG_USER_ONLY
9822 /* Intercept jump to the magic kernel page. */
9823 if (dc->pc >= 0xffff0000) {
9824 /* We always get here via a jump, so we know we are not in a
9825 conditional execution block. */
9826 gen_exception(EXCP_KERNEL_TRAP);
9827 dc->is_jmp = DISAS_UPDATE;
9828 break;
9829 }
9830#else
9ee6e8bb
PB
9831 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9832 /* We always get here via a jump, so we know we are not in a
9833 conditional execution block. */
d9ba4830 9834 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9835 dc->is_jmp = DISAS_UPDATE;
9836 break;
9ee6e8bb
PB
9837 }
9838#endif
9839
72cf2d4f
BS
9840 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9841 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9842 if (bp->pc == dc->pc) {
bc4a0de0 9843 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9844 /* Advance PC so that clearing the breakpoint will
9845 invalidate this TB. */
9846 dc->pc += 2;
9847 goto done_generating;
1fddef4b
FB
9848 break;
9849 }
9850 }
9851 }
2c0262af
FB
9852 if (search_pc) {
9853 j = gen_opc_ptr - gen_opc_buf;
9854 if (lj < j) {
9855 lj++;
9856 while (lj < j)
9857 gen_opc_instr_start[lj++] = 0;
9858 }
0fa85d43 9859 gen_opc_pc[lj] = dc->pc;
e12ce78d 9860 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9861 gen_opc_instr_start[lj] = 1;
2e70f6ef 9862 gen_opc_icount[lj] = num_insns;
2c0262af 9863 }
e50e6a20 9864
2e70f6ef
PB
9865 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9866 gen_io_start();
9867
5642463a
PM
9868 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9869 tcg_gen_debug_insn_start(dc->pc);
9870 }
9871
7204ab88 9872 if (dc->thumb) {
9ee6e8bb
PB
9873 disas_thumb_insn(env, dc);
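            /* Advance the IT state after each Thumb insn: the top bit of the
               5-bit mask supplies the low bit of the condition for the next
               insn, then the mask shifts left, retiring that slot; once the
               mask is empty the IT block is finished and the condition is
               cleared. */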
9874 if (dc->condexec_mask) {
9875 dc->condexec_cond = (dc->condexec_cond & 0xe)
9876 | ((dc->condexec_mask >> 4) & 1);
9877 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9878 if (dc->condexec_mask == 0) {
9879 dc->condexec_cond = 0;
9880 }
9881 }
9882 } else {
9883 disas_arm_insn(env, dc);
9884 }
e50e6a20
FB
9885
9886 if (dc->condjmp && !dc->is_jmp) {
9887 gen_set_label(dc->condlabel);
9888 dc->condjmp = 0;
9889 }
3849902c
PM
9890
9891 if (tcg_check_temp_count()) {
9892 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9893 }
9894
aaf2d97d 9895 /* Translation stops when a conditional branch is encountered.
e50e6a20 9896 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9897 * Also stop translation when a page boundary is reached. This
bf20dc07 9898 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9899 num_insns ++;
1fddef4b
FB
9900 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9901 !env->singlestep_enabled &&
1b530a6d 9902 !singlestep &&
2e70f6ef
PB
9903 dc->pc < next_page_start &&
9904 num_insns < max_insns);
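    /* The loop above ends the TB on the first of: a branch or other
       is_jmp-setting insn, a full opcode buffer, singlestep (either the
       per-CPU or the global flag), a page boundary, or the icount limit. */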
9905
9906 if (tb->cflags & CF_LAST_IO) {
9907 if (dc->condjmp) {
9908 /* FIXME: This can theoretically happen with self-modifying
9909 code. */
9910 cpu_abort(env, "IO on conditional branch instruction");
9911 }
9912 gen_io_end();
9913 }
9ee6e8bb 9914
b5ff1b31 9915 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9916 instruction was a conditional branch or trap, and the PC has
9917 already been written. */
551bd27f 9918 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9919 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9920 if (dc->condjmp) {
9ee6e8bb
PB
9921 gen_set_condexec(dc);
9922 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9923 gen_exception(EXCP_SWI);
9ee6e8bb 9924 } else {
d9ba4830 9925 gen_exception(EXCP_DEBUG);
9ee6e8bb 9926 }
e50e6a20
FB
9927 gen_set_label(dc->condlabel);
9928 }
9929 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9930 gen_set_pc_im(dc->pc);
e50e6a20 9931 dc->condjmp = 0;
8aaca4c0 9932 }
9ee6e8bb
PB
9933 gen_set_condexec(dc);
9934 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9935 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9936 } else {
9937 /* FIXME: Single stepping a WFI insn will not halt
9938 the CPU. */
d9ba4830 9939 gen_exception(EXCP_DEBUG);
9ee6e8bb 9940 }
8aaca4c0 9941 } else {
9ee6e8bb
PB
9942 /* While branches must always occur at the end of an IT block,
9943 there are a few other things that can cause us to terminate
9944 the TB in the middle of an IT block:
9945 - Exception generating instructions (bkpt, swi, undefined).
9946 - Page boundaries.
9947 - Hardware watchpoints.
9948 Hardware breakpoints have already been handled and skip this code.
9949 */
9950 gen_set_condexec(dc);
8aaca4c0 9951 switch(dc->is_jmp) {
8aaca4c0 9952 case DISAS_NEXT:
6e256c93 9953 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9954 break;
9955 default:
9956 case DISAS_JUMP:
9957 case DISAS_UPDATE:
9958 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9959 tcg_gen_exit_tb(0);
8aaca4c0
FB
9960 break;
9961 case DISAS_TB_JUMP:
9962 /* nothing more to generate */
9963 break;
9ee6e8bb 9964 case DISAS_WFI:
d9ba4830 9965 gen_helper_wfi();
9ee6e8bb
PB
9966 break;
9967 case DISAS_SWI:
d9ba4830 9968 gen_exception(EXCP_SWI);
9ee6e8bb 9969 break;
8aaca4c0 9970 }
e50e6a20
FB
9971 if (dc->condjmp) {
9972 gen_set_label(dc->condlabel);
9ee6e8bb 9973 gen_set_condexec(dc);
6e256c93 9974 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9975 dc->condjmp = 0;
9976 }
2c0262af 9977 }
2e70f6ef 9978
9ee6e8bb 9979done_generating:
2e70f6ef 9980 gen_icount_end(tb, num_insns);
2c0262af
FB
9981 *gen_opc_ptr = INDEX_op_end;
9982
9983#ifdef DEBUG_DISAS
8fec2b8c 9984 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9985 qemu_log("----------------\n");
9986 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9987 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9988 qemu_log("\n");
2c0262af
FB
9989 }
9990#endif
b5ff1b31
FB
9991 if (search_pc) {
9992 j = gen_opc_ptr - gen_opc_buf;
9993 lj++;
9994 while (lj <= j)
9995 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9996 } else {
2c0262af 9997 tb->size = dc->pc - pc_start;
2e70f6ef 9998 tb->icount = num_insns;
b5ff1b31 9999 }
2c0262af
FB
10000}
10001
2cfc5f17 10002void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 10003{
2cfc5f17 10004 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
10005}
10006
2cfc5f17 10007void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 10008{
2cfc5f17 10009 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
10010}
10011
b5ff1b31
FB
10012static const char *cpu_mode_names[16] = {
10013 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10014 "???", "???", "???", "und", "???", "???", "???", "sys"
10015};
9ee6e8bb 10016
9a78eead 10017void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10018 int flags)
2c0262af
FB
10019{
10020 int i;
06e80fc9 10021#if 0
bc380d17 10022 union {
b7bcbe95
FB
10023 uint32_t i;
10024 float s;
10025 } s0, s1;
10026 CPU_DoubleU d;
a94a6abf
PB
10027 /* ??? This assumes float64 and double have the same layout.
10028 Oh well, it's only debug dumps. */
10029 union {
10030 float64 f64;
10031 double d;
10032 } d0;
06e80fc9 10033#endif
b5ff1b31 10034 uint32_t psr;
2c0262af
FB
10035
10036 for(i=0;i<16;i++) {
7fe48483 10037 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10038 if ((i % 4) == 3)
7fe48483 10039 cpu_fprintf(f, "\n");
2c0262af 10040 else
7fe48483 10041 cpu_fprintf(f, " ");
2c0262af 10042 }
b5ff1b31 10043 psr = cpsr_read(env);
687fa640
TS
10044 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10045 psr,
b5ff1b31
FB
10046 psr & (1 << 31) ? 'N' : '-',
10047 psr & (1 << 30) ? 'Z' : '-',
10048 psr & (1 << 29) ? 'C' : '-',
10049 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10050 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10051 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10052
5e3f878a 10053#if 0
b7bcbe95 10054 for (i = 0; i < 16; i++) {
8e96005d
FB
10055 d.d = env->vfp.regs[i];
10056 s0.i = d.l.lower;
10057 s1.i = d.l.upper;
a94a6abf
PB
10058 d0.f64 = d.d;
10059 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 10060 i * 2, (int)s0.i, s0.s,
a94a6abf 10061 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 10062 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 10063 d0.d);
b7bcbe95 10064 }
40f137e1 10065 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 10066#endif
2c0262af 10067}
a6b025d3 10068
e87b7cb0 10069void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
10070{
10071 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 10072 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10073}