]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
target-arm: Mark 1136r1 as a v6K core
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
2c0262af 28#include "disas.h"
57fec1fe 29#include "tcg-op.h"
79383c9c 30#include "qemu-log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
/* Architecture-feature convenience tests against the current CPUState 'env'. */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

/* Reject the insn as UNDEFINED unless architecture level 'x' is present. */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
9ee6e8bb
PB
56 /* Thumb-2 condtional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
b5ff1b31
FB
62#if !defined(CONFIG_USER_ONLY)
63 int user;
64#endif
5df8bac1 65 int vfp_enabled;
69d1fc22
PM
66 int vec_len;
67 int vec_stride;
2c0262af
FB
68} DisasContext;
69
e12ce78d
PM
70static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
71
b5ff1b31
FB
72#if defined(CONFIG_USER_ONLY)
73#define IS_USER(s) 1
74#else
75#define IS_USER(s) (s->user)
76#endif
77
9ee6e8bb
PB
78/* These instructions trap after executing, so defer them until after the
79 conditional executions state has been updated. */
80#define DISAS_WFI 4
81#define DISAS_SWI 5
2c0262af 82
a7812ae4 83static TCGv_ptr cpu_env;
ad69471c 84/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 85static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 86static TCGv_i32 cpu_R[16];
426f5abc
PB
87static TCGv_i32 cpu_exclusive_addr;
88static TCGv_i32 cpu_exclusive_val;
89static TCGv_i32 cpu_exclusive_high;
90#ifdef CONFIG_USER_ONLY
91static TCGv_i32 cpu_exclusive_test;
92static TCGv_i32 cpu_exclusive_info;
93#endif
ad69471c 94
b26eefb6 95/* FIXME: These should be removed. */
a7812ae4
PB
96static TCGv cpu_F0s, cpu_F1s;
97static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 98
2e70f6ef
PB
99#include "gen-icount.h"
100
155c3eac
FN
101static const char *regnames[] =
102 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
103 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
104
b26eefb6
PB
105/* initialize TCG globals. */
106void arm_translate_init(void)
107{
155c3eac
FN
108 int i;
109
a7812ae4
PB
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111
155c3eac
FN
112 for (i = 0; i < 16; i++) {
113 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, regs[i]),
115 regnames[i]);
116 }
426f5abc
PB
117 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, exclusive_addr), "exclusive_addr");
119 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, exclusive_val), "exclusive_val");
121 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_high), "exclusive_high");
123#ifdef CONFIG_USER_ONLY
124 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, exclusive_test), "exclusive_test");
126 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, exclusive_info), "exclusive_info");
128#endif
155c3eac 129
a7812ae4 130#define GEN_HELPER 2
7b59220e 131#include "helper.h"
b26eefb6
PB
132}
133
d9ba4830
PB
134static inline TCGv load_cpu_offset(int offset)
135{
7d1b0095 136 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
142
143static inline void store_cpu_offset(TCGv var, int offset)
144{
145 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 146 tcg_temp_free_i32(var);
d9ba4830
PB
147}
148
149#define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
151
b26eefb6
PB
152/* Set a variable to the value of a CPU register. */
153static void load_reg_var(DisasContext *s, TCGv var, int reg)
154{
155 if (reg == 15) {
156 uint32_t addr;
157 /* normaly, since we updated PC, we need only to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
155c3eac 164 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
169static inline TCGv load_reg(DisasContext *s, int reg)
170{
7d1b0095 171 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
176/* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178static void store_reg(DisasContext *s, int reg, TCGv var)
179{
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
155c3eac 184 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 185 tcg_temp_free_i32(var);
b26eefb6
PB
186}
187
/* Value extensions.  */
#define gen_uxtb(var)   tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var)   tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var)   tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var)   tcg_gen_ext16s_i32(var, var)

/* Dual byte->halfword extensions go through helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 196
b26eefb6 197
b75263d6
JR
198static inline void gen_set_cpsr(TCGv var, uint32_t mask)
199{
200 TCGv tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
203}
d9ba4830
PB
204/* Set NZCV flags from the high 4 bits of var. */
205#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
207static void gen_exception(int excp)
208{
7d1b0095 209 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
210 tcg_gen_movi_i32(tmp, excp);
211 gen_helper_exception(tmp);
7d1b0095 212 tcg_temp_free_i32(tmp);
d9ba4830
PB
213}
214
3670669c
PB
215static void gen_smul_dual(TCGv a, TCGv b)
216{
7d1b0095
PM
217 TCGv tmp1 = tcg_temp_new_i32();
218 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
219 tcg_gen_ext16s_i32(tmp1, a);
220 tcg_gen_ext16s_i32(tmp2, b);
3670669c 221 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 222 tcg_temp_free_i32(tmp2);
3670669c
PB
223 tcg_gen_sari_i32(a, a, 16);
224 tcg_gen_sari_i32(b, b, 16);
225 tcg_gen_mul_i32(b, b, a);
226 tcg_gen_mov_i32(a, tmp1);
7d1b0095 227 tcg_temp_free_i32(tmp1);
3670669c
PB
228}
229
230/* Byteswap each halfword. */
231static void gen_rev16(TCGv var)
232{
7d1b0095 233 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
234 tcg_gen_shri_i32(tmp, var, 8);
235 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
236 tcg_gen_shli_i32(var, var, 8);
237 tcg_gen_andi_i32(var, var, 0xff00ff00);
238 tcg_gen_or_i32(var, var, tmp);
7d1b0095 239 tcg_temp_free_i32(tmp);
3670669c
PB
240}
241
242/* Byteswap low halfword and sign extend. */
243static void gen_revsh(TCGv var)
244{
1a855029
AJ
245 tcg_gen_ext16u_i32(var, var);
246 tcg_gen_bswap16_i32(var, var);
247 tcg_gen_ext16s_i32(var, var);
3670669c
PB
248}
249
250/* Unsigned bitfield extract. */
251static void gen_ubfx(TCGv var, int shift, uint32_t mask)
252{
253 if (shift)
254 tcg_gen_shri_i32(var, var, shift);
255 tcg_gen_andi_i32(var, var, mask);
256}
257
258/* Signed bitfield extract. */
259static void gen_sbfx(TCGv var, int shift, int width)
260{
261 uint32_t signbit;
262
263 if (shift)
264 tcg_gen_sari_i32(var, var, shift);
265 if (shift + width < 32) {
266 signbit = 1u << (width - 1);
267 tcg_gen_andi_i32(var, var, (1u << width) - 1);
268 tcg_gen_xori_i32(var, var, signbit);
269 tcg_gen_subi_i32(var, var, signbit);
270 }
271}
272
273/* Bitfield insertion. Insert val into base. Clobbers base and val. */
274static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
275{
3670669c 276 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
277 tcg_gen_shli_i32(val, val, shift);
278 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
279 tcg_gen_or_i32(dest, base, val);
280}
281
838fa72d
AJ
282/* Return (b << 32) + a. Mark inputs as dead */
283static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 284{
838fa72d
AJ
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
286
287 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 288 tcg_temp_free_i32(b);
838fa72d
AJ
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
291
292 tcg_temp_free_i64(tmp64);
293 return a;
294}
295
296/* Return (b << 32) - a. Mark inputs as dead. */
297static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
298{
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
300
301 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 302 tcg_temp_free_i32(b);
838fa72d
AJ
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
305
306 tcg_temp_free_i64(tmp64);
307 return a;
3670669c
PB
308}
309
8f01245e
PB
310/* FIXME: Most targets have native widening multiplication.
311 It would be good to use that instead of a full wide multiply. */
5e3f878a 312/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 313static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 314{
a7812ae4
PB
315 TCGv_i64 tmp1 = tcg_temp_new_i64();
316 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
317
318 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 319 tcg_temp_free_i32(a);
5e3f878a 320 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 321 tcg_temp_free_i32(b);
5e3f878a 322 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 323 tcg_temp_free_i64(tmp2);
5e3f878a
PB
324 return tmp1;
325}
326
a7812ae4 327static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
a7812ae4
PB
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
331
332 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 333 tcg_temp_free_i32(a);
5e3f878a 334 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 335 tcg_temp_free_i32(b);
5e3f878a 336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 337 tcg_temp_free_i64(tmp2);
5e3f878a
PB
338 return tmp1;
339}
340
8f01245e
PB
341/* Swap low and high halfwords. */
342static void gen_swap_half(TCGv var)
343{
7d1b0095 344 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
345 tcg_gen_shri_i32(tmp, var, 16);
346 tcg_gen_shli_i32(var, var, 16);
347 tcg_gen_or_i32(var, var, tmp);
7d1b0095 348 tcg_temp_free_i32(tmp);
8f01245e
PB
349}
350
b26eefb6
PB
351/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
352 tmp = (t0 ^ t1) & 0x8000;
353 t0 &= ~0x8000;
354 t1 &= ~0x8000;
355 t0 = (t0 + t1) ^ tmp;
356 */
357
358static void gen_add16(TCGv t0, TCGv t1)
359{
7d1b0095 360 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
361 tcg_gen_xor_i32(tmp, t0, t1);
362 tcg_gen_andi_i32(tmp, tmp, 0x8000);
363 tcg_gen_andi_i32(t0, t0, ~0x8000);
364 tcg_gen_andi_i32(t1, t1, ~0x8000);
365 tcg_gen_add_i32(t0, t0, t1);
366 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
367 tcg_temp_free_i32(tmp);
368 tcg_temp_free_i32(t1);
b26eefb6
PB
369}
370
9a119ff6
PB
371#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
372
b26eefb6
PB
373/* Set CF to the top bit of var. */
374static void gen_set_CF_bit31(TCGv var)
375{
7d1b0095 376 TCGv tmp = tcg_temp_new_i32();
b26eefb6 377 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 378 gen_set_CF(tmp);
7d1b0095 379 tcg_temp_free_i32(tmp);
b26eefb6
PB
380}
381
382/* Set N and Z flags from var. */
383static inline void gen_logic_CC(TCGv var)
384{
6fbe23d5
PB
385 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
387}
388
389/* T0 += T1 + CF. */
396e467c 390static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 391{
d9ba4830 392 TCGv tmp;
396e467c 393 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 394 tmp = load_cpu_field(CF);
396e467c 395 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 396 tcg_temp_free_i32(tmp);
b26eefb6
PB
397}
398
e9bb4aa9
JR
399/* dest = T0 + T1 + CF. */
400static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
401{
402 TCGv tmp;
403 tcg_gen_add_i32(dest, t0, t1);
404 tmp = load_cpu_field(CF);
405 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 406 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
407}
408
3670669c
PB
409/* dest = T0 - T1 + CF - 1. */
410static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
411{
d9ba4830 412 TCGv tmp;
3670669c 413 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 414 tmp = load_cpu_field(CF);
3670669c
PB
415 tcg_gen_add_i32(dest, dest, tmp);
416 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 417 tcg_temp_free_i32(tmp);
3670669c
PB
418}
419
ad69471c
PB
420/* FIXME: Implement this natively. */
421#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
422
9a119ff6 423static void shifter_out_im(TCGv var, int shift)
b26eefb6 424{
7d1b0095 425 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
426 if (shift == 0) {
427 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 428 } else {
9a119ff6 429 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 430 if (shift != 31)
9a119ff6
PB
431 tcg_gen_andi_i32(tmp, tmp, 1);
432 }
433 gen_set_CF(tmp);
7d1b0095 434 tcg_temp_free_i32(tmp);
9a119ff6 435}
b26eefb6 436
9a119ff6
PB
437/* Shift by immediate. Includes special handling for shift == 0. */
438static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
439{
440 switch (shiftop) {
441 case 0: /* LSL */
442 if (shift != 0) {
443 if (flags)
444 shifter_out_im(var, 32 - shift);
445 tcg_gen_shli_i32(var, var, shift);
446 }
447 break;
448 case 1: /* LSR */
449 if (shift == 0) {
450 if (flags) {
451 tcg_gen_shri_i32(var, var, 31);
452 gen_set_CF(var);
453 }
454 tcg_gen_movi_i32(var, 0);
455 } else {
456 if (flags)
457 shifter_out_im(var, shift - 1);
458 tcg_gen_shri_i32(var, var, shift);
459 }
460 break;
461 case 2: /* ASR */
462 if (shift == 0)
463 shift = 32;
464 if (flags)
465 shifter_out_im(var, shift - 1);
466 if (shift == 32)
467 shift = 31;
468 tcg_gen_sari_i32(var, var, shift);
469 break;
470 case 3: /* ROR/RRX */
471 if (shift != 0) {
472 if (flags)
473 shifter_out_im(var, shift - 1);
f669df27 474 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 475 } else {
d9ba4830 476 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
477 if (flags)
478 shifter_out_im(var, 0);
479 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
480 tcg_gen_shli_i32(tmp, tmp, 31);
481 tcg_gen_or_i32(var, var, tmp);
7d1b0095 482 tcg_temp_free_i32(tmp);
b26eefb6
PB
483 }
484 }
485};
486
8984bd2e
PB
487static inline void gen_arm_shift_reg(TCGv var, int shiftop,
488 TCGv shift, int flags)
489{
490 if (flags) {
491 switch (shiftop) {
492 case 0: gen_helper_shl_cc(var, var, shift); break;
493 case 1: gen_helper_shr_cc(var, var, shift); break;
494 case 2: gen_helper_sar_cc(var, var, shift); break;
495 case 3: gen_helper_ror_cc(var, var, shift); break;
496 }
497 } else {
498 switch (shiftop) {
499 case 0: gen_helper_shl(var, var, shift); break;
500 case 1: gen_helper_shr(var, var, shift); break;
501 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
502 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
503 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
504 }
505 }
7d1b0095 506 tcg_temp_free_i32(shift);
8984bd2e
PB
507}
508
6ddbc6e4
PB
509#define PAS_OP(pfx) \
510 switch (op2) { \
511 case 0: gen_pas_helper(glue(pfx,add16)); break; \
512 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
513 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
514 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
515 case 4: gen_pas_helper(glue(pfx,add8)); break; \
516 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
517 }
d9ba4830 518static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 519{
a7812ae4 520 TCGv_ptr tmp;
6ddbc6e4
PB
521
522 switch (op1) {
523#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
524 case 1:
a7812ae4 525 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
526 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
527 PAS_OP(s)
b75263d6 528 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
529 break;
530 case 5:
a7812ae4 531 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
532 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
533 PAS_OP(u)
b75263d6 534 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
535 break;
536#undef gen_pas_helper
537#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
538 case 2:
539 PAS_OP(q);
540 break;
541 case 3:
542 PAS_OP(sh);
543 break;
544 case 6:
545 PAS_OP(uq);
546 break;
547 case 7:
548 PAS_OP(uh);
549 break;
550#undef gen_pas_helper
551 }
552}
9ee6e8bb
PB
553#undef PAS_OP
554
6ddbc6e4
PB
555/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
556#define PAS_OP(pfx) \
ed89a2f1 557 switch (op1) { \
6ddbc6e4
PB
558 case 0: gen_pas_helper(glue(pfx,add8)); break; \
559 case 1: gen_pas_helper(glue(pfx,add16)); break; \
560 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
562 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
564 }
d9ba4830 565static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 566{
a7812ae4 567 TCGv_ptr tmp;
6ddbc6e4 568
ed89a2f1 569 switch (op2) {
6ddbc6e4
PB
570#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
571 case 0:
a7812ae4 572 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
573 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
574 PAS_OP(s)
b75263d6 575 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
576 break;
577 case 4:
a7812ae4 578 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
b75263d6 581 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
582 break;
583#undef gen_pas_helper
584#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
585 case 1:
586 PAS_OP(q);
587 break;
588 case 2:
589 PAS_OP(sh);
590 break;
591 case 5:
592 PAS_OP(uq);
593 break;
594 case 6:
595 PAS_OP(uh);
596 break;
597#undef gen_pas_helper
598 }
599}
9ee6e8bb
PB
600#undef PAS_OP
601
d9ba4830
PB
602static void gen_test_cc(int cc, int label)
603{
604 TCGv tmp;
605 TCGv tmp2;
d9ba4830
PB
606 int inv;
607
d9ba4830
PB
608 switch (cc) {
609 case 0: /* eq: Z */
6fbe23d5 610 tmp = load_cpu_field(ZF);
cb63669a 611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
612 break;
613 case 1: /* ne: !Z */
6fbe23d5 614 tmp = load_cpu_field(ZF);
cb63669a 615 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
616 break;
617 case 2: /* cs: C */
618 tmp = load_cpu_field(CF);
cb63669a 619 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
620 break;
621 case 3: /* cc: !C */
622 tmp = load_cpu_field(CF);
cb63669a 623 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
624 break;
625 case 4: /* mi: N */
6fbe23d5 626 tmp = load_cpu_field(NF);
cb63669a 627 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
628 break;
629 case 5: /* pl: !N */
6fbe23d5 630 tmp = load_cpu_field(NF);
cb63669a 631 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
632 break;
633 case 6: /* vs: V */
634 tmp = load_cpu_field(VF);
cb63669a 635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
636 break;
637 case 7: /* vc: !V */
638 tmp = load_cpu_field(VF);
cb63669a 639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
640 break;
641 case 8: /* hi: C && !Z */
642 inv = gen_new_label();
643 tmp = load_cpu_field(CF);
cb63669a 644 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 645 tcg_temp_free_i32(tmp);
6fbe23d5 646 tmp = load_cpu_field(ZF);
cb63669a 647 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
648 gen_set_label(inv);
649 break;
650 case 9: /* ls: !C || Z */
651 tmp = load_cpu_field(CF);
cb63669a 652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 653 tcg_temp_free_i32(tmp);
6fbe23d5 654 tmp = load_cpu_field(ZF);
cb63669a 655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
656 break;
657 case 10: /* ge: N == V -> N ^ V == 0 */
658 tmp = load_cpu_field(VF);
6fbe23d5 659 tmp2 = load_cpu_field(NF);
d9ba4830 660 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 661 tcg_temp_free_i32(tmp2);
cb63669a 662 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
663 break;
664 case 11: /* lt: N != V -> N ^ V != 0 */
665 tmp = load_cpu_field(VF);
6fbe23d5 666 tmp2 = load_cpu_field(NF);
d9ba4830 667 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 668 tcg_temp_free_i32(tmp2);
cb63669a 669 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
670 break;
671 case 12: /* gt: !Z && N == V */
672 inv = gen_new_label();
6fbe23d5 673 tmp = load_cpu_field(ZF);
cb63669a 674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 675 tcg_temp_free_i32(tmp);
d9ba4830 676 tmp = load_cpu_field(VF);
6fbe23d5 677 tmp2 = load_cpu_field(NF);
d9ba4830 678 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 679 tcg_temp_free_i32(tmp2);
cb63669a 680 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
681 gen_set_label(inv);
682 break;
683 case 13: /* le: Z || N != V */
6fbe23d5 684 tmp = load_cpu_field(ZF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 686 tcg_temp_free_i32(tmp);
d9ba4830 687 tmp = load_cpu_field(VF);
6fbe23d5 688 tmp2 = load_cpu_field(NF);
d9ba4830 689 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 690 tcg_temp_free_i32(tmp2);
cb63669a 691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
692 break;
693 default:
694 fprintf(stderr, "Bad condition code 0x%x\n", cc);
695 abort();
696 }
7d1b0095 697 tcg_temp_free_i32(tmp);
d9ba4830 698}
2c0262af 699
/* Nonzero entries mark the data-processing opcodes whose flag
   handling is "logical" style (N/Z from the result). */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 718
d9ba4830
PB
719/* Set PC and Thumb state from an immediate address. */
720static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 721{
b26eefb6 722 TCGv tmp;
99c475ab 723
b26eefb6 724 s->is_jmp = DISAS_UPDATE;
d9ba4830 725 if (s->thumb != (addr & 1)) {
7d1b0095 726 tmp = tcg_temp_new_i32();
d9ba4830
PB
727 tcg_gen_movi_i32(tmp, addr & 1);
728 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 729 tcg_temp_free_i32(tmp);
d9ba4830 730 }
155c3eac 731 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
732}
733
734/* Set PC and Thumb state from var. var is marked as dead. */
735static inline void gen_bx(DisasContext *s, TCGv var)
736{
d9ba4830 737 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
738 tcg_gen_andi_i32(cpu_R[15], var, ~1);
739 tcg_gen_andi_i32(var, var, 1);
740 store_cpu_field(var, thumb);
d9ba4830
PB
741}
742
21aeb343
JR
743/* Variant of store_reg which uses branch&exchange logic when storing
744 to r15 in ARM architecture v7 and above. The source must be a temporary
745 and will be marked as dead. */
746static inline void store_reg_bx(CPUState *env, DisasContext *s,
747 int reg, TCGv var)
748{
749 if (reg == 15 && ENABLE_ARCH_7) {
750 gen_bx(s, var);
751 } else {
752 store_reg(s, reg, var);
753 }
754}
755
be5e7a76
DES
756/* Variant of store_reg which uses branch&exchange logic when storing
757 * to r15 in ARM architecture v5T and above. This is used for storing
758 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
759 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
760static inline void store_reg_from_load(CPUState *env, DisasContext *s,
761 int reg, TCGv var)
762{
763 if (reg == 15 && ENABLE_ARCH_5) {
764 gen_bx(s, var);
765 } else {
766 store_reg(s, reg, var);
767 }
768}
769
b0109805
PB
770static inline TCGv gen_ld8s(TCGv addr, int index)
771{
7d1b0095 772 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
773 tcg_gen_qemu_ld8s(tmp, addr, index);
774 return tmp;
775}
776static inline TCGv gen_ld8u(TCGv addr, int index)
777{
7d1b0095 778 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
779 tcg_gen_qemu_ld8u(tmp, addr, index);
780 return tmp;
781}
782static inline TCGv gen_ld16s(TCGv addr, int index)
783{
7d1b0095 784 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
785 tcg_gen_qemu_ld16s(tmp, addr, index);
786 return tmp;
787}
788static inline TCGv gen_ld16u(TCGv addr, int index)
789{
7d1b0095 790 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
791 tcg_gen_qemu_ld16u(tmp, addr, index);
792 return tmp;
793}
794static inline TCGv gen_ld32(TCGv addr, int index)
795{
7d1b0095 796 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
797 tcg_gen_qemu_ld32u(tmp, addr, index);
798 return tmp;
799}
84496233
JR
800static inline TCGv_i64 gen_ld64(TCGv addr, int index)
801{
802 TCGv_i64 tmp = tcg_temp_new_i64();
803 tcg_gen_qemu_ld64(tmp, addr, index);
804 return tmp;
805}
b0109805
PB
806static inline void gen_st8(TCGv val, TCGv addr, int index)
807{
808 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 809 tcg_temp_free_i32(val);
b0109805
PB
810}
811static inline void gen_st16(TCGv val, TCGv addr, int index)
812{
813 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 814 tcg_temp_free_i32(val);
b0109805
PB
815}
816static inline void gen_st32(TCGv val, TCGv addr, int index)
817{
818 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 819 tcg_temp_free_i32(val);
b0109805 820}
84496233
JR
821static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
822{
823 tcg_gen_qemu_st64(val, addr, index);
824 tcg_temp_free_i64(val);
825}
b5ff1b31 826
5e3f878a
PB
827static inline void gen_set_pc_im(uint32_t val)
828{
155c3eac 829 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
830}
831
b5ff1b31
FB
832/* Force a TB lookup after an instruction that changes the CPU state. */
833static inline void gen_lookup_tb(DisasContext *s)
834{
a6445c52 835 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
836 s->is_jmp = DISAS_UPDATE;
837}
838
b0109805
PB
839static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
840 TCGv var)
2c0262af 841{
1e8d4eec 842 int val, rm, shift, shiftop;
b26eefb6 843 TCGv offset;
2c0262af
FB
844
845 if (!(insn & (1 << 25))) {
846 /* immediate */
847 val = insn & 0xfff;
848 if (!(insn & (1 << 23)))
849 val = -val;
537730b9 850 if (val != 0)
b0109805 851 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
852 } else {
853 /* shift/register */
854 rm = (insn) & 0xf;
855 shift = (insn >> 7) & 0x1f;
1e8d4eec 856 shiftop = (insn >> 5) & 3;
b26eefb6 857 offset = load_reg(s, rm);
9a119ff6 858 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 859 if (!(insn & (1 << 23)))
b0109805 860 tcg_gen_sub_i32(var, var, offset);
2c0262af 861 else
b0109805 862 tcg_gen_add_i32(var, var, offset);
7d1b0095 863 tcg_temp_free_i32(offset);
2c0262af
FB
864 }
865}
866
191f9a93 867static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 868 int extra, TCGv var)
2c0262af
FB
869{
870 int val, rm;
b26eefb6 871 TCGv offset;
3b46e624 872
2c0262af
FB
873 if (insn & (1 << 22)) {
874 /* immediate */
875 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
876 if (!(insn & (1 << 23)))
877 val = -val;
18acad92 878 val += extra;
537730b9 879 if (val != 0)
b0109805 880 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
881 } else {
882 /* register */
191f9a93 883 if (extra)
b0109805 884 tcg_gen_addi_i32(var, var, extra);
2c0262af 885 rm = (insn) & 0xf;
b26eefb6 886 offset = load_reg(s, rm);
2c0262af 887 if (!(insn & (1 << 23)))
b0109805 888 tcg_gen_sub_i32(var, var, offset);
2c0262af 889 else
b0109805 890 tcg_gen_add_i32(var, var, offset);
7d1b0095 891 tcg_temp_free_i32(offset);
2c0262af
FB
892 }
893}
894
5aaebd13
PM
895static TCGv_ptr get_fpstatus_ptr(int neon)
896{
897 TCGv_ptr statusptr = tcg_temp_new_ptr();
898 int offset;
899 if (neon) {
900 offset = offsetof(CPUState, vfp.standard_fp_status);
901 } else {
902 offset = offsetof(CPUState, vfp.fp_status);
903 }
904 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
905 return statusptr;
906}
907
4373f3ce
PB
908#define VFP_OP2(name) \
909static inline void gen_vfp_##name(int dp) \
910{ \
ae1857ec
PM
911 TCGv_ptr fpst = get_fpstatus_ptr(0); \
912 if (dp) { \
913 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
914 } else { \
915 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
916 } \
917 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
918}
919
4373f3ce
PB
920VFP_OP2(add)
921VFP_OP2(sub)
922VFP_OP2(mul)
923VFP_OP2(div)
924
925#undef VFP_OP2
926
605a6aed
PM
927static inline void gen_vfp_F1_mul(int dp)
928{
929 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 930 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 931 if (dp) {
ae1857ec 932 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 933 } else {
ae1857ec 934 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 935 }
ae1857ec 936 tcg_temp_free_ptr(fpst);
605a6aed
PM
937}
938
939static inline void gen_vfp_F1_neg(int dp)
940{
941 /* Like gen_vfp_neg() but put result in F1 */
942 if (dp) {
943 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
944 } else {
945 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
946 }
947}
948
4373f3ce
PB
949static inline void gen_vfp_abs(int dp)
950{
951 if (dp)
952 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
953 else
954 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
955}
956
957static inline void gen_vfp_neg(int dp)
958{
959 if (dp)
960 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
961 else
962 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
963}
964
965static inline void gen_vfp_sqrt(int dp)
966{
967 if (dp)
968 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
969 else
970 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
971}
972
973static inline void gen_vfp_cmp(int dp)
974{
975 if (dp)
976 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
977 else
978 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
979}
980
981static inline void gen_vfp_cmpe(int dp)
982{
983 if (dp)
984 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
985 else
986 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
987}
988
989static inline void gen_vfp_F1_ld0(int dp)
990{
991 if (dp)
5b340b51 992 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 993 else
5b340b51 994 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
995}
996
5500b06c
PM
997#define VFP_GEN_ITOF(name) \
998static inline void gen_vfp_##name(int dp, int neon) \
999{ \
5aaebd13 1000 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1001 if (dp) { \
1002 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1003 } else { \
1004 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1005 } \
b7fa9214 1006 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1007}
1008
5500b06c
PM
1009VFP_GEN_ITOF(uito)
1010VFP_GEN_ITOF(sito)
1011#undef VFP_GEN_ITOF
4373f3ce 1012
5500b06c
PM
1013#define VFP_GEN_FTOI(name) \
1014static inline void gen_vfp_##name(int dp, int neon) \
1015{ \
5aaebd13 1016 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1017 if (dp) { \
1018 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1019 } else { \
1020 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1021 } \
b7fa9214 1022 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1023}
1024
5500b06c
PM
1025VFP_GEN_FTOI(toui)
1026VFP_GEN_FTOI(touiz)
1027VFP_GEN_FTOI(tosi)
1028VFP_GEN_FTOI(tosiz)
1029#undef VFP_GEN_FTOI
4373f3ce
PB
1030
1031#define VFP_GEN_FIX(name) \
5500b06c 1032static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1033{ \
b75263d6 1034 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1035 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1036 if (dp) { \
1037 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1038 } else { \
1039 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1040 } \
b75263d6 1041 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1042 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1043}
4373f3ce
PB
1044VFP_GEN_FIX(tosh)
1045VFP_GEN_FIX(tosl)
1046VFP_GEN_FIX(touh)
1047VFP_GEN_FIX(toul)
1048VFP_GEN_FIX(shto)
1049VFP_GEN_FIX(slto)
1050VFP_GEN_FIX(uhto)
1051VFP_GEN_FIX(ulto)
1052#undef VFP_GEN_FIX
9ee6e8bb 1053
312eea9f 1054static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1055{
1056 if (dp)
312eea9f 1057 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1058 else
312eea9f 1059 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1060}
1061
312eea9f 1062static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1063{
1064 if (dp)
312eea9f 1065 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1066 else
312eea9f 1067 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1068}
1069
8e96005d
FB
1070static inline long
1071vfp_reg_offset (int dp, int reg)
1072{
1073 if (dp)
1074 return offsetof(CPUARMState, vfp.regs[reg]);
1075 else if (reg & 1) {
1076 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1077 + offsetof(CPU_DoubleU, l.upper);
1078 } else {
1079 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1080 + offsetof(CPU_DoubleU, l.lower);
1081 }
1082}
9ee6e8bb
PB
1083
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each 64-bit NEON register is two consecutive single-precision
     * slots; delegate the host-endian layout to vfp_reg_offset(). */
    return vfp_reg_offset(0, reg * 2 + n);
}
1093
8f8e3aa4
PB
1094static TCGv neon_load_reg(int reg, int pass)
1095{
7d1b0095 1096 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1097 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1098 return tmp;
1099}
1100
1101static void neon_store_reg(int reg, int pass, TCGv var)
1102{
1103 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1104 tcg_temp_free_i32(var);
8f8e3aa4
PB
1105}
1106
a7812ae4 1107static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1108{
1109 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1110}
1111
a7812ae4 1112static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1113{
1114 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1115}
1116
4373f3ce
PB
1117#define tcg_gen_ld_f32 tcg_gen_ld_i32
1118#define tcg_gen_ld_f64 tcg_gen_ld_i64
1119#define tcg_gen_st_f32 tcg_gen_st_i32
1120#define tcg_gen_st_f64 tcg_gen_st_i64
1121
b7bcbe95
FB
1122static inline void gen_mov_F0_vreg(int dp, int reg)
1123{
1124 if (dp)
4373f3ce 1125 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1126 else
4373f3ce 1127 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1128}
1129
1130static inline void gen_mov_F1_vreg(int dp, int reg)
1131{
1132 if (dp)
4373f3ce 1133 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1134 else
4373f3ce 1135 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1136}
1137
1138static inline void gen_mov_vreg_F0(int dp, int reg)
1139{
1140 if (dp)
4373f3ce 1141 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1142 else
4373f3ce 1143 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1144}
1145
18c9b560
AZ
1146#define ARM_CP_RW_BIT (1 << 20)
1147
a7812ae4 1148static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1149{
1150 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1151}
1152
a7812ae4 1153static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1154{
1155 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1156}
1157
da6b5335 1158static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1159{
7d1b0095 1160 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1161 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1162 return var;
e677137d
PB
1163}
1164
da6b5335 1165static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1166{
da6b5335 1167 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
7d1b0095 1168 tcg_temp_free_i32(var);
e677137d
PB
1169}
1170
1171static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1172{
1173 iwmmxt_store_reg(cpu_M0, rn);
1174}
1175
1176static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1177{
1178 iwmmxt_load_reg(cpu_M0, rn);
1179}
1180
1181static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1182{
1183 iwmmxt_load_reg(cpu_V1, rn);
1184 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1185}
1186
1187static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1188{
1189 iwmmxt_load_reg(cpu_V1, rn);
1190 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1191}
1192
1193static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1194{
1195 iwmmxt_load_reg(cpu_V1, rn);
1196 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1197}
1198
1199#define IWMMXT_OP(name) \
1200static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1201{ \
1202 iwmmxt_load_reg(cpu_V1, rn); \
1203 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1204}
1205
477955bd
PM
1206#define IWMMXT_OP_ENV(name) \
1207static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1208{ \
1209 iwmmxt_load_reg(cpu_V1, rn); \
1210 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1211}
1212
1213#define IWMMXT_OP_ENV_SIZE(name) \
1214IWMMXT_OP_ENV(name##b) \
1215IWMMXT_OP_ENV(name##w) \
1216IWMMXT_OP_ENV(name##l)
e677137d 1217
477955bd 1218#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1219static inline void gen_op_iwmmxt_##name##_M0(void) \
1220{ \
477955bd 1221 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1222}
1223
1224IWMMXT_OP(maddsq)
1225IWMMXT_OP(madduq)
1226IWMMXT_OP(sadb)
1227IWMMXT_OP(sadw)
1228IWMMXT_OP(mulslw)
1229IWMMXT_OP(mulshw)
1230IWMMXT_OP(mululw)
1231IWMMXT_OP(muluhw)
1232IWMMXT_OP(macsw)
1233IWMMXT_OP(macuw)
1234
477955bd
PM
1235IWMMXT_OP_ENV_SIZE(unpackl)
1236IWMMXT_OP_ENV_SIZE(unpackh)
1237
1238IWMMXT_OP_ENV1(unpacklub)
1239IWMMXT_OP_ENV1(unpackluw)
1240IWMMXT_OP_ENV1(unpacklul)
1241IWMMXT_OP_ENV1(unpackhub)
1242IWMMXT_OP_ENV1(unpackhuw)
1243IWMMXT_OP_ENV1(unpackhul)
1244IWMMXT_OP_ENV1(unpacklsb)
1245IWMMXT_OP_ENV1(unpacklsw)
1246IWMMXT_OP_ENV1(unpacklsl)
1247IWMMXT_OP_ENV1(unpackhsb)
1248IWMMXT_OP_ENV1(unpackhsw)
1249IWMMXT_OP_ENV1(unpackhsl)
1250
1251IWMMXT_OP_ENV_SIZE(cmpeq)
1252IWMMXT_OP_ENV_SIZE(cmpgtu)
1253IWMMXT_OP_ENV_SIZE(cmpgts)
1254
1255IWMMXT_OP_ENV_SIZE(mins)
1256IWMMXT_OP_ENV_SIZE(minu)
1257IWMMXT_OP_ENV_SIZE(maxs)
1258IWMMXT_OP_ENV_SIZE(maxu)
1259
1260IWMMXT_OP_ENV_SIZE(subn)
1261IWMMXT_OP_ENV_SIZE(addn)
1262IWMMXT_OP_ENV_SIZE(subu)
1263IWMMXT_OP_ENV_SIZE(addu)
1264IWMMXT_OP_ENV_SIZE(subs)
1265IWMMXT_OP_ENV_SIZE(adds)
1266
1267IWMMXT_OP_ENV(avgb0)
1268IWMMXT_OP_ENV(avgb1)
1269IWMMXT_OP_ENV(avgw0)
1270IWMMXT_OP_ENV(avgw1)
e677137d
PB
1271
1272IWMMXT_OP(msadb)
1273
477955bd
PM
1274IWMMXT_OP_ENV(packuw)
1275IWMMXT_OP_ENV(packul)
1276IWMMXT_OP_ENV(packuq)
1277IWMMXT_OP_ENV(packsw)
1278IWMMXT_OP_ENV(packsl)
1279IWMMXT_OP_ENV(packsq)
e677137d 1280
e677137d
PB
1281static void gen_op_iwmmxt_set_mup(void)
1282{
1283 TCGv tmp;
1284 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1285 tcg_gen_ori_i32(tmp, tmp, 2);
1286 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1287}
1288
1289static void gen_op_iwmmxt_set_cup(void)
1290{
1291 TCGv tmp;
1292 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1293 tcg_gen_ori_i32(tmp, tmp, 1);
1294 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1295}
1296
1297static void gen_op_iwmmxt_setpsr_nz(void)
1298{
7d1b0095 1299 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1300 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1301 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1302}
1303
1304static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1305{
1306 iwmmxt_load_reg(cpu_V1, rn);
86831435 1307 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1308 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1309}
1310
da6b5335 1311static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1312{
1313 int rd;
1314 uint32_t offset;
da6b5335 1315 TCGv tmp;
18c9b560
AZ
1316
1317 rd = (insn >> 16) & 0xf;
da6b5335 1318 tmp = load_reg(s, rd);
18c9b560
AZ
1319
1320 offset = (insn & 0xff) << ((insn >> 7) & 2);
1321 if (insn & (1 << 24)) {
1322 /* Pre indexed */
1323 if (insn & (1 << 23))
da6b5335 1324 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1325 else
da6b5335
FN
1326 tcg_gen_addi_i32(tmp, tmp, -offset);
1327 tcg_gen_mov_i32(dest, tmp);
18c9b560 1328 if (insn & (1 << 21))
da6b5335
FN
1329 store_reg(s, rd, tmp);
1330 else
7d1b0095 1331 tcg_temp_free_i32(tmp);
18c9b560
AZ
1332 } else if (insn & (1 << 21)) {
1333 /* Post indexed */
da6b5335 1334 tcg_gen_mov_i32(dest, tmp);
18c9b560 1335 if (insn & (1 << 23))
da6b5335 1336 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1337 else
da6b5335
FN
1338 tcg_gen_addi_i32(tmp, tmp, -offset);
1339 store_reg(s, rd, tmp);
18c9b560
AZ
1340 } else if (!(insn & (1 << 23)))
1341 return 1;
1342 return 0;
1343}
1344
da6b5335 1345static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1346{
1347 int rd = (insn >> 0) & 0xf;
da6b5335 1348 TCGv tmp;
18c9b560 1349
da6b5335
FN
1350 if (insn & (1 << 8)) {
1351 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1352 return 1;
da6b5335
FN
1353 } else {
1354 tmp = iwmmxt_load_creg(rd);
1355 }
1356 } else {
7d1b0095 1357 tmp = tcg_temp_new_i32();
da6b5335
FN
1358 iwmmxt_load_reg(cpu_V0, rd);
1359 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1360 }
1361 tcg_gen_andi_i32(tmp, tmp, mask);
1362 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1363 tcg_temp_free_i32(tmp);
18c9b560
AZ
1364 return 0;
1365}
1366
a1c7273b 1367/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560
AZ
1368 (ie. an undefined instruction). */
1369static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1370{
1371 int rd, wrd;
1372 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1373 TCGv addr;
1374 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1375
1376 if ((insn & 0x0e000e00) == 0x0c000000) {
1377 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1378 wrd = insn & 0xf;
1379 rdlo = (insn >> 12) & 0xf;
1380 rdhi = (insn >> 16) & 0xf;
1381 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1382 iwmmxt_load_reg(cpu_V0, wrd);
1383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1386 } else { /* TMCRR */
da6b5335
FN
1387 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1388 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1389 gen_op_iwmmxt_set_mup();
1390 }
1391 return 0;
1392 }
1393
1394 wrd = (insn >> 12) & 0xf;
7d1b0095 1395 addr = tcg_temp_new_i32();
da6b5335 1396 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1397 tcg_temp_free_i32(addr);
18c9b560 1398 return 1;
da6b5335 1399 }
18c9b560
AZ
1400 if (insn & ARM_CP_RW_BIT) {
1401 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1402 tmp = tcg_temp_new_i32();
da6b5335
FN
1403 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1404 iwmmxt_store_creg(wrd, tmp);
18c9b560 1405 } else {
e677137d
PB
1406 i = 1;
1407 if (insn & (1 << 8)) {
1408 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1409 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1410 i = 0;
1411 } else { /* WLDRW wRd */
da6b5335 1412 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1413 }
1414 } else {
1415 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1416 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1417 } else { /* WLDRB */
da6b5335 1418 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1419 }
1420 }
1421 if (i) {
1422 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1423 tcg_temp_free_i32(tmp);
e677137d 1424 }
18c9b560
AZ
1425 gen_op_iwmmxt_movq_wRn_M0(wrd);
1426 }
1427 } else {
1428 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1429 tmp = iwmmxt_load_creg(wrd);
1430 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1431 } else {
1432 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1433 tmp = tcg_temp_new_i32();
e677137d
PB
1434 if (insn & (1 << 8)) {
1435 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1436 tcg_temp_free_i32(tmp);
da6b5335 1437 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1438 } else { /* WSTRW wRd */
1439 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1440 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1441 }
1442 } else {
1443 if (insn & (1 << 22)) { /* WSTRH */
1444 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1445 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1446 } else { /* WSTRB */
1447 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1448 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1449 }
1450 }
18c9b560
AZ
1451 }
1452 }
7d1b0095 1453 tcg_temp_free_i32(addr);
18c9b560
AZ
1454 return 0;
1455 }
1456
1457 if ((insn & 0x0f000000) != 0x0e000000)
1458 return 1;
1459
1460 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1461 case 0x000: /* WOR */
1462 wrd = (insn >> 12) & 0xf;
1463 rd0 = (insn >> 0) & 0xf;
1464 rd1 = (insn >> 16) & 0xf;
1465 gen_op_iwmmxt_movq_M0_wRn(rd0);
1466 gen_op_iwmmxt_orq_M0_wRn(rd1);
1467 gen_op_iwmmxt_setpsr_nz();
1468 gen_op_iwmmxt_movq_wRn_M0(wrd);
1469 gen_op_iwmmxt_set_mup();
1470 gen_op_iwmmxt_set_cup();
1471 break;
1472 case 0x011: /* TMCR */
1473 if (insn & 0xf)
1474 return 1;
1475 rd = (insn >> 12) & 0xf;
1476 wrd = (insn >> 16) & 0xf;
1477 switch (wrd) {
1478 case ARM_IWMMXT_wCID:
1479 case ARM_IWMMXT_wCASF:
1480 break;
1481 case ARM_IWMMXT_wCon:
1482 gen_op_iwmmxt_set_cup();
1483 /* Fall through. */
1484 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1485 tmp = iwmmxt_load_creg(wrd);
1486 tmp2 = load_reg(s, rd);
f669df27 1487 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1488 tcg_temp_free_i32(tmp2);
da6b5335 1489 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1490 break;
1491 case ARM_IWMMXT_wCGR0:
1492 case ARM_IWMMXT_wCGR1:
1493 case ARM_IWMMXT_wCGR2:
1494 case ARM_IWMMXT_wCGR3:
1495 gen_op_iwmmxt_set_cup();
da6b5335
FN
1496 tmp = load_reg(s, rd);
1497 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1498 break;
1499 default:
1500 return 1;
1501 }
1502 break;
1503 case 0x100: /* WXOR */
1504 wrd = (insn >> 12) & 0xf;
1505 rd0 = (insn >> 0) & 0xf;
1506 rd1 = (insn >> 16) & 0xf;
1507 gen_op_iwmmxt_movq_M0_wRn(rd0);
1508 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1509 gen_op_iwmmxt_setpsr_nz();
1510 gen_op_iwmmxt_movq_wRn_M0(wrd);
1511 gen_op_iwmmxt_set_mup();
1512 gen_op_iwmmxt_set_cup();
1513 break;
1514 case 0x111: /* TMRC */
1515 if (insn & 0xf)
1516 return 1;
1517 rd = (insn >> 12) & 0xf;
1518 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1519 tmp = iwmmxt_load_creg(wrd);
1520 store_reg(s, rd, tmp);
18c9b560
AZ
1521 break;
1522 case 0x300: /* WANDN */
1523 wrd = (insn >> 12) & 0xf;
1524 rd0 = (insn >> 0) & 0xf;
1525 rd1 = (insn >> 16) & 0xf;
1526 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1527 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1528 gen_op_iwmmxt_andq_M0_wRn(rd1);
1529 gen_op_iwmmxt_setpsr_nz();
1530 gen_op_iwmmxt_movq_wRn_M0(wrd);
1531 gen_op_iwmmxt_set_mup();
1532 gen_op_iwmmxt_set_cup();
1533 break;
1534 case 0x200: /* WAND */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_andq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x810: case 0xa10: /* WMADD */
1546 wrd = (insn >> 12) & 0xf;
1547 rd0 = (insn >> 0) & 0xf;
1548 rd1 = (insn >> 16) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0);
1550 if (insn & (1 << 21))
1551 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1552 else
1553 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 break;
1557 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1558 wrd = (insn >> 12) & 0xf;
1559 rd0 = (insn >> 16) & 0xf;
1560 rd1 = (insn >> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0);
1562 switch ((insn >> 22) & 3) {
1563 case 0:
1564 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1565 break;
1566 case 1:
1567 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1568 break;
1569 case 2:
1570 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1571 break;
1572 case 3:
1573 return 1;
1574 }
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 gen_op_iwmmxt_set_cup();
1578 break;
1579 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1580 wrd = (insn >> 12) & 0xf;
1581 rd0 = (insn >> 16) & 0xf;
1582 rd1 = (insn >> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0);
1584 switch ((insn >> 22) & 3) {
1585 case 0:
1586 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1587 break;
1588 case 1:
1589 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1590 break;
1591 case 2:
1592 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1593 break;
1594 case 3:
1595 return 1;
1596 }
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1598 gen_op_iwmmxt_set_mup();
1599 gen_op_iwmmxt_set_cup();
1600 break;
1601 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 22))
1607 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1610 if (!(insn & (1 << 20)))
1611 gen_op_iwmmxt_addl_M0_wRn(wrd);
1612 gen_op_iwmmxt_movq_wRn_M0(wrd);
1613 gen_op_iwmmxt_set_mup();
1614 break;
1615 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1616 wrd = (insn >> 12) & 0xf;
1617 rd0 = (insn >> 16) & 0xf;
1618 rd1 = (insn >> 0) & 0xf;
1619 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1620 if (insn & (1 << 21)) {
1621 if (insn & (1 << 20))
1622 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1623 else
1624 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1625 } else {
1626 if (insn & (1 << 20))
1627 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1628 else
1629 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1630 }
18c9b560
AZ
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 break;
1634 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1643 if (!(insn & (1 << 20))) {
e677137d
PB
1644 iwmmxt_load_reg(cpu_V1, wrd);
1645 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1646 }
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 break;
1650 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 16) & 0xf;
1653 rd1 = (insn >> 0) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 switch ((insn >> 22) & 3) {
1656 case 0:
1657 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1658 break;
1659 case 1:
1660 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1661 break;
1662 case 2:
1663 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1664 break;
1665 case 3:
1666 return 1;
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1671 break;
1672 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1673 wrd = (insn >> 12) & 0xf;
1674 rd0 = (insn >> 16) & 0xf;
1675 rd1 = (insn >> 0) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1677 if (insn & (1 << 22)) {
1678 if (insn & (1 << 20))
1679 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1680 else
1681 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1682 } else {
1683 if (insn & (1 << 20))
1684 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1685 else
1686 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1687 }
18c9b560
AZ
1688 gen_op_iwmmxt_movq_wRn_M0(wrd);
1689 gen_op_iwmmxt_set_mup();
1690 gen_op_iwmmxt_set_cup();
1691 break;
1692 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1697 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1698 tcg_gen_andi_i32(tmp, tmp, 7);
1699 iwmmxt_load_reg(cpu_V1, rd1);
1700 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1701 tcg_temp_free_i32(tmp);
18c9b560
AZ
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 break;
1705 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1706 if (((insn >> 6) & 3) == 3)
1707 return 1;
18c9b560
AZ
1708 rd = (insn >> 12) & 0xf;
1709 wrd = (insn >> 16) & 0xf;
da6b5335 1710 tmp = load_reg(s, rd);
18c9b560
AZ
1711 gen_op_iwmmxt_movq_M0_wRn(wrd);
1712 switch ((insn >> 6) & 3) {
1713 case 0:
da6b5335
FN
1714 tmp2 = tcg_const_i32(0xff);
1715 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1716 break;
1717 case 1:
da6b5335
FN
1718 tmp2 = tcg_const_i32(0xffff);
1719 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1720 break;
1721 case 2:
da6b5335
FN
1722 tmp2 = tcg_const_i32(0xffffffff);
1723 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1724 break;
da6b5335
FN
1725 default:
1726 TCGV_UNUSED(tmp2);
1727 TCGV_UNUSED(tmp3);
18c9b560 1728 }
da6b5335
FN
1729 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1730 tcg_temp_free(tmp3);
1731 tcg_temp_free(tmp2);
7d1b0095 1732 tcg_temp_free_i32(tmp);
18c9b560
AZ
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1737 rd = (insn >> 12) & 0xf;
1738 wrd = (insn >> 16) & 0xf;
da6b5335 1739 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1740 return 1;
1741 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1742 tmp = tcg_temp_new_i32();
18c9b560
AZ
1743 switch ((insn >> 22) & 3) {
1744 case 0:
da6b5335
FN
1745 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1746 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1747 if (insn & 8) {
1748 tcg_gen_ext8s_i32(tmp, tmp);
1749 } else {
1750 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1751 }
1752 break;
1753 case 1:
da6b5335
FN
1754 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1755 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1756 if (insn & 8) {
1757 tcg_gen_ext16s_i32(tmp, tmp);
1758 } else {
1759 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1760 }
1761 break;
1762 case 2:
da6b5335
FN
1763 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1764 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1765 break;
18c9b560 1766 }
da6b5335 1767 store_reg(s, rd, tmp);
18c9b560
AZ
1768 break;
1769 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1770 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1771 return 1;
da6b5335 1772 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1773 switch ((insn >> 22) & 3) {
1774 case 0:
da6b5335 1775 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1776 break;
1777 case 1:
da6b5335 1778 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1779 break;
1780 case 2:
da6b5335 1781 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1782 break;
18c9b560 1783 }
da6b5335
FN
1784 tcg_gen_shli_i32(tmp, tmp, 28);
1785 gen_set_nzcv(tmp);
7d1b0095 1786 tcg_temp_free_i32(tmp);
18c9b560
AZ
1787 break;
1788 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1789 if (((insn >> 6) & 3) == 3)
1790 return 1;
18c9b560
AZ
1791 rd = (insn >> 12) & 0xf;
1792 wrd = (insn >> 16) & 0xf;
da6b5335 1793 tmp = load_reg(s, rd);
18c9b560
AZ
1794 switch ((insn >> 6) & 3) {
1795 case 0:
da6b5335 1796 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1797 break;
1798 case 1:
da6b5335 1799 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1800 break;
1801 case 2:
da6b5335 1802 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1803 break;
18c9b560 1804 }
7d1b0095 1805 tcg_temp_free_i32(tmp);
18c9b560
AZ
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1810 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1811 return 1;
da6b5335 1812 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1813 tmp2 = tcg_temp_new_i32();
da6b5335 1814 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1815 switch ((insn >> 22) & 3) {
1816 case 0:
1817 for (i = 0; i < 7; i ++) {
da6b5335
FN
1818 tcg_gen_shli_i32(tmp2, tmp2, 4);
1819 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1820 }
1821 break;
1822 case 1:
1823 for (i = 0; i < 3; i ++) {
da6b5335
FN
1824 tcg_gen_shli_i32(tmp2, tmp2, 8);
1825 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1826 }
1827 break;
1828 case 2:
da6b5335
FN
1829 tcg_gen_shli_i32(tmp2, tmp2, 16);
1830 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1831 break;
18c9b560 1832 }
da6b5335 1833 gen_set_nzcv(tmp);
7d1b0095
PM
1834 tcg_temp_free_i32(tmp2);
1835 tcg_temp_free_i32(tmp);
18c9b560
AZ
1836 break;
1837 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 switch ((insn >> 22) & 3) {
1842 case 0:
e677137d 1843 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1844 break;
1845 case 1:
e677137d 1846 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1847 break;
1848 case 2:
e677137d 1849 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1850 break;
1851 case 3:
1852 return 1;
1853 }
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 break;
1857 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1858 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1859 return 1;
da6b5335 1860 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1861 tmp2 = tcg_temp_new_i32();
da6b5335 1862 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1863 switch ((insn >> 22) & 3) {
1864 case 0:
1865 for (i = 0; i < 7; i ++) {
da6b5335
FN
1866 tcg_gen_shli_i32(tmp2, tmp2, 4);
1867 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1868 }
1869 break;
1870 case 1:
1871 for (i = 0; i < 3; i ++) {
da6b5335
FN
1872 tcg_gen_shli_i32(tmp2, tmp2, 8);
1873 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1874 }
1875 break;
1876 case 2:
da6b5335
FN
1877 tcg_gen_shli_i32(tmp2, tmp2, 16);
1878 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1879 break;
18c9b560 1880 }
da6b5335 1881 gen_set_nzcv(tmp);
7d1b0095
PM
1882 tcg_temp_free_i32(tmp2);
1883 tcg_temp_free_i32(tmp);
18c9b560
AZ
1884 break;
1885 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1886 rd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
da6b5335 1888 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1889 return 1;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1891 tmp = tcg_temp_new_i32();
18c9b560
AZ
1892 switch ((insn >> 22) & 3) {
1893 case 0:
da6b5335 1894 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1895 break;
1896 case 1:
da6b5335 1897 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1898 break;
1899 case 2:
da6b5335 1900 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1901 break;
18c9b560 1902 }
da6b5335 1903 store_reg(s, rd, tmp);
18c9b560
AZ
1904 break;
1905 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1906 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
1911 switch ((insn >> 22) & 3) {
1912 case 0:
1913 if (insn & (1 << 21))
1914 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1915 else
1916 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1917 break;
1918 case 1:
1919 if (insn & (1 << 21))
1920 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1923 break;
1924 case 2:
1925 if (insn & (1 << 21))
1926 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1927 else
1928 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1929 break;
1930 case 3:
1931 return 1;
1932 }
1933 gen_op_iwmmxt_movq_wRn_M0(wrd);
1934 gen_op_iwmmxt_set_mup();
1935 gen_op_iwmmxt_set_cup();
1936 break;
1937 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1938 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1939 wrd = (insn >> 12) & 0xf;
1940 rd0 = (insn >> 16) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_unpacklsb_M0();
1946 else
1947 gen_op_iwmmxt_unpacklub_M0();
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_unpacklsw_M0();
1952 else
1953 gen_op_iwmmxt_unpackluw_M0();
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_unpacklsl_M0();
1958 else
1959 gen_op_iwmmxt_unpacklul_M0();
1960 break;
1961 case 3:
1962 return 1;
1963 }
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1969 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpackhsb_M0();
1977 else
1978 gen_op_iwmmxt_unpackhub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpackhsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackhuw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpackhsl_M0();
1989 else
1990 gen_op_iwmmxt_unpackhul_M0();
1991 break;
1992 case 3:
1993 return 1;
1994 }
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2000 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
18c9b560
AZ
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2006 tmp = tcg_temp_new_i32();
da6b5335 2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2008 tcg_temp_free_i32(tmp);
18c9b560 2009 return 1;
da6b5335 2010 }
18c9b560 2011 switch ((insn >> 22) & 3) {
18c9b560 2012 case 1:
477955bd 2013 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2014 break;
2015 case 2:
477955bd 2016 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2017 break;
2018 case 3:
477955bd 2019 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2020 break;
2021 }
7d1b0095 2022 tcg_temp_free_i32(tmp);
18c9b560
AZ
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2028 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
18c9b560
AZ
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2034 tmp = tcg_temp_new_i32();
da6b5335 2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2036 tcg_temp_free_i32(tmp);
18c9b560 2037 return 1;
da6b5335 2038 }
18c9b560 2039 switch ((insn >> 22) & 3) {
18c9b560 2040 case 1:
477955bd 2041 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2042 break;
2043 case 2:
477955bd 2044 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2045 break;
2046 case 3:
477955bd 2047 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2048 break;
2049 }
7d1b0095 2050 tcg_temp_free_i32(tmp);
18c9b560
AZ
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2056 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
18c9b560
AZ
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2062 tmp = tcg_temp_new_i32();
da6b5335 2063 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2064 tcg_temp_free_i32(tmp);
18c9b560 2065 return 1;
da6b5335 2066 }
18c9b560 2067 switch ((insn >> 22) & 3) {
18c9b560 2068 case 1:
477955bd 2069 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2070 break;
2071 case 2:
477955bd 2072 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2073 break;
2074 case 3:
477955bd 2075 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2076 break;
2077 }
7d1b0095 2078 tcg_temp_free_i32(tmp);
18c9b560
AZ
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2084 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2085 if (((insn >> 22) & 3) == 0)
2086 return 1;
18c9b560
AZ
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2090 tmp = tcg_temp_new_i32();
18c9b560 2091 switch ((insn >> 22) & 3) {
18c9b560 2092 case 1:
da6b5335 2093 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2094 tcg_temp_free_i32(tmp);
18c9b560 2095 return 1;
da6b5335 2096 }
477955bd 2097 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2098 break;
2099 case 2:
da6b5335 2100 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2101 tcg_temp_free_i32(tmp);
18c9b560 2102 return 1;
da6b5335 2103 }
477955bd 2104 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2105 break;
2106 case 3:
da6b5335 2107 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2108 tcg_temp_free_i32(tmp);
18c9b560 2109 return 1;
da6b5335 2110 }
477955bd 2111 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2112 break;
2113 }
7d1b0095 2114 tcg_temp_free_i32(tmp);
18c9b560
AZ
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2120 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 rd1 = (insn >> 0) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 switch ((insn >> 22) & 3) {
2126 case 0:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_minub_M0_wRn(rd1);
2131 break;
2132 case 1:
2133 if (insn & (1 << 21))
2134 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2135 else
2136 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2137 break;
2138 case 2:
2139 if (insn & (1 << 21))
2140 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2141 else
2142 gen_op_iwmmxt_minul_M0_wRn(rd1);
2143 break;
2144 case 3:
2145 return 1;
2146 }
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2151 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2177 }
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2182 case 0x402: case 0x502: case 0x602: case 0x702:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2187 tmp = tcg_const_i32((insn >> 20) & 3);
2188 iwmmxt_load_reg(cpu_V1, rd1);
2189 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2190 tcg_temp_free(tmp);
18c9b560
AZ
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 break;
2194 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2195 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2196 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2197 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 20) & 0xf) {
2203 case 0x0:
2204 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2205 break;
2206 case 0x1:
2207 gen_op_iwmmxt_subub_M0_wRn(rd1);
2208 break;
2209 case 0x3:
2210 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2211 break;
2212 case 0x4:
2213 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2214 break;
2215 case 0x5:
2216 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2217 break;
2218 case 0x7:
2219 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2220 break;
2221 case 0x8:
2222 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2223 break;
2224 case 0x9:
2225 gen_op_iwmmxt_subul_M0_wRn(rd1);
2226 break;
2227 case 0xb:
2228 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2229 break;
2230 default:
2231 return 1;
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2236 break;
2237 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2238 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2239 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2240 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2244 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2245 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2246 tcg_temp_free(tmp);
18c9b560
AZ
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2252 case 0x418: case 0x518: case 0x618: case 0x718:
2253 case 0x818: case 0x918: case 0xa18: case 0xb18:
2254 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 rd1 = (insn >> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 20) & 0xf) {
2260 case 0x0:
2261 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2262 break;
2263 case 0x1:
2264 gen_op_iwmmxt_addub_M0_wRn(rd1);
2265 break;
2266 case 0x3:
2267 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2268 break;
2269 case 0x4:
2270 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2271 break;
2272 case 0x5:
2273 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2274 break;
2275 case 0x7:
2276 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2277 break;
2278 case 0x8:
2279 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2280 break;
2281 case 0x9:
2282 gen_op_iwmmxt_addul_M0_wRn(rd1);
2283 break;
2284 case 0xb:
2285 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2286 break;
2287 default:
2288 return 1;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2295 case 0x408: case 0x508: case 0x608: case 0x708:
2296 case 0x808: case 0x908: case 0xa08: case 0xb08:
2297 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2298 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2299 return 1;
18c9b560
AZ
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2304 switch ((insn >> 22) & 3) {
18c9b560
AZ
2305 case 1:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2308 else
2309 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2310 break;
2311 case 2:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_packul_M0_wRn(rd1);
2316 break;
2317 case 3:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2322 break;
2323 }
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
2328 case 0x201: case 0x203: case 0x205: case 0x207:
2329 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2330 case 0x211: case 0x213: case 0x215: case 0x217:
2331 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2332 wrd = (insn >> 5) & 0xf;
2333 rd0 = (insn >> 12) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 if (rd0 == 0xf || rd1 == 0xf)
2336 return 1;
2337 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2338 tmp = load_reg(s, rd0);
2339 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2340 switch ((insn >> 16) & 0xf) {
2341 case 0x0: /* TMIA */
da6b5335 2342 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2343 break;
2344 case 0x8: /* TMIAPH */
da6b5335 2345 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2346 break;
2347 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2348 if (insn & (1 << 16))
da6b5335 2349 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2350 if (insn & (1 << 17))
da6b5335
FN
2351 tcg_gen_shri_i32(tmp2, tmp2, 16);
2352 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2353 break;
2354 default:
7d1b0095
PM
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
18c9b560
AZ
2357 return 1;
2358 }
7d1b0095
PM
2359 tcg_temp_free_i32(tmp2);
2360 tcg_temp_free_i32(tmp);
18c9b560
AZ
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 break;
2364 default:
2365 return 1;
2366 }
2367
2368 return 0;
2369}
2370
a1c7273b 2371/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560
AZ
2372 (ie. an undefined instruction). */
2373static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2374{
2375 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2376 TCGv tmp, tmp2;
18c9b560
AZ
2377
2378 if ((insn & 0x0ff00f10) == 0x0e200010) {
2379 /* Multiply with Internal Accumulate Format */
2380 rd0 = (insn >> 12) & 0xf;
2381 rd1 = insn & 0xf;
2382 acc = (insn >> 5) & 7;
2383
2384 if (acc != 0)
2385 return 1;
2386
3a554c0f
FN
2387 tmp = load_reg(s, rd0);
2388 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2389 switch ((insn >> 16) & 0xf) {
2390 case 0x0: /* MIA */
3a554c0f 2391 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2392 break;
2393 case 0x8: /* MIAPH */
3a554c0f 2394 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2395 break;
2396 case 0xc: /* MIABB */
2397 case 0xd: /* MIABT */
2398 case 0xe: /* MIATB */
2399 case 0xf: /* MIATT */
18c9b560 2400 if (insn & (1 << 16))
3a554c0f 2401 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2402 if (insn & (1 << 17))
3a554c0f
FN
2403 tcg_gen_shri_i32(tmp2, tmp2, 16);
2404 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2405 break;
2406 default:
2407 return 1;
2408 }
7d1b0095
PM
2409 tcg_temp_free_i32(tmp2);
2410 tcg_temp_free_i32(tmp);
18c9b560
AZ
2411
2412 gen_op_iwmmxt_movq_wRn_M0(acc);
2413 return 0;
2414 }
2415
2416 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2417 /* Internal Accumulator Access Format */
2418 rdhi = (insn >> 16) & 0xf;
2419 rdlo = (insn >> 12) & 0xf;
2420 acc = insn & 7;
2421
2422 if (acc != 0)
2423 return 1;
2424
2425 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2426 iwmmxt_load_reg(cpu_V0, acc);
2427 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2428 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2429 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2430 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2431 } else { /* MAR */
3a554c0f
FN
2432 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2433 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2434 }
2435 return 0;
2436 }
2437
2438 return 1;
2439}
2440
c1713132
AZ
2441/* Disassemble system coprocessor instruction. Return nonzero if
2442 instruction is not defined. */
2443static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2444{
b75263d6 2445 TCGv tmp, tmp2;
c1713132
AZ
2446 uint32_t rd = (insn >> 12) & 0xf;
2447 uint32_t cp = (insn >> 8) & 0xf;
2448 if (IS_USER(s)) {
2449 return 1;
2450 }
2451
18c9b560 2452 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2453 if (!env->cp[cp].cp_read)
2454 return 1;
8984bd2e 2455 gen_set_pc_im(s->pc);
7d1b0095 2456 tmp = tcg_temp_new_i32();
b75263d6
JR
2457 tmp2 = tcg_const_i32(insn);
2458 gen_helper_get_cp(tmp, cpu_env, tmp2);
2459 tcg_temp_free(tmp2);
8984bd2e 2460 store_reg(s, rd, tmp);
c1713132
AZ
2461 } else {
2462 if (!env->cp[cp].cp_write)
2463 return 1;
8984bd2e
PB
2464 gen_set_pc_im(s->pc);
2465 tmp = load_reg(s, rd);
b75263d6
JR
2466 tmp2 = tcg_const_i32(insn);
2467 gen_helper_set_cp(cpu_env, tmp2, tmp);
2468 tcg_temp_free(tmp2);
7d1b0095 2469 tcg_temp_free_i32(tmp);
c1713132
AZ
2470 }
2471 return 0;
2472}
2473
74594c9d 2474static int cp15_user_ok(CPUState *env, uint32_t insn)
9ee6e8bb
PB
2475{
2476 int cpn = (insn >> 16) & 0xf;
2477 int cpm = insn & 0xf;
2478 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2479
74594c9d
PM
2480 if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
2481 /* Performance monitor registers fall into three categories:
2482 * (a) always UNDEF in usermode
2483 * (b) UNDEF only if PMUSERENR.EN is 0
2484 * (c) always read OK and UNDEF on write (PMUSERENR only)
2485 */
2486 if ((cpm == 12 && (op < 6)) ||
2487 (cpm == 13 && (op < 3))) {
2488 return env->cp15.c9_pmuserenr;
2489 } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
2490 /* PMUSERENR, read only */
2491 return 1;
2492 }
2493 return 0;
2494 }
2495
9ee6e8bb
PB
2496 if (cpn == 13 && cpm == 0) {
2497 /* TLS register. */
2498 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2499 return 1;
2500 }
2501 if (cpn == 7) {
2502 /* ISB, DSB, DMB. */
2503 if ((cpm == 5 && op == 4)
2504 || (cpm == 10 && (op == 4 || op == 5)))
2505 return 1;
2506 }
2507 return 0;
2508}
2509
3f26c122
RV
2510static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2511{
2512 TCGv tmp;
2513 int cpn = (insn >> 16) & 0xf;
2514 int cpm = insn & 0xf;
2515 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2516
2517 if (!arm_feature(env, ARM_FEATURE_V6K))
2518 return 0;
2519
2520 if (!(cpn == 13 && cpm == 0))
2521 return 0;
2522
2523 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2524 switch (op) {
2525 case 2:
c5883be2 2526 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2527 break;
2528 case 3:
c5883be2 2529 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2530 break;
2531 case 4:
c5883be2 2532 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2533 break;
2534 default:
3f26c122
RV
2535 return 0;
2536 }
2537 store_reg(s, rd, tmp);
2538
2539 } else {
2540 tmp = load_reg(s, rd);
2541 switch (op) {
2542 case 2:
c5883be2 2543 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2544 break;
2545 case 3:
c5883be2 2546 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2547 break;
2548 case 4:
c5883be2 2549 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2550 break;
2551 default:
7d1b0095 2552 tcg_temp_free_i32(tmp);
3f26c122
RV
2553 return 0;
2554 }
3f26c122
RV
2555 }
2556 return 1;
2557}
2558
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined. */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15. */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op. */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* User-mode accesses UNDEF unless specifically whitelisted. */
    if (IS_USER(s) && !cp15_user_ok(env, insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    /* TLS register accesses are handled inline rather than via the
     * generic get/set_cp15 helpers. */
    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    /* Generic path: pass the raw insn word to the run-time helpers,
     * which decode cpn/cpm/op themselves. */
    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        /* MRC: cp15 -> core register. */
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes. */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        /* MCR: core register -> cp15. */
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour. */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
2645
/* Shift x right by n bits, treating a negative count as a left shift;
 * lets VFP_SREG below pass "bigbit - 1" even when bigbit is 0. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a VFP single-precision register number: 4 bits at 'bigbit'
 * form the high part, one bit at 'smallbit' the low bit. */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number.  On VFP3 'smallbit'
 * supplies bit 4 (32 D registers); on pre-VFP3 cores that bit must be
 * zero or the insn UNDEFs (hence the early "return 1", which relies on
 * being expanded inside a function returning int). */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Accessors for the destination (D) and operand (N, M) register fields. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2665
4373f3ce
PB
2666/* Move between integer and VFP cores. */
2667static TCGv gen_vfp_mrs(void)
2668{
7d1b0095 2669 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2670 tcg_gen_mov_i32(tmp, cpu_F0s);
2671 return tmp;
2672}
2673
2674static void gen_vfp_msr(TCGv tmp)
2675{
2676 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2677 tcg_temp_free_i32(tmp);
4373f3ce
PB
2678}
2679
ad69471c
PB
2680static void gen_neon_dup_u8(TCGv var, int shift)
2681{
7d1b0095 2682 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2683 if (shift)
2684 tcg_gen_shri_i32(var, var, shift);
86831435 2685 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2686 tcg_gen_shli_i32(tmp, var, 8);
2687 tcg_gen_or_i32(var, var, tmp);
2688 tcg_gen_shli_i32(tmp, var, 16);
2689 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2690 tcg_temp_free_i32(tmp);
ad69471c
PB
2691}
2692
2693static void gen_neon_dup_low16(TCGv var)
2694{
7d1b0095 2695 TCGv tmp = tcg_temp_new_i32();
86831435 2696 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2697 tcg_gen_shli_i32(tmp, var, 16);
2698 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2699 tcg_temp_free_i32(tmp);
ad69471c
PB
2700}
2701
2702static void gen_neon_dup_high16(TCGv var)
2703{
7d1b0095 2704 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2705 tcg_gen_andi_i32(var, var, 0xffff0000);
2706 tcg_gen_shri_i32(tmp, var, 16);
2707 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2708 tcg_temp_free_i32(tmp);
ad69471c
PB
2709}
2710
8e18cde3
PM
2711static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2712{
2713 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2714 TCGv tmp;
2715 switch (size) {
2716 case 0:
2717 tmp = gen_ld8u(addr, IS_USER(s));
2718 gen_neon_dup_u8(tmp, 0);
2719 break;
2720 case 1:
2721 tmp = gen_ld16u(addr, IS_USER(s));
2722 gen_neon_dup_low16(tmp);
2723 break;
2724 case 2:
2725 tmp = gen_ld32(addr, IS_USER(s));
2726 break;
2727 default: /* Avoid compiler warnings. */
2728 abort();
2729 }
2730 return tmp;
2731}
2732
a1c7273b 2733/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95
FB
2734 (ie. an undefined instruction). */
2735static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2736{
2737 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2738 int dp, veclen;
312eea9f 2739 TCGv addr;
4373f3ce 2740 TCGv tmp;
ad69471c 2741 TCGv tmp2;
b7bcbe95 2742
40f137e1
PB
2743 if (!arm_feature(env, ARM_FEATURE_VFP))
2744 return 1;
2745
5df8bac1 2746 if (!s->vfp_enabled) {
9ee6e8bb 2747 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2748 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2749 return 1;
2750 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2751 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2752 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2753 return 1;
2754 }
b7bcbe95
FB
2755 dp = ((insn & 0xf00) == 0xb00);
2756 switch ((insn >> 24) & 0xf) {
2757 case 0xe:
2758 if (insn & (1 << 4)) {
2759 /* single register transfer */
b7bcbe95
FB
2760 rd = (insn >> 12) & 0xf;
2761 if (dp) {
9ee6e8bb
PB
2762 int size;
2763 int pass;
2764
2765 VFP_DREG_N(rn, insn);
2766 if (insn & 0xf)
b7bcbe95 2767 return 1;
9ee6e8bb
PB
2768 if (insn & 0x00c00060
2769 && !arm_feature(env, ARM_FEATURE_NEON))
2770 return 1;
2771
2772 pass = (insn >> 21) & 1;
2773 if (insn & (1 << 22)) {
2774 size = 0;
2775 offset = ((insn >> 5) & 3) * 8;
2776 } else if (insn & (1 << 5)) {
2777 size = 1;
2778 offset = (insn & (1 << 6)) ? 16 : 0;
2779 } else {
2780 size = 2;
2781 offset = 0;
2782 }
18c9b560 2783 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2784 /* vfp->arm */
ad69471c 2785 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2786 switch (size) {
2787 case 0:
9ee6e8bb 2788 if (offset)
ad69471c 2789 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2790 if (insn & (1 << 23))
ad69471c 2791 gen_uxtb(tmp);
9ee6e8bb 2792 else
ad69471c 2793 gen_sxtb(tmp);
9ee6e8bb
PB
2794 break;
2795 case 1:
9ee6e8bb
PB
2796 if (insn & (1 << 23)) {
2797 if (offset) {
ad69471c 2798 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2799 } else {
ad69471c 2800 gen_uxth(tmp);
9ee6e8bb
PB
2801 }
2802 } else {
2803 if (offset) {
ad69471c 2804 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2805 } else {
ad69471c 2806 gen_sxth(tmp);
9ee6e8bb
PB
2807 }
2808 }
2809 break;
2810 case 2:
9ee6e8bb
PB
2811 break;
2812 }
ad69471c 2813 store_reg(s, rd, tmp);
b7bcbe95
FB
2814 } else {
2815 /* arm->vfp */
ad69471c 2816 tmp = load_reg(s, rd);
9ee6e8bb
PB
2817 if (insn & (1 << 23)) {
2818 /* VDUP */
2819 if (size == 0) {
ad69471c 2820 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2821 } else if (size == 1) {
ad69471c 2822 gen_neon_dup_low16(tmp);
9ee6e8bb 2823 }
cbbccffc 2824 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2825 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2826 tcg_gen_mov_i32(tmp2, tmp);
2827 neon_store_reg(rn, n, tmp2);
2828 }
2829 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2830 } else {
2831 /* VMOV */
2832 switch (size) {
2833 case 0:
ad69471c
PB
2834 tmp2 = neon_load_reg(rn, pass);
2835 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2836 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2837 break;
2838 case 1:
ad69471c
PB
2839 tmp2 = neon_load_reg(rn, pass);
2840 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2841 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2842 break;
2843 case 2:
9ee6e8bb
PB
2844 break;
2845 }
ad69471c 2846 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2847 }
b7bcbe95 2848 }
9ee6e8bb
PB
2849 } else { /* !dp */
2850 if ((insn & 0x6f) != 0x00)
2851 return 1;
2852 rn = VFP_SREG_N(insn);
18c9b560 2853 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2854 /* vfp->arm */
2855 if (insn & (1 << 21)) {
2856 /* system register */
40f137e1 2857 rn >>= 1;
9ee6e8bb 2858
b7bcbe95 2859 switch (rn) {
40f137e1 2860 case ARM_VFP_FPSID:
4373f3ce 2861 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2862 VFP3 restricts all id registers to privileged
2863 accesses. */
2864 if (IS_USER(s)
2865 && arm_feature(env, ARM_FEATURE_VFP3))
2866 return 1;
4373f3ce 2867 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2868 break;
40f137e1 2869 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2870 if (IS_USER(s))
2871 return 1;
4373f3ce 2872 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2873 break;
40f137e1
PB
2874 case ARM_VFP_FPINST:
2875 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2876 /* Not present in VFP3. */
2877 if (IS_USER(s)
2878 || arm_feature(env, ARM_FEATURE_VFP3))
2879 return 1;
4373f3ce 2880 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2881 break;
40f137e1 2882 case ARM_VFP_FPSCR:
601d70b9 2883 if (rd == 15) {
4373f3ce
PB
2884 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2885 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2886 } else {
7d1b0095 2887 tmp = tcg_temp_new_i32();
4373f3ce
PB
2888 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2889 }
b7bcbe95 2890 break;
9ee6e8bb
PB
2891 case ARM_VFP_MVFR0:
2892 case ARM_VFP_MVFR1:
2893 if (IS_USER(s)
2894 || !arm_feature(env, ARM_FEATURE_VFP3))
2895 return 1;
4373f3ce 2896 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2897 break;
b7bcbe95
FB
2898 default:
2899 return 1;
2900 }
2901 } else {
2902 gen_mov_F0_vreg(0, rn);
4373f3ce 2903 tmp = gen_vfp_mrs();
b7bcbe95
FB
2904 }
2905 if (rd == 15) {
b5ff1b31 2906 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2907 gen_set_nzcv(tmp);
7d1b0095 2908 tcg_temp_free_i32(tmp);
4373f3ce
PB
2909 } else {
2910 store_reg(s, rd, tmp);
2911 }
b7bcbe95
FB
2912 } else {
2913 /* arm->vfp */
4373f3ce 2914 tmp = load_reg(s, rd);
b7bcbe95 2915 if (insn & (1 << 21)) {
40f137e1 2916 rn >>= 1;
b7bcbe95
FB
2917 /* system register */
2918 switch (rn) {
40f137e1 2919 case ARM_VFP_FPSID:
9ee6e8bb
PB
2920 case ARM_VFP_MVFR0:
2921 case ARM_VFP_MVFR1:
b7bcbe95
FB
2922 /* Writes are ignored. */
2923 break;
40f137e1 2924 case ARM_VFP_FPSCR:
4373f3ce 2925 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2926 tcg_temp_free_i32(tmp);
b5ff1b31 2927 gen_lookup_tb(s);
b7bcbe95 2928 break;
40f137e1 2929 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2930 if (IS_USER(s))
2931 return 1;
71b3c3de
JR
2932 /* TODO: VFP subarchitecture support.
2933 * For now, keep the EN bit only */
2934 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2935 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2936 gen_lookup_tb(s);
2937 break;
2938 case ARM_VFP_FPINST:
2939 case ARM_VFP_FPINST2:
4373f3ce 2940 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2941 break;
b7bcbe95
FB
2942 default:
2943 return 1;
2944 }
2945 } else {
4373f3ce 2946 gen_vfp_msr(tmp);
b7bcbe95
FB
2947 gen_mov_vreg_F0(0, rn);
2948 }
2949 }
2950 }
2951 } else {
2952 /* data processing */
2953 /* The opcode is in bits 23, 21, 20 and 6. */
2954 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2955 if (dp) {
2956 if (op == 15) {
2957 /* rn is opcode */
2958 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2959 } else {
2960 /* rn is register number */
9ee6e8bb 2961 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2962 }
2963
04595bf6 2964 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2965 /* Integer or single precision destination. */
9ee6e8bb 2966 rd = VFP_SREG_D(insn);
b7bcbe95 2967 } else {
9ee6e8bb 2968 VFP_DREG_D(rd, insn);
b7bcbe95 2969 }
04595bf6
PM
2970 if (op == 15 &&
2971 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2972 /* VCVT from int is always from S reg regardless of dp bit.
2973 * VCVT with immediate frac_bits has same format as SREG_M
2974 */
2975 rm = VFP_SREG_M(insn);
b7bcbe95 2976 } else {
9ee6e8bb 2977 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2978 }
2979 } else {
9ee6e8bb 2980 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2981 if (op == 15 && rn == 15) {
2982 /* Double precision destination. */
9ee6e8bb
PB
2983 VFP_DREG_D(rd, insn);
2984 } else {
2985 rd = VFP_SREG_D(insn);
2986 }
04595bf6
PM
2987 /* NB that we implicitly rely on the encoding for the frac_bits
2988 * in VCVT of fixed to float being the same as that of an SREG_M
2989 */
9ee6e8bb 2990 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2991 }
2992
69d1fc22 2993 veclen = s->vec_len;
b7bcbe95
FB
2994 if (op == 15 && rn > 3)
2995 veclen = 0;
2996
2997 /* Shut up compiler warnings. */
2998 delta_m = 0;
2999 delta_d = 0;
3000 bank_mask = 0;
3b46e624 3001
b7bcbe95
FB
3002 if (veclen > 0) {
3003 if (dp)
3004 bank_mask = 0xc;
3005 else
3006 bank_mask = 0x18;
3007
3008 /* Figure out what type of vector operation this is. */
3009 if ((rd & bank_mask) == 0) {
3010 /* scalar */
3011 veclen = 0;
3012 } else {
3013 if (dp)
69d1fc22 3014 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3015 else
69d1fc22 3016 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3017
3018 if ((rm & bank_mask) == 0) {
3019 /* mixed scalar/vector */
3020 delta_m = 0;
3021 } else {
3022 /* vector */
3023 delta_m = delta_d;
3024 }
3025 }
3026 }
3027
3028 /* Load the initial operands. */
3029 if (op == 15) {
3030 switch (rn) {
3031 case 16:
3032 case 17:
3033 /* Integer source */
3034 gen_mov_F0_vreg(0, rm);
3035 break;
3036 case 8:
3037 case 9:
3038 /* Compare */
3039 gen_mov_F0_vreg(dp, rd);
3040 gen_mov_F1_vreg(dp, rm);
3041 break;
3042 case 10:
3043 case 11:
3044 /* Compare with zero */
3045 gen_mov_F0_vreg(dp, rd);
3046 gen_vfp_F1_ld0(dp);
3047 break;
9ee6e8bb
PB
3048 case 20:
3049 case 21:
3050 case 22:
3051 case 23:
644ad806
PB
3052 case 28:
3053 case 29:
3054 case 30:
3055 case 31:
9ee6e8bb
PB
3056 /* Source and destination the same. */
3057 gen_mov_F0_vreg(dp, rd);
3058 break;
b7bcbe95
FB
3059 default:
3060 /* One source operand. */
3061 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3062 break;
b7bcbe95
FB
3063 }
3064 } else {
3065 /* Two source operands. */
3066 gen_mov_F0_vreg(dp, rn);
3067 gen_mov_F1_vreg(dp, rm);
3068 }
3069
3070 for (;;) {
3071 /* Perform the calculation. */
3072 switch (op) {
605a6aed
PM
3073 case 0: /* VMLA: fd + (fn * fm) */
3074 /* Note that order of inputs to the add matters for NaNs */
3075 gen_vfp_F1_mul(dp);
3076 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3077 gen_vfp_add(dp);
3078 break;
605a6aed 3079 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3080 gen_vfp_mul(dp);
605a6aed
PM
3081 gen_vfp_F1_neg(dp);
3082 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3083 gen_vfp_add(dp);
3084 break;
605a6aed
PM
3085 case 2: /* VNMLS: -fd + (fn * fm) */
3086 /* Note that it isn't valid to replace (-A + B) with (B - A)
3087 * or similar plausible looking simplifications
3088 * because this will give wrong results for NaNs.
3089 */
3090 gen_vfp_F1_mul(dp);
3091 gen_mov_F0_vreg(dp, rd);
3092 gen_vfp_neg(dp);
3093 gen_vfp_add(dp);
b7bcbe95 3094 break;
605a6aed 3095 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3096 gen_vfp_mul(dp);
605a6aed
PM
3097 gen_vfp_F1_neg(dp);
3098 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3099 gen_vfp_neg(dp);
605a6aed 3100 gen_vfp_add(dp);
b7bcbe95
FB
3101 break;
3102 case 4: /* mul: fn * fm */
3103 gen_vfp_mul(dp);
3104 break;
3105 case 5: /* nmul: -(fn * fm) */
3106 gen_vfp_mul(dp);
3107 gen_vfp_neg(dp);
3108 break;
3109 case 6: /* add: fn + fm */
3110 gen_vfp_add(dp);
3111 break;
3112 case 7: /* sub: fn - fm */
3113 gen_vfp_sub(dp);
3114 break;
3115 case 8: /* div: fn / fm */
3116 gen_vfp_div(dp);
3117 break;
9ee6e8bb
PB
3118 case 14: /* fconst */
3119 if (!arm_feature(env, ARM_FEATURE_VFP3))
3120 return 1;
3121
3122 n = (insn << 12) & 0x80000000;
3123 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3124 if (dp) {
3125 if (i & 0x40)
3126 i |= 0x3f80;
3127 else
3128 i |= 0x4000;
3129 n |= i << 16;
4373f3ce 3130 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3131 } else {
3132 if (i & 0x40)
3133 i |= 0x780;
3134 else
3135 i |= 0x800;
3136 n |= i << 19;
5b340b51 3137 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3138 }
9ee6e8bb 3139 break;
b7bcbe95
FB
3140 case 15: /* extension space */
3141 switch (rn) {
3142 case 0: /* cpy */
3143 /* no-op */
3144 break;
3145 case 1: /* abs */
3146 gen_vfp_abs(dp);
3147 break;
3148 case 2: /* neg */
3149 gen_vfp_neg(dp);
3150 break;
3151 case 3: /* sqrt */
3152 gen_vfp_sqrt(dp);
3153 break;
60011498
PB
3154 case 4: /* vcvtb.f32.f16 */
3155 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3156 return 1;
3157 tmp = gen_vfp_mrs();
3158 tcg_gen_ext16u_i32(tmp, tmp);
3159 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3160 tcg_temp_free_i32(tmp);
60011498
PB
3161 break;
3162 case 5: /* vcvtt.f32.f16 */
3163 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3164 return 1;
3165 tmp = gen_vfp_mrs();
3166 tcg_gen_shri_i32(tmp, tmp, 16);
3167 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3168 tcg_temp_free_i32(tmp);
60011498
PB
3169 break;
3170 case 6: /* vcvtb.f16.f32 */
3171 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3172 return 1;
7d1b0095 3173 tmp = tcg_temp_new_i32();
60011498
PB
3174 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3175 gen_mov_F0_vreg(0, rd);
3176 tmp2 = gen_vfp_mrs();
3177 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3178 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3179 tcg_temp_free_i32(tmp2);
60011498
PB
3180 gen_vfp_msr(tmp);
3181 break;
3182 case 7: /* vcvtt.f16.f32 */
3183 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3184 return 1;
7d1b0095 3185 tmp = tcg_temp_new_i32();
60011498
PB
3186 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3187 tcg_gen_shli_i32(tmp, tmp, 16);
3188 gen_mov_F0_vreg(0, rd);
3189 tmp2 = gen_vfp_mrs();
3190 tcg_gen_ext16u_i32(tmp2, tmp2);
3191 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3192 tcg_temp_free_i32(tmp2);
60011498
PB
3193 gen_vfp_msr(tmp);
3194 break;
b7bcbe95
FB
3195 case 8: /* cmp */
3196 gen_vfp_cmp(dp);
3197 break;
3198 case 9: /* cmpe */
3199 gen_vfp_cmpe(dp);
3200 break;
3201 case 10: /* cmpz */
3202 gen_vfp_cmp(dp);
3203 break;
3204 case 11: /* cmpez */
3205 gen_vfp_F1_ld0(dp);
3206 gen_vfp_cmpe(dp);
3207 break;
3208 case 15: /* single<->double conversion */
3209 if (dp)
4373f3ce 3210 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3211 else
4373f3ce 3212 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3213 break;
3214 case 16: /* fuito */
5500b06c 3215 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3216 break;
3217 case 17: /* fsito */
5500b06c 3218 gen_vfp_sito(dp, 0);
b7bcbe95 3219 break;
9ee6e8bb
PB
3220 case 20: /* fshto */
3221 if (!arm_feature(env, ARM_FEATURE_VFP3))
3222 return 1;
5500b06c 3223 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3224 break;
3225 case 21: /* fslto */
3226 if (!arm_feature(env, ARM_FEATURE_VFP3))
3227 return 1;
5500b06c 3228 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3229 break;
3230 case 22: /* fuhto */
3231 if (!arm_feature(env, ARM_FEATURE_VFP3))
3232 return 1;
5500b06c 3233 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3234 break;
3235 case 23: /* fulto */
3236 if (!arm_feature(env, ARM_FEATURE_VFP3))
3237 return 1;
5500b06c 3238 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3239 break;
b7bcbe95 3240 case 24: /* ftoui */
5500b06c 3241 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3242 break;
3243 case 25: /* ftouiz */
5500b06c 3244 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3245 break;
3246 case 26: /* ftosi */
5500b06c 3247 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3248 break;
3249 case 27: /* ftosiz */
5500b06c 3250 gen_vfp_tosiz(dp, 0);
b7bcbe95 3251 break;
9ee6e8bb
PB
3252 case 28: /* ftosh */
3253 if (!arm_feature(env, ARM_FEATURE_VFP3))
3254 return 1;
5500b06c 3255 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3256 break;
3257 case 29: /* ftosl */
3258 if (!arm_feature(env, ARM_FEATURE_VFP3))
3259 return 1;
5500b06c 3260 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3261 break;
3262 case 30: /* ftouh */
3263 if (!arm_feature(env, ARM_FEATURE_VFP3))
3264 return 1;
5500b06c 3265 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3266 break;
3267 case 31: /* ftoul */
3268 if (!arm_feature(env, ARM_FEATURE_VFP3))
3269 return 1;
5500b06c 3270 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3271 break;
b7bcbe95
FB
3272 default: /* undefined */
3273 printf ("rn:%d\n", rn);
3274 return 1;
3275 }
3276 break;
3277 default: /* undefined */
3278 printf ("op:%d\n", op);
3279 return 1;
3280 }
3281
3282 /* Write back the result. */
3283 if (op == 15 && (rn >= 8 && rn <= 11))
3284 ; /* Comparison, do nothing. */
04595bf6
PM
3285 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3286 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3287 gen_mov_vreg_F0(0, rd);
3288 else if (op == 15 && rn == 15)
3289 /* conversion */
3290 gen_mov_vreg_F0(!dp, rd);
3291 else
3292 gen_mov_vreg_F0(dp, rd);
3293
3294 /* break out of the loop if we have finished */
3295 if (veclen == 0)
3296 break;
3297
3298 if (op == 15 && delta_m == 0) {
3299 /* single source one-many */
3300 while (veclen--) {
3301 rd = ((rd + delta_d) & (bank_mask - 1))
3302 | (rd & bank_mask);
3303 gen_mov_vreg_F0(dp, rd);
3304 }
3305 break;
3306 }
3307 /* Setup the next operands. */
3308 veclen--;
3309 rd = ((rd + delta_d) & (bank_mask - 1))
3310 | (rd & bank_mask);
3311
3312 if (op == 15) {
3313 /* One source operand. */
3314 rm = ((rm + delta_m) & (bank_mask - 1))
3315 | (rm & bank_mask);
3316 gen_mov_F0_vreg(dp, rm);
3317 } else {
3318 /* Two source operands. */
3319 rn = ((rn + delta_d) & (bank_mask - 1))
3320 | (rn & bank_mask);
3321 gen_mov_F0_vreg(dp, rn);
3322 if (delta_m) {
3323 rm = ((rm + delta_m) & (bank_mask - 1))
3324 | (rm & bank_mask);
3325 gen_mov_F1_vreg(dp, rm);
3326 }
3327 }
3328 }
3329 }
3330 break;
3331 case 0xc:
3332 case 0xd:
8387da81 3333 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3334 /* two-register transfer */
3335 rn = (insn >> 16) & 0xf;
3336 rd = (insn >> 12) & 0xf;
3337 if (dp) {
9ee6e8bb
PB
3338 VFP_DREG_M(rm, insn);
3339 } else {
3340 rm = VFP_SREG_M(insn);
3341 }
b7bcbe95 3342
18c9b560 3343 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3344 /* vfp->arm */
3345 if (dp) {
4373f3ce
PB
3346 gen_mov_F0_vreg(0, rm * 2);
3347 tmp = gen_vfp_mrs();
3348 store_reg(s, rd, tmp);
3349 gen_mov_F0_vreg(0, rm * 2 + 1);
3350 tmp = gen_vfp_mrs();
3351 store_reg(s, rn, tmp);
b7bcbe95
FB
3352 } else {
3353 gen_mov_F0_vreg(0, rm);
4373f3ce 3354 tmp = gen_vfp_mrs();
8387da81 3355 store_reg(s, rd, tmp);
b7bcbe95 3356 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3357 tmp = gen_vfp_mrs();
8387da81 3358 store_reg(s, rn, tmp);
b7bcbe95
FB
3359 }
3360 } else {
3361 /* arm->vfp */
3362 if (dp) {
4373f3ce
PB
3363 tmp = load_reg(s, rd);
3364 gen_vfp_msr(tmp);
3365 gen_mov_vreg_F0(0, rm * 2);
3366 tmp = load_reg(s, rn);
3367 gen_vfp_msr(tmp);
3368 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3369 } else {
8387da81 3370 tmp = load_reg(s, rd);
4373f3ce 3371 gen_vfp_msr(tmp);
b7bcbe95 3372 gen_mov_vreg_F0(0, rm);
8387da81 3373 tmp = load_reg(s, rn);
4373f3ce 3374 gen_vfp_msr(tmp);
b7bcbe95
FB
3375 gen_mov_vreg_F0(0, rm + 1);
3376 }
3377 }
3378 } else {
3379 /* Load/store */
3380 rn = (insn >> 16) & 0xf;
3381 if (dp)
9ee6e8bb 3382 VFP_DREG_D(rd, insn);
b7bcbe95 3383 else
9ee6e8bb
PB
3384 rd = VFP_SREG_D(insn);
3385 if (s->thumb && rn == 15) {
7d1b0095 3386 addr = tcg_temp_new_i32();
312eea9f 3387 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3388 } else {
312eea9f 3389 addr = load_reg(s, rn);
9ee6e8bb 3390 }
b7bcbe95
FB
3391 if ((insn & 0x01200000) == 0x01000000) {
3392 /* Single load/store */
3393 offset = (insn & 0xff) << 2;
3394 if ((insn & (1 << 23)) == 0)
3395 offset = -offset;
312eea9f 3396 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3397 if (insn & (1 << 20)) {
312eea9f 3398 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3399 gen_mov_vreg_F0(dp, rd);
3400 } else {
3401 gen_mov_F0_vreg(dp, rd);
312eea9f 3402 gen_vfp_st(s, dp, addr);
b7bcbe95 3403 }
7d1b0095 3404 tcg_temp_free_i32(addr);
b7bcbe95
FB
3405 } else {
3406 /* load/store multiple */
3407 if (dp)
3408 n = (insn >> 1) & 0x7f;
3409 else
3410 n = insn & 0xff;
3411
3412 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3413 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3414
3415 if (dp)
3416 offset = 8;
3417 else
3418 offset = 4;
3419 for (i = 0; i < n; i++) {
18c9b560 3420 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3421 /* load */
312eea9f 3422 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3423 gen_mov_vreg_F0(dp, rd + i);
3424 } else {
3425 /* store */
3426 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3427 gen_vfp_st(s, dp, addr);
b7bcbe95 3428 }
312eea9f 3429 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3430 }
3431 if (insn & (1 << 21)) {
3432 /* writeback */
3433 if (insn & (1 << 24))
3434 offset = -offset * n;
3435 else if (dp && (insn & 1))
3436 offset = 4;
3437 else
3438 offset = 0;
3439
3440 if (offset != 0)
312eea9f
FN
3441 tcg_gen_addi_i32(addr, addr, offset);
3442 store_reg(s, rn, addr);
3443 } else {
7d1b0095 3444 tcg_temp_free_i32(addr);
b7bcbe95
FB
3445 }
3446 }
3447 }
3448 break;
3449 default:
3450 /* Should never happen. */
3451 return 1;
3452 }
3453 return 0;
3454}
3455
6e256c93 3456static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3457{
6e256c93
FB
3458 TranslationBlock *tb;
3459
3460 tb = s->tb;
3461 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3462 tcg_gen_goto_tb(n);
8984bd2e 3463 gen_set_pc_im(dest);
4b4a72e5 3464 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3465 } else {
8984bd2e 3466 gen_set_pc_im(dest);
57fec1fe 3467 tcg_gen_exit_tb(0);
6e256c93 3468 }
c53be334
FB
3469}
3470
8aaca4c0
FB
3471static inline void gen_jmp (DisasContext *s, uint32_t dest)
3472{
551bd27f 3473 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3474 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3475 if (s->thumb)
d9ba4830
PB
3476 dest |= 1;
3477 gen_bx_im(s, dest);
8aaca4c0 3478 } else {
6e256c93 3479 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3480 s->is_jmp = DISAS_TB_JUMP;
3481 }
3482}
3483
d9ba4830 3484static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3485{
ee097184 3486 if (x)
d9ba4830 3487 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3488 else
d9ba4830 3489 gen_sxth(t0);
ee097184 3490 if (y)
d9ba4830 3491 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3492 else
d9ba4830
PB
3493 gen_sxth(t1);
3494 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3495}
3496
3497/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3498static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3499 uint32_t mask;
3500
3501 mask = 0;
3502 if (flags & (1 << 0))
3503 mask |= 0xff;
3504 if (flags & (1 << 1))
3505 mask |= 0xff00;
3506 if (flags & (1 << 2))
3507 mask |= 0xff0000;
3508 if (flags & (1 << 3))
3509 mask |= 0xff000000;
9ee6e8bb 3510
2ae23e75 3511 /* Mask out undefined bits. */
9ee6e8bb 3512 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3513 if (!arm_feature(env, ARM_FEATURE_V4T))
3514 mask &= ~CPSR_T;
3515 if (!arm_feature(env, ARM_FEATURE_V5))
3516 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3517 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3518 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3519 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3520 mask &= ~CPSR_IT;
9ee6e8bb 3521 /* Mask out execution state bits. */
2ae23e75 3522 if (!spsr)
e160c51c 3523 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3524 /* Mask out privileged bits. */
3525 if (IS_USER(s))
9ee6e8bb 3526 mask &= CPSR_USER;
b5ff1b31
FB
3527 return mask;
3528}
3529
2fbac54b
FN
3530/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3531static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3532{
d9ba4830 3533 TCGv tmp;
b5ff1b31
FB
3534 if (spsr) {
3535 /* ??? This is also undefined in system mode. */
3536 if (IS_USER(s))
3537 return 1;
d9ba4830
PB
3538
3539 tmp = load_cpu_field(spsr);
3540 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3541 tcg_gen_andi_i32(t0, t0, mask);
3542 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3543 store_cpu_field(tmp, spsr);
b5ff1b31 3544 } else {
2fbac54b 3545 gen_set_cpsr(t0, mask);
b5ff1b31 3546 }
7d1b0095 3547 tcg_temp_free_i32(t0);
b5ff1b31
FB
3548 gen_lookup_tb(s);
3549 return 0;
3550}
3551
2fbac54b
FN
3552/* Returns nonzero if access to the PSR is not permitted. */
3553static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3554{
3555 TCGv tmp;
7d1b0095 3556 tmp = tcg_temp_new_i32();
2fbac54b
FN
3557 tcg_gen_movi_i32(tmp, val);
3558 return gen_set_psr(s, mask, spsr, tmp);
3559}
3560
e9bb4aa9
JR
3561/* Generate an old-style exception return. Marks pc as dead. */
3562static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3563{
d9ba4830 3564 TCGv tmp;
e9bb4aa9 3565 store_reg(s, 15, pc);
d9ba4830
PB
3566 tmp = load_cpu_field(spsr);
3567 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3568 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3569 s->is_jmp = DISAS_UPDATE;
3570}
3571
b0109805
PB
3572/* Generate a v6 exception return. Marks both values as dead. */
3573static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3574{
b0109805 3575 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3576 tcg_temp_free_i32(cpsr);
b0109805 3577 store_reg(s, 15, pc);
9ee6e8bb
PB
3578 s->is_jmp = DISAS_UPDATE;
3579}
3b46e624 3580
9ee6e8bb
PB
3581static inline void
3582gen_set_condexec (DisasContext *s)
3583{
3584 if (s->condexec_mask) {
8f01245e 3585 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3586 TCGv tmp = tcg_temp_new_i32();
8f01245e 3587 tcg_gen_movi_i32(tmp, val);
d9ba4830 3588 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3589 }
3590}
3b46e624 3591
bc4a0de0
PM
3592static void gen_exception_insn(DisasContext *s, int offset, int excp)
3593{
3594 gen_set_condexec(s);
3595 gen_set_pc_im(s->pc - offset);
3596 gen_exception(excp);
3597 s->is_jmp = DISAS_JUMP;
3598}
3599
9ee6e8bb
PB
3600static void gen_nop_hint(DisasContext *s, int val)
3601{
3602 switch (val) {
3603 case 3: /* wfi */
8984bd2e 3604 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3605 s->is_jmp = DISAS_WFI;
3606 break;
3607 case 2: /* wfe */
3608 case 4: /* sev */
3609 /* TODO: Implement SEV and WFE. May help SMP performance. */
3610 default: /* nop */
3611 break;
3612 }
3613}
99c475ab 3614
ad69471c 3615#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3616
62698be3 3617static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3618{
3619 switch (size) {
dd8fbd78
FN
3620 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3621 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3622 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3623 default: abort();
9ee6e8bb 3624 }
9ee6e8bb
PB
3625}
3626
dd8fbd78 3627static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3628{
3629 switch (size) {
dd8fbd78
FN
3630 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3631 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3632 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3633 default: return;
3634 }
3635}
3636
3637/* 32-bit pairwise ops end up the same as the elementwise versions. */
3638#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3639#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3640#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3641#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3642
ad69471c
PB
/* Dispatch a Neon integer helper that needs cpu_env, selecting the
 * signed/unsigned 8/16/32-bit variant from the local `size' and `u'.
 * Operates on the local temps `tmp' and `tmp2'; returns 1 (from the
 * enclosing function) on an invalid size/u combination.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3665
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3688
dd8fbd78 3689static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3690{
7d1b0095 3691 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3692 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3693 return tmp;
9ee6e8bb
PB
3694}
3695
dd8fbd78 3696static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3697{
dd8fbd78 3698 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3699 tcg_temp_free_i32(var);
9ee6e8bb
PB
3700}
3701
dd8fbd78 3702static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3703{
dd8fbd78 3704 TCGv tmp;
9ee6e8bb 3705 if (size == 1) {
0fad6efc
PM
3706 tmp = neon_load_reg(reg & 7, reg >> 4);
3707 if (reg & 8) {
dd8fbd78 3708 gen_neon_dup_high16(tmp);
0fad6efc
PM
3709 } else {
3710 gen_neon_dup_low16(tmp);
dd8fbd78 3711 }
0fad6efc
PM
3712 } else {
3713 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3714 }
dd8fbd78 3715 return tmp;
9ee6e8bb
PB
3716}
3717
02acedf9 3718static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3719{
02acedf9 3720 TCGv tmp, tmp2;
600b828c 3721 if (!q && size == 2) {
02acedf9
PM
3722 return 1;
3723 }
3724 tmp = tcg_const_i32(rd);
3725 tmp2 = tcg_const_i32(rm);
3726 if (q) {
3727 switch (size) {
3728 case 0:
02da0b2d 3729 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3730 break;
3731 case 1:
02da0b2d 3732 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3733 break;
3734 case 2:
02da0b2d 3735 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3736 break;
3737 default:
3738 abort();
3739 }
3740 } else {
3741 switch (size) {
3742 case 0:
02da0b2d 3743 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3744 break;
3745 case 1:
02da0b2d 3746 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3747 break;
3748 default:
3749 abort();
3750 }
3751 }
3752 tcg_temp_free_i32(tmp);
3753 tcg_temp_free_i32(tmp2);
3754 return 0;
19457615
FN
3755}
3756
d68a6f3a 3757static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3758{
3759 TCGv tmp, tmp2;
600b828c 3760 if (!q && size == 2) {
d68a6f3a
PM
3761 return 1;
3762 }
3763 tmp = tcg_const_i32(rd);
3764 tmp2 = tcg_const_i32(rm);
3765 if (q) {
3766 switch (size) {
3767 case 0:
02da0b2d 3768 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3769 break;
3770 case 1:
02da0b2d 3771 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3772 break;
3773 case 2:
02da0b2d 3774 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3775 break;
3776 default:
3777 abort();
3778 }
3779 } else {
3780 switch (size) {
3781 case 0:
02da0b2d 3782 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3783 break;
3784 case 1:
02da0b2d 3785 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3786 break;
3787 default:
3788 abort();
3789 }
3790 }
3791 tcg_temp_free_i32(tmp);
3792 tcg_temp_free_i32(tmp2);
3793 return 0;
19457615
FN
3794}
3795
19457615
FN
3796static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3797{
3798 TCGv rd, tmp;
3799
7d1b0095
PM
3800 rd = tcg_temp_new_i32();
3801 tmp = tcg_temp_new_i32();
19457615
FN
3802
3803 tcg_gen_shli_i32(rd, t0, 8);
3804 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3805 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3806 tcg_gen_or_i32(rd, rd, tmp);
3807
3808 tcg_gen_shri_i32(t1, t1, 8);
3809 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3810 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3811 tcg_gen_or_i32(t1, t1, tmp);
3812 tcg_gen_mov_i32(t0, rd);
3813
7d1b0095
PM
3814 tcg_temp_free_i32(tmp);
3815 tcg_temp_free_i32(rd);
19457615
FN
3816}
3817
3818static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3819{
3820 TCGv rd, tmp;
3821
7d1b0095
PM
3822 rd = tcg_temp_new_i32();
3823 tmp = tcg_temp_new_i32();
19457615
FN
3824
3825 tcg_gen_shli_i32(rd, t0, 16);
3826 tcg_gen_andi_i32(tmp, t1, 0xffff);
3827 tcg_gen_or_i32(rd, rd, tmp);
3828 tcg_gen_shri_i32(t1, t1, 16);
3829 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3830 tcg_gen_or_i32(t1, t1, tmp);
3831 tcg_gen_mov_i32(t0, rd);
3832
7d1b0095
PM
3833 tcg_temp_free_i32(tmp);
3834 tcg_temp_free_i32(rd);
19457615
FN
3835}
3836
3837
9ee6e8bb
PB
/* Per-opcode layout for Neon "load/store multiple structures":
 * number of registers transferred, element interleave factor, and
 * register spacing.  Indexed by bits [11:8] of the instruction.
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},  /* op 0 */
    {4, 4, 2},  /* op 1 */
    {4, 1, 1},  /* op 2 */
    {4, 2, 1},  /* op 3 */
    {3, 3, 1},  /* op 4 */
    {3, 3, 2},  /* op 5 */
    {3, 1, 1},  /* op 6 */
    {1, 1, 1},  /* op 7 */
    {2, 2, 1},  /* op 8 */
    {2, 2, 2},  /* op 9 */
    {2, 1, 1}   /* op 10 */
};
3855
3856/* Translate a NEON load/store element instruction. Return nonzero if the
3857 instruction is invalid. */
3858static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3859{
3860 int rd, rn, rm;
3861 int op;
3862 int nregs;
3863 int interleave;
84496233 3864 int spacing;
9ee6e8bb
PB
3865 int stride;
3866 int size;
3867 int reg;
3868 int pass;
3869 int load;
3870 int shift;
9ee6e8bb 3871 int n;
1b2b1e54 3872 TCGv addr;
b0109805 3873 TCGv tmp;
8f8e3aa4 3874 TCGv tmp2;
84496233 3875 TCGv_i64 tmp64;
9ee6e8bb 3876
5df8bac1 3877 if (!s->vfp_enabled)
9ee6e8bb
PB
3878 return 1;
3879 VFP_DREG_D(rd, insn);
3880 rn = (insn >> 16) & 0xf;
3881 rm = insn & 0xf;
3882 load = (insn & (1 << 21)) != 0;
3883 if ((insn & (1 << 23)) == 0) {
3884 /* Load store all elements. */
3885 op = (insn >> 8) & 0xf;
3886 size = (insn >> 6) & 3;
84496233 3887 if (op > 10)
9ee6e8bb 3888 return 1;
f2dd89d0
PM
3889 /* Catch UNDEF cases for bad values of align field */
3890 switch (op & 0xc) {
3891 case 4:
3892 if (((insn >> 5) & 1) == 1) {
3893 return 1;
3894 }
3895 break;
3896 case 8:
3897 if (((insn >> 4) & 3) == 3) {
3898 return 1;
3899 }
3900 break;
3901 default:
3902 break;
3903 }
9ee6e8bb
PB
3904 nregs = neon_ls_element_type[op].nregs;
3905 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3906 spacing = neon_ls_element_type[op].spacing;
3907 if (size == 3 && (interleave | spacing) != 1)
3908 return 1;
e318a60b 3909 addr = tcg_temp_new_i32();
dcc65026 3910 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3911 stride = (1 << size) * interleave;
3912 for (reg = 0; reg < nregs; reg++) {
3913 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3914 load_reg_var(s, addr, rn);
3915 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3916 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3917 load_reg_var(s, addr, rn);
3918 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3919 }
84496233
JR
3920 if (size == 3) {
3921 if (load) {
3922 tmp64 = gen_ld64(addr, IS_USER(s));
3923 neon_store_reg64(tmp64, rd);
3924 tcg_temp_free_i64(tmp64);
3925 } else {
3926 tmp64 = tcg_temp_new_i64();
3927 neon_load_reg64(tmp64, rd);
3928 gen_st64(tmp64, addr, IS_USER(s));
3929 }
3930 tcg_gen_addi_i32(addr, addr, stride);
3931 } else {
3932 for (pass = 0; pass < 2; pass++) {
3933 if (size == 2) {
3934 if (load) {
3935 tmp = gen_ld32(addr, IS_USER(s));
3936 neon_store_reg(rd, pass, tmp);
3937 } else {
3938 tmp = neon_load_reg(rd, pass);
3939 gen_st32(tmp, addr, IS_USER(s));
3940 }
1b2b1e54 3941 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3942 } else if (size == 1) {
3943 if (load) {
3944 tmp = gen_ld16u(addr, IS_USER(s));
3945 tcg_gen_addi_i32(addr, addr, stride);
3946 tmp2 = gen_ld16u(addr, IS_USER(s));
3947 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3948 tcg_gen_shli_i32(tmp2, tmp2, 16);
3949 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3950 tcg_temp_free_i32(tmp2);
84496233
JR
3951 neon_store_reg(rd, pass, tmp);
3952 } else {
3953 tmp = neon_load_reg(rd, pass);
7d1b0095 3954 tmp2 = tcg_temp_new_i32();
84496233
JR
3955 tcg_gen_shri_i32(tmp2, tmp, 16);
3956 gen_st16(tmp, addr, IS_USER(s));
3957 tcg_gen_addi_i32(addr, addr, stride);
3958 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3959 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3960 }
84496233
JR
3961 } else /* size == 0 */ {
3962 if (load) {
3963 TCGV_UNUSED(tmp2);
3964 for (n = 0; n < 4; n++) {
3965 tmp = gen_ld8u(addr, IS_USER(s));
3966 tcg_gen_addi_i32(addr, addr, stride);
3967 if (n == 0) {
3968 tmp2 = tmp;
3969 } else {
41ba8341
PB
3970 tcg_gen_shli_i32(tmp, tmp, n * 8);
3971 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3972 tcg_temp_free_i32(tmp);
84496233 3973 }
9ee6e8bb 3974 }
84496233
JR
3975 neon_store_reg(rd, pass, tmp2);
3976 } else {
3977 tmp2 = neon_load_reg(rd, pass);
3978 for (n = 0; n < 4; n++) {
7d1b0095 3979 tmp = tcg_temp_new_i32();
84496233
JR
3980 if (n == 0) {
3981 tcg_gen_mov_i32(tmp, tmp2);
3982 } else {
3983 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3984 }
3985 gen_st8(tmp, addr, IS_USER(s));
3986 tcg_gen_addi_i32(addr, addr, stride);
3987 }
7d1b0095 3988 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3989 }
3990 }
3991 }
3992 }
84496233 3993 rd += spacing;
9ee6e8bb 3994 }
e318a60b 3995 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3996 stride = nregs * 8;
3997 } else {
3998 size = (insn >> 10) & 3;
3999 if (size == 3) {
4000 /* Load single element to all lanes. */
8e18cde3
PM
4001 int a = (insn >> 4) & 1;
4002 if (!load) {
9ee6e8bb 4003 return 1;
8e18cde3 4004 }
9ee6e8bb
PB
4005 size = (insn >> 6) & 3;
4006 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4007
4008 if (size == 3) {
4009 if (nregs != 4 || a == 0) {
9ee6e8bb 4010 return 1;
99c475ab 4011 }
8e18cde3
PM
4012 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4013 size = 2;
4014 }
4015 if (nregs == 1 && a == 1 && size == 0) {
4016 return 1;
4017 }
4018 if (nregs == 3 && a == 1) {
4019 return 1;
4020 }
e318a60b 4021 addr = tcg_temp_new_i32();
8e18cde3
PM
4022 load_reg_var(s, addr, rn);
4023 if (nregs == 1) {
4024 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4025 tmp = gen_load_and_replicate(s, addr, size);
4026 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4027 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4028 if (insn & (1 << 5)) {
4029 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4030 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4031 }
4032 tcg_temp_free_i32(tmp);
4033 } else {
4034 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4035 stride = (insn & (1 << 5)) ? 2 : 1;
4036 for (reg = 0; reg < nregs; reg++) {
4037 tmp = gen_load_and_replicate(s, addr, size);
4038 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4039 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4040 tcg_temp_free_i32(tmp);
4041 tcg_gen_addi_i32(addr, addr, 1 << size);
4042 rd += stride;
4043 }
9ee6e8bb 4044 }
e318a60b 4045 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4046 stride = (1 << size) * nregs;
4047 } else {
4048 /* Single element. */
93262b16 4049 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4050 pass = (insn >> 7) & 1;
4051 switch (size) {
4052 case 0:
4053 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4054 stride = 1;
4055 break;
4056 case 1:
4057 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4058 stride = (insn & (1 << 5)) ? 2 : 1;
4059 break;
4060 case 2:
4061 shift = 0;
9ee6e8bb
PB
4062 stride = (insn & (1 << 6)) ? 2 : 1;
4063 break;
4064 default:
4065 abort();
4066 }
4067 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4068 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4069 switch (nregs) {
4070 case 1:
4071 if (((idx & (1 << size)) != 0) ||
4072 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4073 return 1;
4074 }
4075 break;
4076 case 3:
4077 if ((idx & 1) != 0) {
4078 return 1;
4079 }
4080 /* fall through */
4081 case 2:
4082 if (size == 2 && (idx & 2) != 0) {
4083 return 1;
4084 }
4085 break;
4086 case 4:
4087 if ((size == 2) && ((idx & 3) == 3)) {
4088 return 1;
4089 }
4090 break;
4091 default:
4092 abort();
4093 }
4094 if ((rd + stride * (nregs - 1)) > 31) {
4095 /* Attempts to write off the end of the register file
4096 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4097 * the neon_load_reg() would write off the end of the array.
4098 */
4099 return 1;
4100 }
e318a60b 4101 addr = tcg_temp_new_i32();
dcc65026 4102 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4103 for (reg = 0; reg < nregs; reg++) {
4104 if (load) {
9ee6e8bb
PB
4105 switch (size) {
4106 case 0:
1b2b1e54 4107 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4108 break;
4109 case 1:
1b2b1e54 4110 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4111 break;
4112 case 2:
1b2b1e54 4113 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4114 break;
a50f5b91
PB
4115 default: /* Avoid compiler warnings. */
4116 abort();
9ee6e8bb
PB
4117 }
4118 if (size != 2) {
8f8e3aa4
PB
4119 tmp2 = neon_load_reg(rd, pass);
4120 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4121 tcg_temp_free_i32(tmp2);
9ee6e8bb 4122 }
8f8e3aa4 4123 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4124 } else { /* Store */
8f8e3aa4
PB
4125 tmp = neon_load_reg(rd, pass);
4126 if (shift)
4127 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4128 switch (size) {
4129 case 0:
1b2b1e54 4130 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4131 break;
4132 case 1:
1b2b1e54 4133 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4134 break;
4135 case 2:
1b2b1e54 4136 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4137 break;
99c475ab 4138 }
99c475ab 4139 }
9ee6e8bb 4140 rd += stride;
1b2b1e54 4141 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4142 }
e318a60b 4143 tcg_temp_free_i32(addr);
9ee6e8bb 4144 stride = nregs * (1 << size);
99c475ab 4145 }
9ee6e8bb
PB
4146 }
4147 if (rm != 15) {
b26eefb6
PB
4148 TCGv base;
4149
4150 base = load_reg(s, rn);
9ee6e8bb 4151 if (rm == 13) {
b26eefb6 4152 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4153 } else {
b26eefb6
PB
4154 TCGv index;
4155 index = load_reg(s, rm);
4156 tcg_gen_add_i32(base, base, index);
7d1b0095 4157 tcg_temp_free_i32(index);
9ee6e8bb 4158 }
b26eefb6 4159 store_reg(s, rn, base);
9ee6e8bb
PB
4160 }
4161 return 0;
4162}
3b46e624 4163
8f8e3aa4
PB
4164/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4165static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4166{
4167 tcg_gen_and_i32(t, t, c);
f669df27 4168 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4169 tcg_gen_or_i32(dest, t, f);
4170}
4171
a7812ae4 4172static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4173{
4174 switch (size) {
4175 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4176 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4177 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4178 default: abort();
4179 }
4180}
4181
a7812ae4 4182static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4183{
4184 switch (size) {
02da0b2d
PM
4185 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4186 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4187 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4188 default: abort();
4189 }
4190}
4191
a7812ae4 4192static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4193{
4194 switch (size) {
02da0b2d
PM
4195 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4196 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4197 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4198 default: abort();
4199 }
4200}
4201
af1bbf30
JR
4202static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4203{
4204 switch (size) {
02da0b2d
PM
4205 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4206 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4207 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4208 default: abort();
4209 }
4210}
4211
ad69471c
PB
4212static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4213 int q, int u)
4214{
4215 if (q) {
4216 if (u) {
4217 switch (size) {
4218 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4219 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4220 default: abort();
4221 }
4222 } else {
4223 switch (size) {
4224 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4225 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4226 default: abort();
4227 }
4228 }
4229 } else {
4230 if (u) {
4231 switch (size) {
b408a9b0
CL
4232 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4233 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4234 default: abort();
4235 }
4236 } else {
4237 switch (size) {
4238 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4239 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4240 default: abort();
4241 }
4242 }
4243 }
4244}
4245
a7812ae4 4246static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4247{
4248 if (u) {
4249 switch (size) {
4250 case 0: gen_helper_neon_widen_u8(dest, src); break;
4251 case 1: gen_helper_neon_widen_u16(dest, src); break;
4252 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4253 default: abort();
4254 }
4255 } else {
4256 switch (size) {
4257 case 0: gen_helper_neon_widen_s8(dest, src); break;
4258 case 1: gen_helper_neon_widen_s16(dest, src); break;
4259 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4260 default: abort();
4261 }
4262 }
7d1b0095 4263 tcg_temp_free_i32(src);
ad69471c
PB
4264}
4265
4266static inline void gen_neon_addl(int size)
4267{
4268 switch (size) {
4269 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4270 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4271 case 2: tcg_gen_add_i64(CPU_V001); break;
4272 default: abort();
4273 }
4274}
4275
4276static inline void gen_neon_subl(int size)
4277{
4278 switch (size) {
4279 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4280 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4281 case 2: tcg_gen_sub_i64(CPU_V001); break;
4282 default: abort();
4283 }
4284}
4285
a7812ae4 4286static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4287{
4288 switch (size) {
4289 case 0: gen_helper_neon_negl_u16(var, var); break;
4290 case 1: gen_helper_neon_negl_u32(var, var); break;
4291 case 2: gen_helper_neon_negl_u64(var, var); break;
4292 default: abort();
4293 }
4294}
4295
a7812ae4 4296static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4297{
4298 switch (size) {
02da0b2d
PM
4299 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4300 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4301 default: abort();
4302 }
4303}
4304
a7812ae4 4305static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4306{
a7812ae4 4307 TCGv_i64 tmp;
ad69471c
PB
4308
4309 switch ((size << 1) | u) {
4310 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4311 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4312 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4313 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4314 case 4:
4315 tmp = gen_muls_i64_i32(a, b);
4316 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4317 tcg_temp_free_i64(tmp);
ad69471c
PB
4318 break;
4319 case 5:
4320 tmp = gen_mulu_i64_i32(a, b);
4321 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4322 tcg_temp_free_i64(tmp);
ad69471c
PB
4323 break;
4324 default: abort();
4325 }
c6067f04
CL
4326
4327 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4328 Don't forget to clean them now. */
4329 if (size < 2) {
7d1b0095
PM
4330 tcg_temp_free_i32(a);
4331 tcg_temp_free_i32(b);
c6067f04 4332 }
ad69471c
PB
4333}
4334
c33171c7
PM
4335static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4336{
4337 if (op) {
4338 if (u) {
4339 gen_neon_unarrow_sats(size, dest, src);
4340 } else {
4341 gen_neon_narrow(size, dest, src);
4342 }
4343 } else {
4344 if (u) {
4345 gen_neon_narrow_satu(size, dest, src);
4346 } else {
4347 gen_neon_narrow_sats(size, dest, src);
4348 }
4349 }
4350}
4351
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Bitmask of legal size field values for each 3-reg-same op: bit n set
 * means size value n is allowed.  Unlisted (unallocated) ops are zero
 * and therefore always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4419
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* True iff this neon 2reg-misc op is float-to-float. */
    if (op >= NEON_2RM_VRECPE_F) {
        return 1;
    }
    return op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F;
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4526
9ee6e8bb
PB
4527/* Translate a NEON data processing instruction. Return nonzero if the
4528 instruction is invalid.
ad69471c
PB
4529 We process data in a mixture of 32-bit and 64-bit chunks.
4530 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4531
9ee6e8bb
PB
4532static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4533{
4534 int op;
4535 int q;
4536 int rd, rn, rm;
4537 int size;
4538 int shift;
4539 int pass;
4540 int count;
4541 int pairwise;
4542 int u;
ca9a32e4 4543 uint32_t imm, mask;
b75263d6 4544 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4545 TCGv_i64 tmp64;
9ee6e8bb 4546
5df8bac1 4547 if (!s->vfp_enabled)
9ee6e8bb
PB
4548 return 1;
4549 q = (insn & (1 << 6)) != 0;
4550 u = (insn >> 24) & 1;
4551 VFP_DREG_D(rd, insn);
4552 VFP_DREG_N(rn, insn);
4553 VFP_DREG_M(rm, insn);
4554 size = (insn >> 20) & 3;
4555 if ((insn & (1 << 23)) == 0) {
4556 /* Three register same length. */
4557 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4558 /* Catch invalid op and bad size combinations: UNDEF */
4559 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4560 return 1;
4561 }
25f84f79
PM
4562 /* All insns of this form UNDEF for either this condition or the
4563 * superset of cases "Q==1"; we catch the latter later.
4564 */
4565 if (q && ((rd | rn | rm) & 1)) {
4566 return 1;
4567 }
62698be3
PM
4568 if (size == 3 && op != NEON_3R_LOGIC) {
4569 /* 64-bit element instructions. */
9ee6e8bb 4570 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4571 neon_load_reg64(cpu_V0, rn + pass);
4572 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4573 switch (op) {
62698be3 4574 case NEON_3R_VQADD:
9ee6e8bb 4575 if (u) {
02da0b2d
PM
4576 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4577 cpu_V0, cpu_V1);
2c0262af 4578 } else {
02da0b2d
PM
4579 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4580 cpu_V0, cpu_V1);
2c0262af 4581 }
9ee6e8bb 4582 break;
62698be3 4583 case NEON_3R_VQSUB:
9ee6e8bb 4584 if (u) {
02da0b2d
PM
4585 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4586 cpu_V0, cpu_V1);
ad69471c 4587 } else {
02da0b2d
PM
4588 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4589 cpu_V0, cpu_V1);
ad69471c
PB
4590 }
4591 break;
62698be3 4592 case NEON_3R_VSHL:
ad69471c
PB
4593 if (u) {
4594 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4595 } else {
4596 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4597 }
4598 break;
62698be3 4599 case NEON_3R_VQSHL:
ad69471c 4600 if (u) {
02da0b2d
PM
4601 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4602 cpu_V1, cpu_V0);
ad69471c 4603 } else {
02da0b2d
PM
4604 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4605 cpu_V1, cpu_V0);
ad69471c
PB
4606 }
4607 break;
62698be3 4608 case NEON_3R_VRSHL:
ad69471c
PB
4609 if (u) {
4610 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4611 } else {
ad69471c
PB
4612 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4613 }
4614 break;
62698be3 4615 case NEON_3R_VQRSHL:
ad69471c 4616 if (u) {
02da0b2d
PM
4617 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4618 cpu_V1, cpu_V0);
ad69471c 4619 } else {
02da0b2d
PM
4620 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4621 cpu_V1, cpu_V0);
1e8d4eec 4622 }
9ee6e8bb 4623 break;
62698be3 4624 case NEON_3R_VADD_VSUB:
9ee6e8bb 4625 if (u) {
ad69471c 4626 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4627 } else {
ad69471c 4628 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4629 }
4630 break;
4631 default:
4632 abort();
2c0262af 4633 }
ad69471c 4634 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4635 }
9ee6e8bb 4636 return 0;
2c0262af 4637 }
25f84f79 4638 pairwise = 0;
9ee6e8bb 4639 switch (op) {
62698be3
PM
4640 case NEON_3R_VSHL:
4641 case NEON_3R_VQSHL:
4642 case NEON_3R_VRSHL:
4643 case NEON_3R_VQRSHL:
9ee6e8bb 4644 {
ad69471c
PB
4645 int rtmp;
4646 /* Shift instruction operands are reversed. */
4647 rtmp = rn;
9ee6e8bb 4648 rn = rm;
ad69471c 4649 rm = rtmp;
9ee6e8bb 4650 }
2c0262af 4651 break;
25f84f79
PM
4652 case NEON_3R_VPADD:
4653 if (u) {
4654 return 1;
4655 }
4656 /* Fall through */
62698be3
PM
4657 case NEON_3R_VPMAX:
4658 case NEON_3R_VPMIN:
9ee6e8bb 4659 pairwise = 1;
2c0262af 4660 break;
25f84f79
PM
4661 case NEON_3R_FLOAT_ARITH:
4662 pairwise = (u && size < 2); /* if VPADD (float) */
4663 break;
4664 case NEON_3R_FLOAT_MINMAX:
4665 pairwise = u; /* if VPMIN/VPMAX (float) */
4666 break;
4667 case NEON_3R_FLOAT_CMP:
4668 if (!u && size) {
4669 /* no encoding for U=0 C=1x */
4670 return 1;
4671 }
4672 break;
4673 case NEON_3R_FLOAT_ACMP:
4674 if (!u) {
4675 return 1;
4676 }
4677 break;
4678 case NEON_3R_VRECPS_VRSQRTS:
4679 if (u) {
4680 return 1;
4681 }
2c0262af 4682 break;
25f84f79
PM
4683 case NEON_3R_VMUL:
4684 if (u && (size != 0)) {
4685 /* UNDEF on invalid size for polynomial subcase */
4686 return 1;
4687 }
2c0262af 4688 break;
9ee6e8bb 4689 default:
2c0262af 4690 break;
9ee6e8bb 4691 }
dd8fbd78 4692
25f84f79
PM
4693 if (pairwise && q) {
4694 /* All the pairwise insns UNDEF if Q is set */
4695 return 1;
4696 }
4697
9ee6e8bb
PB
4698 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4699
4700 if (pairwise) {
4701 /* Pairwise. */
a5a14945
JR
4702 if (pass < 1) {
4703 tmp = neon_load_reg(rn, 0);
4704 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4705 } else {
a5a14945
JR
4706 tmp = neon_load_reg(rm, 0);
4707 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4708 }
4709 } else {
4710 /* Elementwise. */
dd8fbd78
FN
4711 tmp = neon_load_reg(rn, pass);
4712 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4713 }
4714 switch (op) {
62698be3 4715 case NEON_3R_VHADD:
9ee6e8bb
PB
4716 GEN_NEON_INTEGER_OP(hadd);
4717 break;
62698be3 4718 case NEON_3R_VQADD:
02da0b2d 4719 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4720 break;
62698be3 4721 case NEON_3R_VRHADD:
9ee6e8bb 4722 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4723 break;
62698be3 4724 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4725 switch ((u << 2) | size) {
4726 case 0: /* VAND */
dd8fbd78 4727 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4728 break;
4729 case 1: /* BIC */
f669df27 4730 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4731 break;
4732 case 2: /* VORR */
dd8fbd78 4733 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4734 break;
4735 case 3: /* VORN */
f669df27 4736 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4737 break;
4738 case 4: /* VEOR */
dd8fbd78 4739 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4740 break;
4741 case 5: /* VBSL */
dd8fbd78
FN
4742 tmp3 = neon_load_reg(rd, pass);
4743 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4744 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4745 break;
4746 case 6: /* VBIT */
dd8fbd78
FN
4747 tmp3 = neon_load_reg(rd, pass);
4748 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4749 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4750 break;
4751 case 7: /* VBIF */
dd8fbd78
FN
4752 tmp3 = neon_load_reg(rd, pass);
4753 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4754 tcg_temp_free_i32(tmp3);
9ee6e8bb 4755 break;
2c0262af
FB
4756 }
4757 break;
62698be3 4758 case NEON_3R_VHSUB:
9ee6e8bb
PB
4759 GEN_NEON_INTEGER_OP(hsub);
4760 break;
62698be3 4761 case NEON_3R_VQSUB:
02da0b2d 4762 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4763 break;
62698be3 4764 case NEON_3R_VCGT:
9ee6e8bb
PB
4765 GEN_NEON_INTEGER_OP(cgt);
4766 break;
62698be3 4767 case NEON_3R_VCGE:
9ee6e8bb
PB
4768 GEN_NEON_INTEGER_OP(cge);
4769 break;
62698be3 4770 case NEON_3R_VSHL:
ad69471c 4771 GEN_NEON_INTEGER_OP(shl);
2c0262af 4772 break;
62698be3 4773 case NEON_3R_VQSHL:
02da0b2d 4774 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4775 break;
62698be3 4776 case NEON_3R_VRSHL:
ad69471c 4777 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4778 break;
62698be3 4779 case NEON_3R_VQRSHL:
02da0b2d 4780 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4781 break;
62698be3 4782 case NEON_3R_VMAX:
9ee6e8bb
PB
4783 GEN_NEON_INTEGER_OP(max);
4784 break;
62698be3 4785 case NEON_3R_VMIN:
9ee6e8bb
PB
4786 GEN_NEON_INTEGER_OP(min);
4787 break;
62698be3 4788 case NEON_3R_VABD:
9ee6e8bb
PB
4789 GEN_NEON_INTEGER_OP(abd);
4790 break;
62698be3 4791 case NEON_3R_VABA:
9ee6e8bb 4792 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4793 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4794 tmp2 = neon_load_reg(rd, pass);
4795 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4796 break;
62698be3 4797 case NEON_3R_VADD_VSUB:
9ee6e8bb 4798 if (!u) { /* VADD */
62698be3 4799 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4800 } else { /* VSUB */
4801 switch (size) {
dd8fbd78
FN
4802 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4803 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4804 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4805 default: abort();
9ee6e8bb
PB
4806 }
4807 }
4808 break;
62698be3 4809 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4810 if (!u) { /* VTST */
4811 switch (size) {
dd8fbd78
FN
4812 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4813 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4814 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4815 default: abort();
9ee6e8bb
PB
4816 }
4817 } else { /* VCEQ */
4818 switch (size) {
dd8fbd78
FN
4819 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4820 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4821 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4822 default: abort();
9ee6e8bb
PB
4823 }
4824 }
4825 break;
62698be3 4826 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4827 switch (size) {
dd8fbd78
FN
4828 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4829 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4830 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4831 default: abort();
9ee6e8bb 4832 }
7d1b0095 4833 tcg_temp_free_i32(tmp2);
dd8fbd78 4834 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4835 if (u) { /* VMLS */
dd8fbd78 4836 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4837 } else { /* VMLA */
dd8fbd78 4838 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4839 }
4840 break;
62698be3 4841 case NEON_3R_VMUL:
9ee6e8bb 4842 if (u) { /* polynomial */
dd8fbd78 4843 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4844 } else { /* Integer */
4845 switch (size) {
dd8fbd78
FN
4846 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4847 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4848 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4849 default: abort();
9ee6e8bb
PB
4850 }
4851 }
4852 break;
62698be3 4853 case NEON_3R_VPMAX:
9ee6e8bb
PB
4854 GEN_NEON_INTEGER_OP(pmax);
4855 break;
62698be3 4856 case NEON_3R_VPMIN:
9ee6e8bb
PB
4857 GEN_NEON_INTEGER_OP(pmin);
4858 break;
62698be3 4859 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4860 if (!u) { /* VQDMULH */
4861 switch (size) {
02da0b2d
PM
4862 case 1:
4863 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4864 break;
4865 case 2:
4866 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4867 break;
62698be3 4868 default: abort();
9ee6e8bb 4869 }
62698be3 4870 } else { /* VQRDMULH */
9ee6e8bb 4871 switch (size) {
02da0b2d
PM
4872 case 1:
4873 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4874 break;
4875 case 2:
4876 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4877 break;
62698be3 4878 default: abort();
9ee6e8bb
PB
4879 }
4880 }
4881 break;
62698be3 4882 case NEON_3R_VPADD:
9ee6e8bb 4883 switch (size) {
dd8fbd78
FN
4884 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4885 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4886 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4887 default: abort();
9ee6e8bb
PB
4888 }
4889 break;
62698be3 4890 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4891 {
4892 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4893 switch ((u << 2) | size) {
4894 case 0: /* VADD */
aa47cfdd
PM
4895 case 4: /* VPADD */
4896 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4897 break;
4898 case 2: /* VSUB */
aa47cfdd 4899 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4900 break;
4901 case 6: /* VABD */
aa47cfdd 4902 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4903 break;
4904 default:
62698be3 4905 abort();
9ee6e8bb 4906 }
aa47cfdd 4907 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4908 break;
aa47cfdd 4909 }
62698be3 4910 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4911 {
4912 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4913 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4914 if (!u) {
7d1b0095 4915 tcg_temp_free_i32(tmp2);
dd8fbd78 4916 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4917 if (size == 0) {
aa47cfdd 4918 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4919 } else {
aa47cfdd 4920 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4921 }
4922 }
aa47cfdd 4923 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4924 break;
aa47cfdd 4925 }
62698be3 4926 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4927 {
4928 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4929 if (!u) {
aa47cfdd 4930 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4931 } else {
aa47cfdd
PM
4932 if (size == 0) {
4933 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4934 } else {
4935 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4936 }
b5ff1b31 4937 }
aa47cfdd 4938 tcg_temp_free_ptr(fpstatus);
2c0262af 4939 break;
aa47cfdd 4940 }
62698be3 4941 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4942 {
4943 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4944 if (size == 0) {
4945 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4946 } else {
4947 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4948 }
4949 tcg_temp_free_ptr(fpstatus);
2c0262af 4950 break;
aa47cfdd 4951 }
62698be3 4952 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4953 {
4954 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4955 if (size == 0) {
4956 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4957 } else {
4958 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4959 }
4960 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4961 break;
aa47cfdd 4962 }
62698be3 4963 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4964 if (size == 0)
dd8fbd78 4965 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4966 else
dd8fbd78 4967 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4968 break;
9ee6e8bb
PB
4969 default:
4970 abort();
2c0262af 4971 }
7d1b0095 4972 tcg_temp_free_i32(tmp2);
dd8fbd78 4973
9ee6e8bb
PB
4974 /* Save the result. For elementwise operations we can put it
4975 straight into the destination register. For pairwise operations
4976 we have to be careful to avoid clobbering the source operands. */
4977 if (pairwise && rd == rm) {
dd8fbd78 4978 neon_store_scratch(pass, tmp);
9ee6e8bb 4979 } else {
dd8fbd78 4980 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4981 }
4982
4983 } /* for pass */
4984 if (pairwise && rd == rm) {
4985 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4986 tmp = neon_load_scratch(pass);
4987 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4988 }
4989 }
ad69471c 4990 /* End of 3 register same size operations. */
9ee6e8bb
PB
4991 } else if (insn & (1 << 4)) {
4992 if ((insn & 0x00380080) != 0) {
4993 /* Two registers and shift. */
4994 op = (insn >> 8) & 0xf;
4995 if (insn & (1 << 7)) {
cc13115b
PM
4996 /* 64-bit shift. */
4997 if (op > 7) {
4998 return 1;
4999 }
9ee6e8bb
PB
5000 size = 3;
5001 } else {
5002 size = 2;
5003 while ((insn & (1 << (size + 19))) == 0)
5004 size--;
5005 }
5006 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5007 /* To avoid excessive dumplication of ops we implement shift
5008 by immediate using the variable shift operations. */
5009 if (op < 8) {
5010 /* Shift by immediate:
5011 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5012 if (q && ((rd | rm) & 1)) {
5013 return 1;
5014 }
5015 if (!u && (op == 4 || op == 6)) {
5016 return 1;
5017 }
9ee6e8bb
PB
5018 /* Right shifts are encoded as N - shift, where N is the
5019 element size in bits. */
5020 if (op <= 4)
5021 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5022 if (size == 3) {
5023 count = q + 1;
5024 } else {
5025 count = q ? 4: 2;
5026 }
5027 switch (size) {
5028 case 0:
5029 imm = (uint8_t) shift;
5030 imm |= imm << 8;
5031 imm |= imm << 16;
5032 break;
5033 case 1:
5034 imm = (uint16_t) shift;
5035 imm |= imm << 16;
5036 break;
5037 case 2:
5038 case 3:
5039 imm = shift;
5040 break;
5041 default:
5042 abort();
5043 }
5044
5045 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5046 if (size == 3) {
5047 neon_load_reg64(cpu_V0, rm + pass);
5048 tcg_gen_movi_i64(cpu_V1, imm);
5049 switch (op) {
5050 case 0: /* VSHR */
5051 case 1: /* VSRA */
5052 if (u)
5053 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5054 else
ad69471c 5055 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5056 break;
ad69471c
PB
5057 case 2: /* VRSHR */
5058 case 3: /* VRSRA */
5059 if (u)
5060 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5061 else
ad69471c 5062 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5063 break;
ad69471c 5064 case 4: /* VSRI */
ad69471c
PB
5065 case 5: /* VSHL, VSLI */
5066 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5067 break;
0322b26e 5068 case 6: /* VQSHLU */
02da0b2d
PM
5069 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5070 cpu_V0, cpu_V1);
ad69471c 5071 break;
0322b26e
PM
5072 case 7: /* VQSHL */
5073 if (u) {
02da0b2d 5074 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5075 cpu_V0, cpu_V1);
5076 } else {
02da0b2d 5077 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5078 cpu_V0, cpu_V1);
5079 }
9ee6e8bb 5080 break;
9ee6e8bb 5081 }
ad69471c
PB
5082 if (op == 1 || op == 3) {
5083 /* Accumulate. */
5371cb81 5084 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5085 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5086 } else if (op == 4 || (op == 5 && u)) {
5087 /* Insert */
923e6509
CL
5088 neon_load_reg64(cpu_V1, rd + pass);
5089 uint64_t mask;
5090 if (shift < -63 || shift > 63) {
5091 mask = 0;
5092 } else {
5093 if (op == 4) {
5094 mask = 0xffffffffffffffffull >> -shift;
5095 } else {
5096 mask = 0xffffffffffffffffull << shift;
5097 }
5098 }
5099 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5100 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5101 }
5102 neon_store_reg64(cpu_V0, rd + pass);
5103 } else { /* size < 3 */
5104 /* Operands in T0 and T1. */
dd8fbd78 5105 tmp = neon_load_reg(rm, pass);
7d1b0095 5106 tmp2 = tcg_temp_new_i32();
dd8fbd78 5107 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5108 switch (op) {
5109 case 0: /* VSHR */
5110 case 1: /* VSRA */
5111 GEN_NEON_INTEGER_OP(shl);
5112 break;
5113 case 2: /* VRSHR */
5114 case 3: /* VRSRA */
5115 GEN_NEON_INTEGER_OP(rshl);
5116 break;
5117 case 4: /* VSRI */
ad69471c
PB
5118 case 5: /* VSHL, VSLI */
5119 switch (size) {
dd8fbd78
FN
5120 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5121 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5122 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5123 default: abort();
ad69471c
PB
5124 }
5125 break;
0322b26e 5126 case 6: /* VQSHLU */
ad69471c 5127 switch (size) {
0322b26e 5128 case 0:
02da0b2d
PM
5129 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5130 tmp, tmp2);
0322b26e
PM
5131 break;
5132 case 1:
02da0b2d
PM
5133 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5134 tmp, tmp2);
0322b26e
PM
5135 break;
5136 case 2:
02da0b2d
PM
5137 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5138 tmp, tmp2);
0322b26e
PM
5139 break;
5140 default:
cc13115b 5141 abort();
ad69471c
PB
5142 }
5143 break;
0322b26e 5144 case 7: /* VQSHL */
02da0b2d 5145 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5146 break;
ad69471c 5147 }
7d1b0095 5148 tcg_temp_free_i32(tmp2);
ad69471c
PB
5149
5150 if (op == 1 || op == 3) {
5151 /* Accumulate. */
dd8fbd78 5152 tmp2 = neon_load_reg(rd, pass);
5371cb81 5153 gen_neon_add(size, tmp, tmp2);
7d1b0095 5154 tcg_temp_free_i32(tmp2);
ad69471c
PB
5155 } else if (op == 4 || (op == 5 && u)) {
5156 /* Insert */
5157 switch (size) {
5158 case 0:
5159 if (op == 4)
ca9a32e4 5160 mask = 0xff >> -shift;
ad69471c 5161 else
ca9a32e4
JR
5162 mask = (uint8_t)(0xff << shift);
5163 mask |= mask << 8;
5164 mask |= mask << 16;
ad69471c
PB
5165 break;
5166 case 1:
5167 if (op == 4)
ca9a32e4 5168 mask = 0xffff >> -shift;
ad69471c 5169 else
ca9a32e4
JR
5170 mask = (uint16_t)(0xffff << shift);
5171 mask |= mask << 16;
ad69471c
PB
5172 break;
5173 case 2:
ca9a32e4
JR
5174 if (shift < -31 || shift > 31) {
5175 mask = 0;
5176 } else {
5177 if (op == 4)
5178 mask = 0xffffffffu >> -shift;
5179 else
5180 mask = 0xffffffffu << shift;
5181 }
ad69471c
PB
5182 break;
5183 default:
5184 abort();
5185 }
dd8fbd78 5186 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5187 tcg_gen_andi_i32(tmp, tmp, mask);
5188 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5189 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5190 tcg_temp_free_i32(tmp2);
ad69471c 5191 }
dd8fbd78 5192 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5193 }
5194 } /* for pass */
5195 } else if (op < 10) {
ad69471c 5196 /* Shift by immediate and narrow:
9ee6e8bb 5197 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5198 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5199 if (rm & 1) {
5200 return 1;
5201 }
9ee6e8bb
PB
5202 shift = shift - (1 << (size + 3));
5203 size++;
92cdfaeb 5204 if (size == 3) {
a7812ae4 5205 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5206 neon_load_reg64(cpu_V0, rm);
5207 neon_load_reg64(cpu_V1, rm + 1);
5208 for (pass = 0; pass < 2; pass++) {
5209 TCGv_i64 in;
5210 if (pass == 0) {
5211 in = cpu_V0;
5212 } else {
5213 in = cpu_V1;
5214 }
ad69471c 5215 if (q) {
0b36f4cd 5216 if (input_unsigned) {
92cdfaeb 5217 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5218 } else {
92cdfaeb 5219 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5220 }
ad69471c 5221 } else {
0b36f4cd 5222 if (input_unsigned) {
92cdfaeb 5223 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5224 } else {
92cdfaeb 5225 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5226 }
ad69471c 5227 }
7d1b0095 5228 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5229 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5230 neon_store_reg(rd, pass, tmp);
5231 } /* for pass */
5232 tcg_temp_free_i64(tmp64);
5233 } else {
5234 if (size == 1) {
5235 imm = (uint16_t)shift;
5236 imm |= imm << 16;
2c0262af 5237 } else {
92cdfaeb
PM
5238 /* size == 2 */
5239 imm = (uint32_t)shift;
5240 }
5241 tmp2 = tcg_const_i32(imm);
5242 tmp4 = neon_load_reg(rm + 1, 0);
5243 tmp5 = neon_load_reg(rm + 1, 1);
5244 for (pass = 0; pass < 2; pass++) {
5245 if (pass == 0) {
5246 tmp = neon_load_reg(rm, 0);
5247 } else {
5248 tmp = tmp4;
5249 }
0b36f4cd
CL
5250 gen_neon_shift_narrow(size, tmp, tmp2, q,
5251 input_unsigned);
92cdfaeb
PM
5252 if (pass == 0) {
5253 tmp3 = neon_load_reg(rm, 1);
5254 } else {
5255 tmp3 = tmp5;
5256 }
0b36f4cd
CL
5257 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5258 input_unsigned);
36aa55dc 5259 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5260 tcg_temp_free_i32(tmp);
5261 tcg_temp_free_i32(tmp3);
5262 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5263 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5264 neon_store_reg(rd, pass, tmp);
5265 } /* for pass */
c6067f04 5266 tcg_temp_free_i32(tmp2);
b75263d6 5267 }
9ee6e8bb 5268 } else if (op == 10) {
cc13115b
PM
5269 /* VSHLL, VMOVL */
5270 if (q || (rd & 1)) {
9ee6e8bb 5271 return 1;
cc13115b 5272 }
ad69471c
PB
5273 tmp = neon_load_reg(rm, 0);
5274 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5275 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5276 if (pass == 1)
5277 tmp = tmp2;
5278
5279 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5280
9ee6e8bb
PB
5281 if (shift != 0) {
5282 /* The shift is less than the width of the source
ad69471c
PB
5283 type, so we can just shift the whole register. */
5284 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5285 /* Widen the result of shift: we need to clear
5286 * the potential overflow bits resulting from
5287 * left bits of the narrow input appearing as
5288 * right bits of left the neighbour narrow
5289 * input. */
ad69471c
PB
5290 if (size < 2 || !u) {
5291 uint64_t imm64;
5292 if (size == 0) {
5293 imm = (0xffu >> (8 - shift));
5294 imm |= imm << 16;
acdf01ef 5295 } else if (size == 1) {
ad69471c 5296 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5297 } else {
5298 /* size == 2 */
5299 imm = 0xffffffff >> (32 - shift);
5300 }
5301 if (size < 2) {
5302 imm64 = imm | (((uint64_t)imm) << 32);
5303 } else {
5304 imm64 = imm;
9ee6e8bb 5305 }
acdf01ef 5306 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5307 }
5308 }
ad69471c 5309 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5310 }
f73534a5 5311 } else if (op >= 14) {
9ee6e8bb 5312 /* VCVT fixed-point. */
cc13115b
PM
5313 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5314 return 1;
5315 }
f73534a5
PM
5316 /* We have already masked out the must-be-1 top bit of imm6,
5317 * hence this 32-shift where the ARM ARM has 64-imm6.
5318 */
5319 shift = 32 - shift;
9ee6e8bb 5320 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5321 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5322 if (!(op & 1)) {
9ee6e8bb 5323 if (u)
5500b06c 5324 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5325 else
5500b06c 5326 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5327 } else {
5328 if (u)
5500b06c 5329 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5330 else
5500b06c 5331 gen_vfp_tosl(0, shift, 1);
2c0262af 5332 }
4373f3ce 5333 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5334 }
5335 } else {
9ee6e8bb
PB
5336 return 1;
5337 }
5338 } else { /* (insn & 0x00380080) == 0 */
5339 int invert;
7d80fee5
PM
5340 if (q && (rd & 1)) {
5341 return 1;
5342 }
9ee6e8bb
PB
5343
5344 op = (insn >> 8) & 0xf;
5345 /* One register and immediate. */
5346 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5347 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5348 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5349 * We choose to not special-case this and will behave as if a
5350 * valid constant encoding of 0 had been given.
5351 */
9ee6e8bb
PB
5352 switch (op) {
5353 case 0: case 1:
5354 /* no-op */
5355 break;
5356 case 2: case 3:
5357 imm <<= 8;
5358 break;
5359 case 4: case 5:
5360 imm <<= 16;
5361 break;
5362 case 6: case 7:
5363 imm <<= 24;
5364 break;
5365 case 8: case 9:
5366 imm |= imm << 16;
5367 break;
5368 case 10: case 11:
5369 imm = (imm << 8) | (imm << 24);
5370 break;
5371 case 12:
8e31209e 5372 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5373 break;
5374 case 13:
5375 imm = (imm << 16) | 0xffff;
5376 break;
5377 case 14:
5378 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5379 if (invert)
5380 imm = ~imm;
5381 break;
5382 case 15:
7d80fee5
PM
5383 if (invert) {
5384 return 1;
5385 }
9ee6e8bb
PB
5386 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5387 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5388 break;
5389 }
5390 if (invert)
5391 imm = ~imm;
5392
9ee6e8bb
PB
5393 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5394 if (op & 1 && op < 12) {
ad69471c 5395 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5396 if (invert) {
5397 /* The immediate value has already been inverted, so
5398 BIC becomes AND. */
ad69471c 5399 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5400 } else {
ad69471c 5401 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5402 }
9ee6e8bb 5403 } else {
ad69471c 5404 /* VMOV, VMVN. */
7d1b0095 5405 tmp = tcg_temp_new_i32();
9ee6e8bb 5406 if (op == 14 && invert) {
a5a14945 5407 int n;
ad69471c
PB
5408 uint32_t val;
5409 val = 0;
9ee6e8bb
PB
5410 for (n = 0; n < 4; n++) {
5411 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5412 val |= 0xff << (n * 8);
9ee6e8bb 5413 }
ad69471c
PB
5414 tcg_gen_movi_i32(tmp, val);
5415 } else {
5416 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5417 }
9ee6e8bb 5418 }
ad69471c 5419 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5420 }
5421 }
e4b3861d 5422 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5423 if (size != 3) {
5424 op = (insn >> 8) & 0xf;
5425 if ((insn & (1 << 6)) == 0) {
5426 /* Three registers of different lengths. */
5427 int src1_wide;
5428 int src2_wide;
5429 int prewiden;
695272dc
PM
5430 /* undefreq: bit 0 : UNDEF if size != 0
5431 * bit 1 : UNDEF if size == 0
5432 * bit 2 : UNDEF if U == 1
5433 * Note that [1:0] set implies 'always UNDEF'
5434 */
5435 int undefreq;
5436 /* prewiden, src1_wide, src2_wide, undefreq */
5437 static const int neon_3reg_wide[16][4] = {
5438 {1, 0, 0, 0}, /* VADDL */
5439 {1, 1, 0, 0}, /* VADDW */
5440 {1, 0, 0, 0}, /* VSUBL */
5441 {1, 1, 0, 0}, /* VSUBW */
5442 {0, 1, 1, 0}, /* VADDHN */
5443 {0, 0, 0, 0}, /* VABAL */
5444 {0, 1, 1, 0}, /* VSUBHN */
5445 {0, 0, 0, 0}, /* VABDL */
5446 {0, 0, 0, 0}, /* VMLAL */
5447 {0, 0, 0, 6}, /* VQDMLAL */
5448 {0, 0, 0, 0}, /* VMLSL */
5449 {0, 0, 0, 6}, /* VQDMLSL */
5450 {0, 0, 0, 0}, /* Integer VMULL */
5451 {0, 0, 0, 2}, /* VQDMULL */
5452 {0, 0, 0, 5}, /* Polynomial VMULL */
5453 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5454 };
5455
5456 prewiden = neon_3reg_wide[op][0];
5457 src1_wide = neon_3reg_wide[op][1];
5458 src2_wide = neon_3reg_wide[op][2];
695272dc 5459 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5460
695272dc
PM
5461 if (((undefreq & 1) && (size != 0)) ||
5462 ((undefreq & 2) && (size == 0)) ||
5463 ((undefreq & 4) && u)) {
5464 return 1;
5465 }
5466 if ((src1_wide && (rn & 1)) ||
5467 (src2_wide && (rm & 1)) ||
5468 (!src2_wide && (rd & 1))) {
ad69471c 5469 return 1;
695272dc 5470 }
ad69471c 5471
9ee6e8bb
PB
5472 /* Avoid overlapping operands. Wide source operands are
5473 always aligned so will never overlap with wide
5474 destinations in problematic ways. */
8f8e3aa4 5475 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5476 tmp = neon_load_reg(rm, 1);
5477 neon_store_scratch(2, tmp);
8f8e3aa4 5478 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5479 tmp = neon_load_reg(rn, 1);
5480 neon_store_scratch(2, tmp);
9ee6e8bb 5481 }
a50f5b91 5482 TCGV_UNUSED(tmp3);
9ee6e8bb 5483 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5484 if (src1_wide) {
5485 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5486 TCGV_UNUSED(tmp);
9ee6e8bb 5487 } else {
ad69471c 5488 if (pass == 1 && rd == rn) {
dd8fbd78 5489 tmp = neon_load_scratch(2);
9ee6e8bb 5490 } else {
ad69471c
PB
5491 tmp = neon_load_reg(rn, pass);
5492 }
5493 if (prewiden) {
5494 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5495 }
5496 }
ad69471c
PB
5497 if (src2_wide) {
5498 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5499 TCGV_UNUSED(tmp2);
9ee6e8bb 5500 } else {
ad69471c 5501 if (pass == 1 && rd == rm) {
dd8fbd78 5502 tmp2 = neon_load_scratch(2);
9ee6e8bb 5503 } else {
ad69471c
PB
5504 tmp2 = neon_load_reg(rm, pass);
5505 }
5506 if (prewiden) {
5507 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5508 }
9ee6e8bb
PB
5509 }
5510 switch (op) {
5511 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5512 gen_neon_addl(size);
9ee6e8bb 5513 break;
79b0e534 5514 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5515 gen_neon_subl(size);
9ee6e8bb
PB
5516 break;
5517 case 5: case 7: /* VABAL, VABDL */
5518 switch ((size << 1) | u) {
ad69471c
PB
5519 case 0:
5520 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5521 break;
5522 case 1:
5523 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5524 break;
5525 case 2:
5526 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5527 break;
5528 case 3:
5529 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5530 break;
5531 case 4:
5532 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5533 break;
5534 case 5:
5535 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5536 break;
9ee6e8bb
PB
5537 default: abort();
5538 }
7d1b0095
PM
5539 tcg_temp_free_i32(tmp2);
5540 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5541 break;
5542 case 8: case 9: case 10: case 11: case 12: case 13:
5543 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5544 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5545 break;
5546 case 14: /* Polynomial VMULL */
e5ca24cb 5547 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5548 tcg_temp_free_i32(tmp2);
5549 tcg_temp_free_i32(tmp);
e5ca24cb 5550 break;
695272dc
PM
5551 default: /* 15 is RESERVED: caught earlier */
5552 abort();
9ee6e8bb 5553 }
ebcd88ce
PM
5554 if (op == 13) {
5555 /* VQDMULL */
5556 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5557 neon_store_reg64(cpu_V0, rd + pass);
5558 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5559 /* Accumulate. */
ebcd88ce 5560 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5561 switch (op) {
4dc064e6
PM
5562 case 10: /* VMLSL */
5563 gen_neon_negl(cpu_V0, size);
5564 /* Fall through */
5565 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5566 gen_neon_addl(size);
9ee6e8bb
PB
5567 break;
5568 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5569 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5570 if (op == 11) {
5571 gen_neon_negl(cpu_V0, size);
5572 }
ad69471c
PB
5573 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5574 break;
9ee6e8bb
PB
5575 default:
5576 abort();
5577 }
ad69471c 5578 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5579 } else if (op == 4 || op == 6) {
5580 /* Narrowing operation. */
7d1b0095 5581 tmp = tcg_temp_new_i32();
79b0e534 5582 if (!u) {
9ee6e8bb 5583 switch (size) {
ad69471c
PB
5584 case 0:
5585 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5586 break;
5587 case 1:
5588 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5589 break;
5590 case 2:
5591 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5592 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5593 break;
9ee6e8bb
PB
5594 default: abort();
5595 }
5596 } else {
5597 switch (size) {
ad69471c
PB
5598 case 0:
5599 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5600 break;
5601 case 1:
5602 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5603 break;
5604 case 2:
5605 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5606 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5607 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5608 break;
9ee6e8bb
PB
5609 default: abort();
5610 }
5611 }
ad69471c
PB
5612 if (pass == 0) {
5613 tmp3 = tmp;
5614 } else {
5615 neon_store_reg(rd, 0, tmp3);
5616 neon_store_reg(rd, 1, tmp);
5617 }
9ee6e8bb
PB
5618 } else {
5619 /* Write back the result. */
ad69471c 5620 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5621 }
5622 }
5623 } else {
3e3326df
PM
5624 /* Two registers and a scalar. NB that for ops of this form
5625 * the ARM ARM labels bit 24 as Q, but it is in our variable
5626 * 'u', not 'q'.
5627 */
5628 if (size == 0) {
5629 return 1;
5630 }
9ee6e8bb 5631 switch (op) {
9ee6e8bb 5632 case 1: /* Float VMLA scalar */
9ee6e8bb 5633 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5634 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5635 if (size == 1) {
5636 return 1;
5637 }
5638 /* fall through */
5639 case 0: /* Integer VMLA scalar */
5640 case 4: /* Integer VMLS scalar */
5641 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5642 case 12: /* VQDMULH scalar */
5643 case 13: /* VQRDMULH scalar */
3e3326df
PM
5644 if (u && ((rd | rn) & 1)) {
5645 return 1;
5646 }
dd8fbd78
FN
5647 tmp = neon_get_scalar(size, rm);
5648 neon_store_scratch(0, tmp);
9ee6e8bb 5649 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5650 tmp = neon_load_scratch(0);
5651 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5652 if (op == 12) {
5653 if (size == 1) {
02da0b2d 5654 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5655 } else {
02da0b2d 5656 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5657 }
5658 } else if (op == 13) {
5659 if (size == 1) {
02da0b2d 5660 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5661 } else {
02da0b2d 5662 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5663 }
5664 } else if (op & 1) {
aa47cfdd
PM
5665 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5666 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5667 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5668 } else {
5669 switch (size) {
dd8fbd78
FN
5670 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5671 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5672 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5673 default: abort();
9ee6e8bb
PB
5674 }
5675 }
7d1b0095 5676 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5677 if (op < 8) {
5678 /* Accumulate. */
dd8fbd78 5679 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5680 switch (op) {
5681 case 0:
dd8fbd78 5682 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5683 break;
5684 case 1:
aa47cfdd
PM
5685 {
5686 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5687 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5688 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5689 break;
aa47cfdd 5690 }
9ee6e8bb 5691 case 4:
dd8fbd78 5692 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5693 break;
5694 case 5:
aa47cfdd
PM
5695 {
5696 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5697 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5698 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5699 break;
aa47cfdd 5700 }
9ee6e8bb
PB
5701 default:
5702 abort();
5703 }
7d1b0095 5704 tcg_temp_free_i32(tmp2);
9ee6e8bb 5705 }
dd8fbd78 5706 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5707 }
5708 break;
9ee6e8bb 5709 case 3: /* VQDMLAL scalar */
9ee6e8bb 5710 case 7: /* VQDMLSL scalar */
9ee6e8bb 5711 case 11: /* VQDMULL scalar */
3e3326df 5712 if (u == 1) {
ad69471c 5713 return 1;
3e3326df
PM
5714 }
5715 /* fall through */
5716 case 2: /* VMLAL scalar */
5717 case 6: /* VMLSL scalar */
5718 case 10: /* VMULL scalar */
5719 if (rd & 1) {
5720 return 1;
5721 }
dd8fbd78 5722 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5723 /* We need a copy of tmp2 because gen_neon_mull
5724 * deletes it during pass 0. */
7d1b0095 5725 tmp4 = tcg_temp_new_i32();
c6067f04 5726 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5727 tmp3 = neon_load_reg(rn, 1);
ad69471c 5728
9ee6e8bb 5729 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5730 if (pass == 0) {
5731 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5732 } else {
dd8fbd78 5733 tmp = tmp3;
c6067f04 5734 tmp2 = tmp4;
9ee6e8bb 5735 }
ad69471c 5736 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5737 if (op != 11) {
5738 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5739 }
9ee6e8bb 5740 switch (op) {
4dc064e6
PM
5741 case 6:
5742 gen_neon_negl(cpu_V0, size);
5743 /* Fall through */
5744 case 2:
ad69471c 5745 gen_neon_addl(size);
9ee6e8bb
PB
5746 break;
5747 case 3: case 7:
ad69471c 5748 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5749 if (op == 7) {
5750 gen_neon_negl(cpu_V0, size);
5751 }
ad69471c 5752 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5753 break;
5754 case 10:
5755 /* no-op */
5756 break;
5757 case 11:
ad69471c 5758 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5759 break;
5760 default:
5761 abort();
5762 }
ad69471c 5763 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5764 }
dd8fbd78 5765
dd8fbd78 5766
9ee6e8bb
PB
5767 break;
5768 default: /* 14 and 15 are RESERVED */
5769 return 1;
5770 }
5771 }
5772 } else { /* size == 3 */
5773 if (!u) {
5774 /* Extract. */
9ee6e8bb 5775 imm = (insn >> 8) & 0xf;
ad69471c
PB
5776
5777 if (imm > 7 && !q)
5778 return 1;
5779
52579ea1
PM
5780 if (q && ((rd | rn | rm) & 1)) {
5781 return 1;
5782 }
5783
ad69471c
PB
5784 if (imm == 0) {
5785 neon_load_reg64(cpu_V0, rn);
5786 if (q) {
5787 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5788 }
ad69471c
PB
5789 } else if (imm == 8) {
5790 neon_load_reg64(cpu_V0, rn + 1);
5791 if (q) {
5792 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5793 }
ad69471c 5794 } else if (q) {
a7812ae4 5795 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5796 if (imm < 8) {
5797 neon_load_reg64(cpu_V0, rn);
a7812ae4 5798 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5799 } else {
5800 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5801 neon_load_reg64(tmp64, rm);
ad69471c
PB
5802 }
5803 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5804 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5805 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5806 if (imm < 8) {
5807 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5808 } else {
ad69471c
PB
5809 neon_load_reg64(cpu_V1, rm + 1);
5810 imm -= 8;
9ee6e8bb 5811 }
ad69471c 5812 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5813 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5814 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5815 tcg_temp_free_i64(tmp64);
ad69471c 5816 } else {
a7812ae4 5817 /* BUGFIX */
ad69471c 5818 neon_load_reg64(cpu_V0, rn);
a7812ae4 5819 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5820 neon_load_reg64(cpu_V1, rm);
a7812ae4 5821 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5822 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5823 }
5824 neon_store_reg64(cpu_V0, rd);
5825 if (q) {
5826 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5827 }
5828 } else if ((insn & (1 << 11)) == 0) {
5829 /* Two register misc. */
5830 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5831 size = (insn >> 18) & 3;
600b828c
PM
5832 /* UNDEF for unknown op values and bad op-size combinations */
5833 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5834 return 1;
5835 }
fc2a9b37
PM
5836 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5837 q && ((rm | rd) & 1)) {
5838 return 1;
5839 }
9ee6e8bb 5840 switch (op) {
600b828c 5841 case NEON_2RM_VREV64:
9ee6e8bb 5842 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5843 tmp = neon_load_reg(rm, pass * 2);
5844 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5845 switch (size) {
dd8fbd78
FN
5846 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5847 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5848 case 2: /* no-op */ break;
5849 default: abort();
5850 }
dd8fbd78 5851 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5852 if (size == 2) {
dd8fbd78 5853 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5854 } else {
9ee6e8bb 5855 switch (size) {
dd8fbd78
FN
5856 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5857 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5858 default: abort();
5859 }
dd8fbd78 5860 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5861 }
5862 }
5863 break;
600b828c
PM
5864 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5865 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5866 for (pass = 0; pass < q + 1; pass++) {
5867 tmp = neon_load_reg(rm, pass * 2);
5868 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5869 tmp = neon_load_reg(rm, pass * 2 + 1);
5870 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5871 switch (size) {
5872 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5873 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5874 case 2: tcg_gen_add_i64(CPU_V001); break;
5875 default: abort();
5876 }
600b828c 5877 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5878 /* Accumulate. */
ad69471c
PB
5879 neon_load_reg64(cpu_V1, rd + pass);
5880 gen_neon_addl(size);
9ee6e8bb 5881 }
ad69471c 5882 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5883 }
5884 break;
600b828c 5885 case NEON_2RM_VTRN:
9ee6e8bb 5886 if (size == 2) {
a5a14945 5887 int n;
9ee6e8bb 5888 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5889 tmp = neon_load_reg(rm, n);
5890 tmp2 = neon_load_reg(rd, n + 1);
5891 neon_store_reg(rm, n, tmp2);
5892 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5893 }
5894 } else {
5895 goto elementwise;
5896 }
5897 break;
600b828c 5898 case NEON_2RM_VUZP:
02acedf9 5899 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5900 return 1;
9ee6e8bb
PB
5901 }
5902 break;
600b828c 5903 case NEON_2RM_VZIP:
d68a6f3a 5904 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5905 return 1;
9ee6e8bb
PB
5906 }
5907 break;
600b828c
PM
5908 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5909 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5910 if (rm & 1) {
5911 return 1;
5912 }
a50f5b91 5913 TCGV_UNUSED(tmp2);
9ee6e8bb 5914 for (pass = 0; pass < 2; pass++) {
ad69471c 5915 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5916 tmp = tcg_temp_new_i32();
600b828c
PM
5917 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5918 tmp, cpu_V0);
ad69471c
PB
5919 if (pass == 0) {
5920 tmp2 = tmp;
5921 } else {
5922 neon_store_reg(rd, 0, tmp2);
5923 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5924 }
9ee6e8bb
PB
5925 }
5926 break;
600b828c 5927 case NEON_2RM_VSHLL:
fc2a9b37 5928 if (q || (rd & 1)) {
9ee6e8bb 5929 return 1;
600b828c 5930 }
ad69471c
PB
5931 tmp = neon_load_reg(rm, 0);
5932 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5933 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5934 if (pass == 1)
5935 tmp = tmp2;
5936 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5937 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5938 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5939 }
5940 break;
600b828c 5941 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5942 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5943 q || (rm & 1)) {
5944 return 1;
5945 }
7d1b0095
PM
5946 tmp = tcg_temp_new_i32();
5947 tmp2 = tcg_temp_new_i32();
60011498 5948 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5949 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5950 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5951 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5952 tcg_gen_shli_i32(tmp2, tmp2, 16);
5953 tcg_gen_or_i32(tmp2, tmp2, tmp);
5954 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5955 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5956 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5957 neon_store_reg(rd, 0, tmp2);
7d1b0095 5958 tmp2 = tcg_temp_new_i32();
2d981da7 5959 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5960 tcg_gen_shli_i32(tmp2, tmp2, 16);
5961 tcg_gen_or_i32(tmp2, tmp2, tmp);
5962 neon_store_reg(rd, 1, tmp2);
7d1b0095 5963 tcg_temp_free_i32(tmp);
60011498 5964 break;
600b828c 5965 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5966 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5967 q || (rd & 1)) {
5968 return 1;
5969 }
7d1b0095 5970 tmp3 = tcg_temp_new_i32();
60011498
PB
5971 tmp = neon_load_reg(rm, 0);
5972 tmp2 = neon_load_reg(rm, 1);
5973 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5974 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5975 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5976 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5977 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5978 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5979 tcg_temp_free_i32(tmp);
60011498 5980 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5981 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5982 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5983 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5984 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5985 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5986 tcg_temp_free_i32(tmp2);
5987 tcg_temp_free_i32(tmp3);
60011498 5988 break;
9ee6e8bb
PB
5989 default:
5990 elementwise:
5991 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5992 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5993 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5994 neon_reg_offset(rm, pass));
dd8fbd78 5995 TCGV_UNUSED(tmp);
9ee6e8bb 5996 } else {
dd8fbd78 5997 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5998 }
5999 switch (op) {
600b828c 6000 case NEON_2RM_VREV32:
9ee6e8bb 6001 switch (size) {
dd8fbd78
FN
6002 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6003 case 1: gen_swap_half(tmp); break;
600b828c 6004 default: abort();
9ee6e8bb
PB
6005 }
6006 break;
600b828c 6007 case NEON_2RM_VREV16:
dd8fbd78 6008 gen_rev16(tmp);
9ee6e8bb 6009 break;
600b828c 6010 case NEON_2RM_VCLS:
9ee6e8bb 6011 switch (size) {
dd8fbd78
FN
6012 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6013 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6014 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6015 default: abort();
9ee6e8bb
PB
6016 }
6017 break;
600b828c 6018 case NEON_2RM_VCLZ:
9ee6e8bb 6019 switch (size) {
dd8fbd78
FN
6020 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6021 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6022 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6023 default: abort();
9ee6e8bb
PB
6024 }
6025 break;
600b828c 6026 case NEON_2RM_VCNT:
dd8fbd78 6027 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6028 break;
600b828c 6029 case NEON_2RM_VMVN:
dd8fbd78 6030 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6031 break;
600b828c 6032 case NEON_2RM_VQABS:
9ee6e8bb 6033 switch (size) {
02da0b2d
PM
6034 case 0:
6035 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6036 break;
6037 case 1:
6038 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6039 break;
6040 case 2:
6041 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6042 break;
600b828c 6043 default: abort();
9ee6e8bb
PB
6044 }
6045 break;
600b828c 6046 case NEON_2RM_VQNEG:
9ee6e8bb 6047 switch (size) {
02da0b2d
PM
6048 case 0:
6049 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6050 break;
6051 case 1:
6052 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6053 break;
6054 case 2:
6055 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6056 break;
600b828c 6057 default: abort();
9ee6e8bb
PB
6058 }
6059 break;
600b828c 6060 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6061 tmp2 = tcg_const_i32(0);
9ee6e8bb 6062 switch(size) {
dd8fbd78
FN
6063 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6064 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6065 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6066 default: abort();
9ee6e8bb 6067 }
dd8fbd78 6068 tcg_temp_free(tmp2);
600b828c 6069 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6070 tcg_gen_not_i32(tmp, tmp);
600b828c 6071 }
9ee6e8bb 6072 break;
600b828c 6073 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6074 tmp2 = tcg_const_i32(0);
9ee6e8bb 6075 switch(size) {
dd8fbd78
FN
6076 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6077 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6078 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6079 default: abort();
9ee6e8bb 6080 }
dd8fbd78 6081 tcg_temp_free(tmp2);
600b828c 6082 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6083 tcg_gen_not_i32(tmp, tmp);
600b828c 6084 }
9ee6e8bb 6085 break;
600b828c 6086 case NEON_2RM_VCEQ0:
dd8fbd78 6087 tmp2 = tcg_const_i32(0);
9ee6e8bb 6088 switch(size) {
dd8fbd78
FN
6089 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6090 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6091 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6092 default: abort();
9ee6e8bb 6093 }
dd8fbd78 6094 tcg_temp_free(tmp2);
9ee6e8bb 6095 break;
600b828c 6096 case NEON_2RM_VABS:
9ee6e8bb 6097 switch(size) {
dd8fbd78
FN
6098 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6099 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6100 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6101 default: abort();
9ee6e8bb
PB
6102 }
6103 break;
600b828c 6104 case NEON_2RM_VNEG:
dd8fbd78
FN
6105 tmp2 = tcg_const_i32(0);
6106 gen_neon_rsb(size, tmp, tmp2);
6107 tcg_temp_free(tmp2);
9ee6e8bb 6108 break;
600b828c 6109 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6110 {
6111 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6112 tmp2 = tcg_const_i32(0);
aa47cfdd 6113 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6114 tcg_temp_free(tmp2);
aa47cfdd 6115 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6116 break;
aa47cfdd 6117 }
600b828c 6118 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6119 {
6120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6121 tmp2 = tcg_const_i32(0);
aa47cfdd 6122 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6123 tcg_temp_free(tmp2);
aa47cfdd 6124 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6125 break;
aa47cfdd 6126 }
600b828c 6127 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6128 {
6129 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6130 tmp2 = tcg_const_i32(0);
aa47cfdd 6131 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6132 tcg_temp_free(tmp2);
aa47cfdd 6133 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6134 break;
aa47cfdd 6135 }
600b828c 6136 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6137 {
6138 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6139 tmp2 = tcg_const_i32(0);
aa47cfdd 6140 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6141 tcg_temp_free(tmp2);
aa47cfdd 6142 tcg_temp_free_ptr(fpstatus);
0e326109 6143 break;
aa47cfdd 6144 }
600b828c 6145 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6146 {
6147 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6148 tmp2 = tcg_const_i32(0);
aa47cfdd 6149 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6150 tcg_temp_free(tmp2);
aa47cfdd 6151 tcg_temp_free_ptr(fpstatus);
0e326109 6152 break;
aa47cfdd 6153 }
600b828c 6154 case NEON_2RM_VABS_F:
4373f3ce 6155 gen_vfp_abs(0);
9ee6e8bb 6156 break;
600b828c 6157 case NEON_2RM_VNEG_F:
4373f3ce 6158 gen_vfp_neg(0);
9ee6e8bb 6159 break;
600b828c 6160 case NEON_2RM_VSWP:
dd8fbd78
FN
6161 tmp2 = neon_load_reg(rd, pass);
6162 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6163 break;
600b828c 6164 case NEON_2RM_VTRN:
dd8fbd78 6165 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6166 switch (size) {
dd8fbd78
FN
6167 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6168 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6169 default: abort();
9ee6e8bb 6170 }
dd8fbd78 6171 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6172 break;
600b828c 6173 case NEON_2RM_VRECPE:
dd8fbd78 6174 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6175 break;
600b828c 6176 case NEON_2RM_VRSQRTE:
dd8fbd78 6177 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6178 break;
600b828c 6179 case NEON_2RM_VRECPE_F:
4373f3ce 6180 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6181 break;
600b828c 6182 case NEON_2RM_VRSQRTE_F:
4373f3ce 6183 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6184 break;
600b828c 6185 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6186 gen_vfp_sito(0, 1);
9ee6e8bb 6187 break;
600b828c 6188 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6189 gen_vfp_uito(0, 1);
9ee6e8bb 6190 break;
600b828c 6191 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6192 gen_vfp_tosiz(0, 1);
9ee6e8bb 6193 break;
600b828c 6194 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6195 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6196 break;
6197 default:
600b828c
PM
6198 /* Reserved op values were caught by the
6199 * neon_2rm_sizes[] check earlier.
6200 */
6201 abort();
9ee6e8bb 6202 }
600b828c 6203 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6204 tcg_gen_st_f32(cpu_F0s, cpu_env,
6205 neon_reg_offset(rd, pass));
9ee6e8bb 6206 } else {
dd8fbd78 6207 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6208 }
6209 }
6210 break;
6211 }
6212 } else if ((insn & (1 << 10)) == 0) {
6213 /* VTBL, VTBX. */
56907d77
PM
6214 int n = ((insn >> 8) & 3) + 1;
6215 if ((rn + n) > 32) {
6216 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6217 * helper function running off the end of the register file.
6218 */
6219 return 1;
6220 }
6221 n <<= 3;
9ee6e8bb 6222 if (insn & (1 << 6)) {
8f8e3aa4 6223 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6224 } else {
7d1b0095 6225 tmp = tcg_temp_new_i32();
8f8e3aa4 6226 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6227 }
8f8e3aa4 6228 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6229 tmp4 = tcg_const_i32(rn);
6230 tmp5 = tcg_const_i32(n);
6231 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6232 tcg_temp_free_i32(tmp);
9ee6e8bb 6233 if (insn & (1 << 6)) {
8f8e3aa4 6234 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6235 } else {
7d1b0095 6236 tmp = tcg_temp_new_i32();
8f8e3aa4 6237 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6238 }
8f8e3aa4 6239 tmp3 = neon_load_reg(rm, 1);
b75263d6 6240 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6241 tcg_temp_free_i32(tmp5);
6242 tcg_temp_free_i32(tmp4);
8f8e3aa4 6243 neon_store_reg(rd, 0, tmp2);
3018f259 6244 neon_store_reg(rd, 1, tmp3);
7d1b0095 6245 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6246 } else if ((insn & 0x380) == 0) {
6247 /* VDUP */
133da6aa
JR
6248 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6249 return 1;
6250 }
9ee6e8bb 6251 if (insn & (1 << 19)) {
dd8fbd78 6252 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6253 } else {
dd8fbd78 6254 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6255 }
6256 if (insn & (1 << 16)) {
dd8fbd78 6257 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6258 } else if (insn & (1 << 17)) {
6259 if ((insn >> 18) & 1)
dd8fbd78 6260 gen_neon_dup_high16(tmp);
9ee6e8bb 6261 else
dd8fbd78 6262 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6263 }
6264 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6265 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6266 tcg_gen_mov_i32(tmp2, tmp);
6267 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6268 }
7d1b0095 6269 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6270 } else {
6271 return 1;
6272 }
6273 }
6274 }
6275 return 0;
6276}
6277
fe1479c3
PB
/* Decode a cp14 coprocessor-to-core transfer (MRC to cp14).
 * Returns 0 if the access was handled, nonzero to make the caller
 * treat the instruction as UNDEF.  Only a minimal read-as-zero subset
 * of the debug registers and the ThumbEE TEECR/TEEHBR are implemented.
 */
static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
{
    /* Standard MRC encoding: opc1/CRn/CRm/opc2 select the cp14 register,
     * rt is the core destination register.
     */
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    /* Minimal set of debug registers, since we don't support debug */
    if (op1 == 0 && crn == 0 && op2 == 0) {
        switch (crm) {
        case 0:
            /* DBGDIDR: just RAZ. In particular this means the
             * "debug architecture version" bits will read as
             * a reserved value, which should cause Linux to
             * not try to use the debug hardware.
             */
            tmp = tcg_const_i32(0);
            store_reg(s, rt, tmp);
            return 0;
        case 1:
        case 2:
            /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
             * don't implement memory mapped debug components
             */
            if (ENABLE_ARCH_7) {
                tmp = tcg_const_i32(0);
                store_reg(s, rt, tmp);
                return 0;
            }
            break;
        default:
            break;
        }
    }

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR: user-mode reads trap (UNDEF) */
            if (IS_USER(s))
                return 1;
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR: user access traps only when TEECR.XED (bit 0) is set */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    /* Anything else: log it for debugging and UNDEF */
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
6337
/* Decode a cp14 core-to-coprocessor transfer (MCR to cp14).
 * Returns 0 if the access was handled, nonzero to make the caller
 * treat the instruction as UNDEF.  Only the ThumbEE TEECR/TEEHBR
 * registers are writable; the RAZ debug registers ignore writes by
 * falling through to the UNDEF path.
 */
static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
{
    /* Standard MCR encoding: opc1/CRn/CRm/opc2 select the cp14 register,
     * rt is the core source register.
     */
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR: user-mode writes trap (UNDEF); the helper is used
             * because changing TEECR can affect subsequent translation.
             */
            if (IS_USER(s))
                return 1;
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR: user access traps only when TEECR.XED (bit 0) is set */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    /* Anything else: log it for debugging and UNDEF */
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
6370
9ee6e8bb
PB
/* Top-level dispatch for coprocessor instructions.  Returns 0 if the
 * instruction was handled, nonzero to UNDEF it.
 */
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    /* On XScale, the CPAR register gates access to cp0..cp13; a clear
     * bit means the access UNDEFs.  (0x3fff covers coprocessors 0-13.)
     */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
	return 1;

    switch (cpnum) {
      case 0:
      case 1:
        /* cp0/cp1: iWMMXt or XScale DSP extensions when present */
	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
	    return disas_iwmmxt_insn(env, s, insn);
	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
	    return disas_dsp_insn(env, s, insn);
	}
	return 1;
    case 10:
    case 11:
        /* cp10/cp11: VFP */
	return disas_vfp_insn (env, s, insn);
    case 14:
	/* Coprocessors 7-15 are architecturally reserved by ARM.
	   Unfortunately Intel decided to ignore this.  */
	if (arm_feature(env, ARM_FEATURE_XSCALE))
	    goto board;
	if (insn & (1 << 20))
	    return disas_cp14_read(env, s, insn);
	else
	    return disas_cp14_write(env, s, insn);
    case 15:
	return disas_cp15_insn (env, s, insn);
    default:
    board:
	/* Unknown coprocessor.  See if the board has hooked it.  */
	return disas_cp_insn (env, s, insn);
    }
}
6409
5e3f878a
PB
6410
6411/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6412static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6413{
6414 TCGv tmp;
7d1b0095 6415 tmp = tcg_temp_new_i32();
5e3f878a
PB
6416 tcg_gen_trunc_i64_i32(tmp, val);
6417 store_reg(s, rlow, tmp);
7d1b0095 6418 tmp = tcg_temp_new_i32();
5e3f878a
PB
6419 tcg_gen_shri_i64(val, val, 32);
6420 tcg_gen_trunc_i64_i32(tmp, val);
6421 store_reg(s, rhigh, tmp);
6422}
6423
6424/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6425static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6426{
a7812ae4 6427 TCGv_i64 tmp;
5e3f878a
PB
6428 TCGv tmp2;
6429
36aa55dc 6430 /* Load value and extend to 64 bits. */
a7812ae4 6431 tmp = tcg_temp_new_i64();
5e3f878a
PB
6432 tmp2 = load_reg(s, rlow);
6433 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6434 tcg_temp_free_i32(tmp2);
5e3f878a 6435 tcg_gen_add_i64(val, val, tmp);
b75263d6 6436 tcg_temp_free_i64(tmp);
5e3f878a
PB
6437}
6438
6439/* load and add a 64-bit value from a register pair. */
a7812ae4 6440static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6441{
a7812ae4 6442 TCGv_i64 tmp;
36aa55dc
PB
6443 TCGv tmpl;
6444 TCGv tmph;
5e3f878a
PB
6445
6446 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6447 tmpl = load_reg(s, rlow);
6448 tmph = load_reg(s, rhigh);
a7812ae4 6449 tmp = tcg_temp_new_i64();
36aa55dc 6450 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6451 tcg_temp_free_i32(tmpl);
6452 tcg_temp_free_i32(tmph);
5e3f878a 6453 tcg_gen_add_i64(val, val, tmp);
b75263d6 6454 tcg_temp_free_i64(tmp);
5e3f878a
PB
6455}
6456
6457/* Set N and Z flags from a 64-bit value. */
a7812ae4 6458static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6459{
7d1b0095 6460 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6461 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6462 gen_logic_CC(tmp);
7d1b0095 6463 tcg_temp_free_i32(tmp);
5e3f878a
PB
6464}
6465
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be is sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
/* Emit code for LDREX{,B,H,D}: load into rt (and rt2 for size 3, the
 * 64-bit doubleword case) and record addr/value in the exclusive-monitor
 * globals for a later gen_store_exclusive to compare against.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    /* size is log2(bytes); 3 means a 64-bit load done as two words */
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Remember the loaded value before store_reg consumes tmp. */
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        /* Doubleword: load the second word from addr+4 into rt2. */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    /* Arm the monitor last, once the loads have succeeded. */
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
6506
/* CLREX: clear the local exclusive monitor.  -1 can never match a real
 * (word-aligned) exclusive address, so any following STREX will fail.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6511
#ifdef CONFIG_USER_ONLY
/* User-mode emulation: the compare-and-store must be atomic with
 * respect to other threads, so pack the operands into the exclusive_*
 * globals and raise EXCP_STREX; the cpu loop performs the operation.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
/* System emulation: only one CPU runs at a time, so the check-and-store
 * sequence below is effectively atomic.  rd receives 0 on success, 1 on
 * failure, per the STREX definition.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail fast if the monitor was armed for a different address. */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Re-load and compare with the value seen by the LDREX. */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Doubleword: the second word must match cpu_exclusive_high too. */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* Checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the monitor is now disarmed. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
6590
9ee6e8bb
PB
6591static void disas_arm_insn(CPUState * env, DisasContext *s)
6592{
6593 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6594 TCGv tmp;
3670669c 6595 TCGv tmp2;
6ddbc6e4 6596 TCGv tmp3;
b0109805 6597 TCGv addr;
a7812ae4 6598 TCGv_i64 tmp64;
9ee6e8bb
PB
6599
6600 insn = ldl_code(s->pc);
6601 s->pc += 4;
6602
6603 /* M variants do not implement ARM mode. */
6604 if (IS_M(env))
6605 goto illegal_op;
6606 cond = insn >> 28;
6607 if (cond == 0xf){
be5e7a76
DES
6608 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6609 * choose to UNDEF. In ARMv5 and above the space is used
6610 * for miscellaneous unconditional instructions.
6611 */
6612 ARCH(5);
6613
9ee6e8bb
PB
6614 /* Unconditional instructions. */
6615 if (((insn >> 25) & 7) == 1) {
6616 /* NEON Data processing. */
6617 if (!arm_feature(env, ARM_FEATURE_NEON))
6618 goto illegal_op;
6619
6620 if (disas_neon_data_insn(env, s, insn))
6621 goto illegal_op;
6622 return;
6623 }
6624 if ((insn & 0x0f100000) == 0x04000000) {
6625 /* NEON load/store. */
6626 if (!arm_feature(env, ARM_FEATURE_NEON))
6627 goto illegal_op;
6628
6629 if (disas_neon_ls_insn(env, s, insn))
6630 goto illegal_op;
6631 return;
6632 }
3d185e5d
PM
6633 if (((insn & 0x0f30f000) == 0x0510f000) ||
6634 ((insn & 0x0f30f010) == 0x0710f000)) {
6635 if ((insn & (1 << 22)) == 0) {
6636 /* PLDW; v7MP */
6637 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6638 goto illegal_op;
6639 }
6640 }
6641 /* Otherwise PLD; v5TE+ */
be5e7a76 6642 ARCH(5TE);
3d185e5d
PM
6643 return;
6644 }
6645 if (((insn & 0x0f70f000) == 0x0450f000) ||
6646 ((insn & 0x0f70f010) == 0x0650f000)) {
6647 ARCH(7);
6648 return; /* PLI; V7 */
6649 }
6650 if (((insn & 0x0f700000) == 0x04100000) ||
6651 ((insn & 0x0f700010) == 0x06100000)) {
6652 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6653 goto illegal_op;
6654 }
6655 return; /* v7MP: Unallocated memory hint: must NOP */
6656 }
6657
6658 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6659 ARCH(6);
6660 /* setend */
6661 if (insn & (1 << 9)) {
6662 /* BE8 mode not implemented. */
6663 goto illegal_op;
6664 }
6665 return;
6666 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6667 switch ((insn >> 4) & 0xf) {
6668 case 1: /* clrex */
6669 ARCH(6K);
426f5abc 6670 gen_clrex(s);
9ee6e8bb
PB
6671 return;
6672 case 4: /* dsb */
6673 case 5: /* dmb */
6674 case 6: /* isb */
6675 ARCH(7);
6676 /* We don't emulate caches so these are a no-op. */
6677 return;
6678 default:
6679 goto illegal_op;
6680 }
6681 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6682 /* srs */
c67b6b71 6683 int32_t offset;
9ee6e8bb
PB
6684 if (IS_USER(s))
6685 goto illegal_op;
6686 ARCH(6);
6687 op1 = (insn & 0x1f);
7d1b0095 6688 addr = tcg_temp_new_i32();
39ea3d4e
PM
6689 tmp = tcg_const_i32(op1);
6690 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6691 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6692 i = (insn >> 23) & 3;
6693 switch (i) {
6694 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6695 case 1: offset = 0; break; /* IA */
6696 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6697 case 3: offset = 4; break; /* IB */
6698 default: abort();
6699 }
6700 if (offset)
b0109805
PB
6701 tcg_gen_addi_i32(addr, addr, offset);
6702 tmp = load_reg(s, 14);
6703 gen_st32(tmp, addr, 0);
c67b6b71 6704 tmp = load_cpu_field(spsr);
b0109805
PB
6705 tcg_gen_addi_i32(addr, addr, 4);
6706 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6707 if (insn & (1 << 21)) {
6708 /* Base writeback. */
6709 switch (i) {
6710 case 0: offset = -8; break;
c67b6b71
FN
6711 case 1: offset = 4; break;
6712 case 2: offset = -4; break;
9ee6e8bb
PB
6713 case 3: offset = 0; break;
6714 default: abort();
6715 }
6716 if (offset)
c67b6b71 6717 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6718 tmp = tcg_const_i32(op1);
6719 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6720 tcg_temp_free_i32(tmp);
7d1b0095 6721 tcg_temp_free_i32(addr);
b0109805 6722 } else {
7d1b0095 6723 tcg_temp_free_i32(addr);
9ee6e8bb 6724 }
a990f58f 6725 return;
ea825eee 6726 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6727 /* rfe */
c67b6b71 6728 int32_t offset;
9ee6e8bb
PB
6729 if (IS_USER(s))
6730 goto illegal_op;
6731 ARCH(6);
6732 rn = (insn >> 16) & 0xf;
b0109805 6733 addr = load_reg(s, rn);
9ee6e8bb
PB
6734 i = (insn >> 23) & 3;
6735 switch (i) {
b0109805 6736 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6737 case 1: offset = 0; break; /* IA */
6738 case 2: offset = -8; break; /* DB */
b0109805 6739 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6740 default: abort();
6741 }
6742 if (offset)
b0109805
PB
6743 tcg_gen_addi_i32(addr, addr, offset);
6744 /* Load PC into tmp and CPSR into tmp2. */
6745 tmp = gen_ld32(addr, 0);
6746 tcg_gen_addi_i32(addr, addr, 4);
6747 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6748 if (insn & (1 << 21)) {
6749 /* Base writeback. */
6750 switch (i) {
b0109805 6751 case 0: offset = -8; break;
c67b6b71
FN
6752 case 1: offset = 4; break;
6753 case 2: offset = -4; break;
b0109805 6754 case 3: offset = 0; break;
9ee6e8bb
PB
6755 default: abort();
6756 }
6757 if (offset)
b0109805
PB
6758 tcg_gen_addi_i32(addr, addr, offset);
6759 store_reg(s, rn, addr);
6760 } else {
7d1b0095 6761 tcg_temp_free_i32(addr);
9ee6e8bb 6762 }
b0109805 6763 gen_rfe(s, tmp, tmp2);
c67b6b71 6764 return;
9ee6e8bb
PB
6765 } else if ((insn & 0x0e000000) == 0x0a000000) {
6766 /* branch link and change to thumb (blx <offset>) */
6767 int32_t offset;
6768
6769 val = (uint32_t)s->pc;
7d1b0095 6770 tmp = tcg_temp_new_i32();
d9ba4830
PB
6771 tcg_gen_movi_i32(tmp, val);
6772 store_reg(s, 14, tmp);
9ee6e8bb
PB
6773 /* Sign-extend the 24-bit offset */
6774 offset = (((int32_t)insn) << 8) >> 8;
6775 /* offset * 4 + bit24 * 2 + (thumb bit) */
6776 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6777 /* pipeline offset */
6778 val += 4;
be5e7a76 6779 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6780 gen_bx_im(s, val);
9ee6e8bb
PB
6781 return;
6782 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6783 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6784 /* iWMMXt register transfer. */
6785 if (env->cp15.c15_cpar & (1 << 1))
6786 if (!disas_iwmmxt_insn(env, s, insn))
6787 return;
6788 }
6789 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6790 /* Coprocessor double register transfer. */
be5e7a76 6791 ARCH(5TE);
9ee6e8bb
PB
6792 } else if ((insn & 0x0f000010) == 0x0e000010) {
6793 /* Additional coprocessor register transfer. */
7997d92f 6794 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6795 uint32_t mask;
6796 uint32_t val;
6797 /* cps (privileged) */
6798 if (IS_USER(s))
6799 return;
6800 mask = val = 0;
6801 if (insn & (1 << 19)) {
6802 if (insn & (1 << 8))
6803 mask |= CPSR_A;
6804 if (insn & (1 << 7))
6805 mask |= CPSR_I;
6806 if (insn & (1 << 6))
6807 mask |= CPSR_F;
6808 if (insn & (1 << 18))
6809 val |= mask;
6810 }
7997d92f 6811 if (insn & (1 << 17)) {
9ee6e8bb
PB
6812 mask |= CPSR_M;
6813 val |= (insn & 0x1f);
6814 }
6815 if (mask) {
2fbac54b 6816 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6817 }
6818 return;
6819 }
6820 goto illegal_op;
6821 }
6822 if (cond != 0xe) {
6823 /* if not always execute, we generate a conditional jump to
6824 next instruction */
6825 s->condlabel = gen_new_label();
d9ba4830 6826 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6827 s->condjmp = 1;
6828 }
6829 if ((insn & 0x0f900000) == 0x03000000) {
6830 if ((insn & (1 << 21)) == 0) {
6831 ARCH(6T2);
6832 rd = (insn >> 12) & 0xf;
6833 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6834 if ((insn & (1 << 22)) == 0) {
6835 /* MOVW */
7d1b0095 6836 tmp = tcg_temp_new_i32();
5e3f878a 6837 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6838 } else {
6839 /* MOVT */
5e3f878a 6840 tmp = load_reg(s, rd);
86831435 6841 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6842 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6843 }
5e3f878a 6844 store_reg(s, rd, tmp);
9ee6e8bb
PB
6845 } else {
6846 if (((insn >> 12) & 0xf) != 0xf)
6847 goto illegal_op;
6848 if (((insn >> 16) & 0xf) == 0) {
6849 gen_nop_hint(s, insn & 0xff);
6850 } else {
6851 /* CPSR = immediate */
6852 val = insn & 0xff;
6853 shift = ((insn >> 8) & 0xf) * 2;
6854 if (shift)
6855 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6856 i = ((insn & (1 << 22)) != 0);
2fbac54b 6857 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6858 goto illegal_op;
6859 }
6860 }
6861 } else if ((insn & 0x0f900000) == 0x01000000
6862 && (insn & 0x00000090) != 0x00000090) {
6863 /* miscellaneous instructions */
6864 op1 = (insn >> 21) & 3;
6865 sh = (insn >> 4) & 0xf;
6866 rm = insn & 0xf;
6867 switch (sh) {
6868 case 0x0: /* move program status register */
6869 if (op1 & 1) {
6870 /* PSR = reg */
2fbac54b 6871 tmp = load_reg(s, rm);
9ee6e8bb 6872 i = ((op1 & 2) != 0);
2fbac54b 6873 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6874 goto illegal_op;
6875 } else {
6876 /* reg = PSR */
6877 rd = (insn >> 12) & 0xf;
6878 if (op1 & 2) {
6879 if (IS_USER(s))
6880 goto illegal_op;
d9ba4830 6881 tmp = load_cpu_field(spsr);
9ee6e8bb 6882 } else {
7d1b0095 6883 tmp = tcg_temp_new_i32();
d9ba4830 6884 gen_helper_cpsr_read(tmp);
9ee6e8bb 6885 }
d9ba4830 6886 store_reg(s, rd, tmp);
9ee6e8bb
PB
6887 }
6888 break;
6889 case 0x1:
6890 if (op1 == 1) {
6891 /* branch/exchange thumb (bx). */
be5e7a76 6892 ARCH(4T);
d9ba4830
PB
6893 tmp = load_reg(s, rm);
6894 gen_bx(s, tmp);
9ee6e8bb
PB
6895 } else if (op1 == 3) {
6896 /* clz */
be5e7a76 6897 ARCH(5);
9ee6e8bb 6898 rd = (insn >> 12) & 0xf;
1497c961
PB
6899 tmp = load_reg(s, rm);
6900 gen_helper_clz(tmp, tmp);
6901 store_reg(s, rd, tmp);
9ee6e8bb
PB
6902 } else {
6903 goto illegal_op;
6904 }
6905 break;
6906 case 0x2:
6907 if (op1 == 1) {
6908 ARCH(5J); /* bxj */
6909 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6910 tmp = load_reg(s, rm);
6911 gen_bx(s, tmp);
9ee6e8bb
PB
6912 } else {
6913 goto illegal_op;
6914 }
6915 break;
6916 case 0x3:
6917 if (op1 != 1)
6918 goto illegal_op;
6919
be5e7a76 6920 ARCH(5);
9ee6e8bb 6921 /* branch link/exchange thumb (blx) */
d9ba4830 6922 tmp = load_reg(s, rm);
7d1b0095 6923 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6924 tcg_gen_movi_i32(tmp2, s->pc);
6925 store_reg(s, 14, tmp2);
6926 gen_bx(s, tmp);
9ee6e8bb
PB
6927 break;
6928 case 0x5: /* saturating add/subtract */
be5e7a76 6929 ARCH(5TE);
9ee6e8bb
PB
6930 rd = (insn >> 12) & 0xf;
6931 rn = (insn >> 16) & 0xf;
b40d0353 6932 tmp = load_reg(s, rm);
5e3f878a 6933 tmp2 = load_reg(s, rn);
9ee6e8bb 6934 if (op1 & 2)
5e3f878a 6935 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6936 if (op1 & 1)
5e3f878a 6937 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6938 else
5e3f878a 6939 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6940 tcg_temp_free_i32(tmp2);
5e3f878a 6941 store_reg(s, rd, tmp);
9ee6e8bb 6942 break;
49e14940
AL
6943 case 7:
6944 /* SMC instruction (op1 == 3)
6945 and undefined instructions (op1 == 0 || op1 == 2)
6946 will trap */
6947 if (op1 != 1) {
6948 goto illegal_op;
6949 }
6950 /* bkpt */
be5e7a76 6951 ARCH(5);
bc4a0de0 6952 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6953 break;
6954 case 0x8: /* signed multiply */
6955 case 0xa:
6956 case 0xc:
6957 case 0xe:
be5e7a76 6958 ARCH(5TE);
9ee6e8bb
PB
6959 rs = (insn >> 8) & 0xf;
6960 rn = (insn >> 12) & 0xf;
6961 rd = (insn >> 16) & 0xf;
6962 if (op1 == 1) {
6963 /* (32 * 16) >> 16 */
5e3f878a
PB
6964 tmp = load_reg(s, rm);
6965 tmp2 = load_reg(s, rs);
9ee6e8bb 6966 if (sh & 4)
5e3f878a 6967 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6968 else
5e3f878a 6969 gen_sxth(tmp2);
a7812ae4
PB
6970 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6971 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6972 tmp = tcg_temp_new_i32();
a7812ae4 6973 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6974 tcg_temp_free_i64(tmp64);
9ee6e8bb 6975 if ((sh & 2) == 0) {
5e3f878a
PB
6976 tmp2 = load_reg(s, rn);
6977 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6978 tcg_temp_free_i32(tmp2);
9ee6e8bb 6979 }
5e3f878a 6980 store_reg(s, rd, tmp);
9ee6e8bb
PB
6981 } else {
6982 /* 16 * 16 */
5e3f878a
PB
6983 tmp = load_reg(s, rm);
6984 tmp2 = load_reg(s, rs);
6985 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6986 tcg_temp_free_i32(tmp2);
9ee6e8bb 6987 if (op1 == 2) {
a7812ae4
PB
6988 tmp64 = tcg_temp_new_i64();
6989 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6990 tcg_temp_free_i32(tmp);
a7812ae4
PB
6991 gen_addq(s, tmp64, rn, rd);
6992 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6993 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6994 } else {
6995 if (op1 == 0) {
5e3f878a
PB
6996 tmp2 = load_reg(s, rn);
6997 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6998 tcg_temp_free_i32(tmp2);
9ee6e8bb 6999 }
5e3f878a 7000 store_reg(s, rd, tmp);
9ee6e8bb
PB
7001 }
7002 }
7003 break;
7004 default:
7005 goto illegal_op;
7006 }
7007 } else if (((insn & 0x0e000000) == 0 &&
7008 (insn & 0x00000090) != 0x90) ||
7009 ((insn & 0x0e000000) == (1 << 25))) {
7010 int set_cc, logic_cc, shiftop;
7011
7012 op1 = (insn >> 21) & 0xf;
7013 set_cc = (insn >> 20) & 1;
7014 logic_cc = table_logic_cc[op1] & set_cc;
7015
7016 /* data processing instruction */
7017 if (insn & (1 << 25)) {
7018 /* immediate operand */
7019 val = insn & 0xff;
7020 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7021 if (shift) {
9ee6e8bb 7022 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7023 }
7d1b0095 7024 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7025 tcg_gen_movi_i32(tmp2, val);
7026 if (logic_cc && shift) {
7027 gen_set_CF_bit31(tmp2);
7028 }
9ee6e8bb
PB
7029 } else {
7030 /* register */
7031 rm = (insn) & 0xf;
e9bb4aa9 7032 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7033 shiftop = (insn >> 5) & 3;
7034 if (!(insn & (1 << 4))) {
7035 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7036 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7037 } else {
7038 rs = (insn >> 8) & 0xf;
8984bd2e 7039 tmp = load_reg(s, rs);
e9bb4aa9 7040 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7041 }
7042 }
7043 if (op1 != 0x0f && op1 != 0x0d) {
7044 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7045 tmp = load_reg(s, rn);
7046 } else {
7047 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7048 }
7049 rd = (insn >> 12) & 0xf;
7050 switch(op1) {
7051 case 0x00:
e9bb4aa9
JR
7052 tcg_gen_and_i32(tmp, tmp, tmp2);
7053 if (logic_cc) {
7054 gen_logic_CC(tmp);
7055 }
21aeb343 7056 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7057 break;
7058 case 0x01:
e9bb4aa9
JR
7059 tcg_gen_xor_i32(tmp, tmp, tmp2);
7060 if (logic_cc) {
7061 gen_logic_CC(tmp);
7062 }
21aeb343 7063 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7064 break;
7065 case 0x02:
7066 if (set_cc && rd == 15) {
7067 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7068 if (IS_USER(s)) {
9ee6e8bb 7069 goto illegal_op;
e9bb4aa9
JR
7070 }
7071 gen_helper_sub_cc(tmp, tmp, tmp2);
7072 gen_exception_return(s, tmp);
9ee6e8bb 7073 } else {
e9bb4aa9
JR
7074 if (set_cc) {
7075 gen_helper_sub_cc(tmp, tmp, tmp2);
7076 } else {
7077 tcg_gen_sub_i32(tmp, tmp, tmp2);
7078 }
21aeb343 7079 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7080 }
7081 break;
7082 case 0x03:
e9bb4aa9
JR
7083 if (set_cc) {
7084 gen_helper_sub_cc(tmp, tmp2, tmp);
7085 } else {
7086 tcg_gen_sub_i32(tmp, tmp2, tmp);
7087 }
21aeb343 7088 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7089 break;
7090 case 0x04:
e9bb4aa9
JR
7091 if (set_cc) {
7092 gen_helper_add_cc(tmp, tmp, tmp2);
7093 } else {
7094 tcg_gen_add_i32(tmp, tmp, tmp2);
7095 }
21aeb343 7096 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7097 break;
7098 case 0x05:
e9bb4aa9
JR
7099 if (set_cc) {
7100 gen_helper_adc_cc(tmp, tmp, tmp2);
7101 } else {
7102 gen_add_carry(tmp, tmp, tmp2);
7103 }
21aeb343 7104 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7105 break;
7106 case 0x06:
e9bb4aa9
JR
7107 if (set_cc) {
7108 gen_helper_sbc_cc(tmp, tmp, tmp2);
7109 } else {
7110 gen_sub_carry(tmp, tmp, tmp2);
7111 }
21aeb343 7112 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7113 break;
7114 case 0x07:
e9bb4aa9
JR
7115 if (set_cc) {
7116 gen_helper_sbc_cc(tmp, tmp2, tmp);
7117 } else {
7118 gen_sub_carry(tmp, tmp2, tmp);
7119 }
21aeb343 7120 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7121 break;
7122 case 0x08:
7123 if (set_cc) {
e9bb4aa9
JR
7124 tcg_gen_and_i32(tmp, tmp, tmp2);
7125 gen_logic_CC(tmp);
9ee6e8bb 7126 }
7d1b0095 7127 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7128 break;
7129 case 0x09:
7130 if (set_cc) {
e9bb4aa9
JR
7131 tcg_gen_xor_i32(tmp, tmp, tmp2);
7132 gen_logic_CC(tmp);
9ee6e8bb 7133 }
7d1b0095 7134 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7135 break;
7136 case 0x0a:
7137 if (set_cc) {
e9bb4aa9 7138 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7139 }
7d1b0095 7140 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7141 break;
7142 case 0x0b:
7143 if (set_cc) {
e9bb4aa9 7144 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7145 }
7d1b0095 7146 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7147 break;
7148 case 0x0c:
e9bb4aa9
JR
7149 tcg_gen_or_i32(tmp, tmp, tmp2);
7150 if (logic_cc) {
7151 gen_logic_CC(tmp);
7152 }
21aeb343 7153 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7154 break;
7155 case 0x0d:
7156 if (logic_cc && rd == 15) {
7157 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7158 if (IS_USER(s)) {
9ee6e8bb 7159 goto illegal_op;
e9bb4aa9
JR
7160 }
7161 gen_exception_return(s, tmp2);
9ee6e8bb 7162 } else {
e9bb4aa9
JR
7163 if (logic_cc) {
7164 gen_logic_CC(tmp2);
7165 }
21aeb343 7166 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7167 }
7168 break;
7169 case 0x0e:
f669df27 7170 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7171 if (logic_cc) {
7172 gen_logic_CC(tmp);
7173 }
21aeb343 7174 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7175 break;
7176 default:
7177 case 0x0f:
e9bb4aa9
JR
7178 tcg_gen_not_i32(tmp2, tmp2);
7179 if (logic_cc) {
7180 gen_logic_CC(tmp2);
7181 }
21aeb343 7182 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7183 break;
7184 }
e9bb4aa9 7185 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7186 tcg_temp_free_i32(tmp2);
e9bb4aa9 7187 }
9ee6e8bb
PB
7188 } else {
7189 /* other instructions */
7190 op1 = (insn >> 24) & 0xf;
7191 switch(op1) {
7192 case 0x0:
7193 case 0x1:
7194 /* multiplies, extra load/stores */
7195 sh = (insn >> 5) & 3;
7196 if (sh == 0) {
7197 if (op1 == 0x0) {
7198 rd = (insn >> 16) & 0xf;
7199 rn = (insn >> 12) & 0xf;
7200 rs = (insn >> 8) & 0xf;
7201 rm = (insn) & 0xf;
7202 op1 = (insn >> 20) & 0xf;
7203 switch (op1) {
7204 case 0: case 1: case 2: case 3: case 6:
7205 /* 32 bit mul */
5e3f878a
PB
7206 tmp = load_reg(s, rs);
7207 tmp2 = load_reg(s, rm);
7208 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7209 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7210 if (insn & (1 << 22)) {
7211 /* Subtract (mls) */
7212 ARCH(6T2);
5e3f878a
PB
7213 tmp2 = load_reg(s, rn);
7214 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7215 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7216 } else if (insn & (1 << 21)) {
7217 /* Add */
5e3f878a
PB
7218 tmp2 = load_reg(s, rn);
7219 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7220 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7221 }
7222 if (insn & (1 << 20))
5e3f878a
PB
7223 gen_logic_CC(tmp);
7224 store_reg(s, rd, tmp);
9ee6e8bb 7225 break;
8aac08b1
AJ
7226 case 4:
7227 /* 64 bit mul double accumulate (UMAAL) */
7228 ARCH(6);
7229 tmp = load_reg(s, rs);
7230 tmp2 = load_reg(s, rm);
7231 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7232 gen_addq_lo(s, tmp64, rn);
7233 gen_addq_lo(s, tmp64, rd);
7234 gen_storeq_reg(s, rn, rd, tmp64);
7235 tcg_temp_free_i64(tmp64);
7236 break;
7237 case 8: case 9: case 10: case 11:
7238 case 12: case 13: case 14: case 15:
7239 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7240 tmp = load_reg(s, rs);
7241 tmp2 = load_reg(s, rm);
8aac08b1 7242 if (insn & (1 << 22)) {
a7812ae4 7243 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7244 } else {
a7812ae4 7245 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7246 }
7247 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7248 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7249 }
8aac08b1 7250 if (insn & (1 << 20)) {
a7812ae4 7251 gen_logicq_cc(tmp64);
8aac08b1 7252 }
a7812ae4 7253 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7254 tcg_temp_free_i64(tmp64);
9ee6e8bb 7255 break;
8aac08b1
AJ
7256 default:
7257 goto illegal_op;
9ee6e8bb
PB
7258 }
7259 } else {
7260 rn = (insn >> 16) & 0xf;
7261 rd = (insn >> 12) & 0xf;
7262 if (insn & (1 << 23)) {
7263 /* load/store exclusive */
86753403
PB
7264 op1 = (insn >> 21) & 0x3;
7265 if (op1)
a47f43d2 7266 ARCH(6K);
86753403
PB
7267 else
7268 ARCH(6);
3174f8e9 7269 addr = tcg_temp_local_new_i32();
98a46317 7270 load_reg_var(s, addr, rn);
9ee6e8bb 7271 if (insn & (1 << 20)) {
86753403
PB
7272 switch (op1) {
7273 case 0: /* ldrex */
426f5abc 7274 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7275 break;
7276 case 1: /* ldrexd */
426f5abc 7277 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7278 break;
7279 case 2: /* ldrexb */
426f5abc 7280 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7281 break;
7282 case 3: /* ldrexh */
426f5abc 7283 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7284 break;
7285 default:
7286 abort();
7287 }
9ee6e8bb
PB
7288 } else {
7289 rm = insn & 0xf;
86753403
PB
7290 switch (op1) {
7291 case 0: /* strex */
426f5abc 7292 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7293 break;
7294 case 1: /* strexd */
502e64fe 7295 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7296 break;
7297 case 2: /* strexb */
426f5abc 7298 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7299 break;
7300 case 3: /* strexh */
426f5abc 7301 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7302 break;
7303 default:
7304 abort();
7305 }
9ee6e8bb 7306 }
3174f8e9 7307 tcg_temp_free(addr);
9ee6e8bb
PB
7308 } else {
7309 /* SWP instruction */
7310 rm = (insn) & 0xf;
7311
8984bd2e
PB
7312 /* ??? This is not really atomic. However we know
7313 we never have multiple CPUs running in parallel,
7314 so it is good enough. */
7315 addr = load_reg(s, rn);
7316 tmp = load_reg(s, rm);
9ee6e8bb 7317 if (insn & (1 << 22)) {
8984bd2e
PB
7318 tmp2 = gen_ld8u(addr, IS_USER(s));
7319 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7320 } else {
8984bd2e
PB
7321 tmp2 = gen_ld32(addr, IS_USER(s));
7322 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7323 }
7d1b0095 7324 tcg_temp_free_i32(addr);
8984bd2e 7325 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7326 }
7327 }
7328 } else {
7329 int address_offset;
7330 int load;
7331 /* Misc load/store */
7332 rn = (insn >> 16) & 0xf;
7333 rd = (insn >> 12) & 0xf;
b0109805 7334 addr = load_reg(s, rn);
9ee6e8bb 7335 if (insn & (1 << 24))
b0109805 7336 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7337 address_offset = 0;
7338 if (insn & (1 << 20)) {
7339 /* load */
7340 switch(sh) {
7341 case 1:
b0109805 7342 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7343 break;
7344 case 2:
b0109805 7345 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7346 break;
7347 default:
7348 case 3:
b0109805 7349 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7350 break;
7351 }
7352 load = 1;
7353 } else if (sh & 2) {
be5e7a76 7354 ARCH(5TE);
9ee6e8bb
PB
7355 /* doubleword */
7356 if (sh & 1) {
7357 /* store */
b0109805
PB
7358 tmp = load_reg(s, rd);
7359 gen_st32(tmp, addr, IS_USER(s));
7360 tcg_gen_addi_i32(addr, addr, 4);
7361 tmp = load_reg(s, rd + 1);
7362 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7363 load = 0;
7364 } else {
7365 /* load */
b0109805
PB
7366 tmp = gen_ld32(addr, IS_USER(s));
7367 store_reg(s, rd, tmp);
7368 tcg_gen_addi_i32(addr, addr, 4);
7369 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7370 rd++;
7371 load = 1;
7372 }
7373 address_offset = -4;
7374 } else {
7375 /* store */
b0109805
PB
7376 tmp = load_reg(s, rd);
7377 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7378 load = 0;
7379 }
7380 /* Perform base writeback before the loaded value to
7381 ensure correct behavior with overlapping index registers.
7382 ldrd with base writeback is is undefined if the
7383 destination and index registers overlap. */
7384 if (!(insn & (1 << 24))) {
b0109805
PB
7385 gen_add_datah_offset(s, insn, address_offset, addr);
7386 store_reg(s, rn, addr);
9ee6e8bb
PB
7387 } else if (insn & (1 << 21)) {
7388 if (address_offset)
b0109805
PB
7389 tcg_gen_addi_i32(addr, addr, address_offset);
7390 store_reg(s, rn, addr);
7391 } else {
7d1b0095 7392 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7393 }
7394 if (load) {
7395 /* Complete the load. */
b0109805 7396 store_reg(s, rd, tmp);
9ee6e8bb
PB
7397 }
7398 }
7399 break;
7400 case 0x4:
7401 case 0x5:
7402 goto do_ldst;
7403 case 0x6:
7404 case 0x7:
7405 if (insn & (1 << 4)) {
7406 ARCH(6);
7407 /* Armv6 Media instructions. */
7408 rm = insn & 0xf;
7409 rn = (insn >> 16) & 0xf;
2c0262af 7410 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7411 rs = (insn >> 8) & 0xf;
7412 switch ((insn >> 23) & 3) {
7413 case 0: /* Parallel add/subtract. */
7414 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7415 tmp = load_reg(s, rn);
7416 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7417 sh = (insn >> 5) & 7;
7418 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7419 goto illegal_op;
6ddbc6e4 7420 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7421 tcg_temp_free_i32(tmp2);
6ddbc6e4 7422 store_reg(s, rd, tmp);
9ee6e8bb
PB
7423 break;
7424 case 1:
7425 if ((insn & 0x00700020) == 0) {
6c95676b 7426 /* Halfword pack. */
3670669c
PB
7427 tmp = load_reg(s, rn);
7428 tmp2 = load_reg(s, rm);
9ee6e8bb 7429 shift = (insn >> 7) & 0x1f;
3670669c
PB
7430 if (insn & (1 << 6)) {
7431 /* pkhtb */
22478e79
AZ
7432 if (shift == 0)
7433 shift = 31;
7434 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7435 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7436 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7437 } else {
7438 /* pkhbt */
22478e79
AZ
7439 if (shift)
7440 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7441 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7442 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7443 }
7444 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7445 tcg_temp_free_i32(tmp2);
3670669c 7446 store_reg(s, rd, tmp);
9ee6e8bb
PB
7447 } else if ((insn & 0x00200020) == 0x00200000) {
7448 /* [us]sat */
6ddbc6e4 7449 tmp = load_reg(s, rm);
9ee6e8bb
PB
7450 shift = (insn >> 7) & 0x1f;
7451 if (insn & (1 << 6)) {
7452 if (shift == 0)
7453 shift = 31;
6ddbc6e4 7454 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7455 } else {
6ddbc6e4 7456 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7457 }
7458 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7459 tmp2 = tcg_const_i32(sh);
7460 if (insn & (1 << 22))
7461 gen_helper_usat(tmp, tmp, tmp2);
7462 else
7463 gen_helper_ssat(tmp, tmp, tmp2);
7464 tcg_temp_free_i32(tmp2);
6ddbc6e4 7465 store_reg(s, rd, tmp);
9ee6e8bb
PB
7466 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7467 /* [us]sat16 */
6ddbc6e4 7468 tmp = load_reg(s, rm);
9ee6e8bb 7469 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7470 tmp2 = tcg_const_i32(sh);
7471 if (insn & (1 << 22))
7472 gen_helper_usat16(tmp, tmp, tmp2);
7473 else
7474 gen_helper_ssat16(tmp, tmp, tmp2);
7475 tcg_temp_free_i32(tmp2);
6ddbc6e4 7476 store_reg(s, rd, tmp);
9ee6e8bb
PB
7477 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7478 /* Select bytes. */
6ddbc6e4
PB
7479 tmp = load_reg(s, rn);
7480 tmp2 = load_reg(s, rm);
7d1b0095 7481 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7482 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7483 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7484 tcg_temp_free_i32(tmp3);
7485 tcg_temp_free_i32(tmp2);
6ddbc6e4 7486 store_reg(s, rd, tmp);
9ee6e8bb 7487 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7488 tmp = load_reg(s, rm);
9ee6e8bb 7489 shift = (insn >> 10) & 3;
1301f322 7490 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7491 rotate, a shift is sufficient. */
7492 if (shift != 0)
f669df27 7493 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7494 op1 = (insn >> 20) & 7;
7495 switch (op1) {
5e3f878a
PB
7496 case 0: gen_sxtb16(tmp); break;
7497 case 2: gen_sxtb(tmp); break;
7498 case 3: gen_sxth(tmp); break;
7499 case 4: gen_uxtb16(tmp); break;
7500 case 6: gen_uxtb(tmp); break;
7501 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7502 default: goto illegal_op;
7503 }
7504 if (rn != 15) {
5e3f878a 7505 tmp2 = load_reg(s, rn);
9ee6e8bb 7506 if ((op1 & 3) == 0) {
5e3f878a 7507 gen_add16(tmp, tmp2);
9ee6e8bb 7508 } else {
5e3f878a 7509 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7510 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7511 }
7512 }
6c95676b 7513 store_reg(s, rd, tmp);
9ee6e8bb
PB
7514 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7515 /* rev */
b0109805 7516 tmp = load_reg(s, rm);
9ee6e8bb
PB
7517 if (insn & (1 << 22)) {
7518 if (insn & (1 << 7)) {
b0109805 7519 gen_revsh(tmp);
9ee6e8bb
PB
7520 } else {
7521 ARCH(6T2);
b0109805 7522 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7523 }
7524 } else {
7525 if (insn & (1 << 7))
b0109805 7526 gen_rev16(tmp);
9ee6e8bb 7527 else
66896cb8 7528 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7529 }
b0109805 7530 store_reg(s, rd, tmp);
9ee6e8bb
PB
7531 } else {
7532 goto illegal_op;
7533 }
7534 break;
7535 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7536 tmp = load_reg(s, rm);
7537 tmp2 = load_reg(s, rs);
9ee6e8bb 7538 if (insn & (1 << 20)) {
838fa72d
AJ
7539 /* Signed multiply most significant [accumulate].
7540 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7541 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7542
955a7dd5 7543 if (rd != 15) {
838fa72d 7544 tmp = load_reg(s, rd);
9ee6e8bb 7545 if (insn & (1 << 6)) {
838fa72d 7546 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7547 } else {
838fa72d 7548 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7549 }
7550 }
838fa72d
AJ
7551 if (insn & (1 << 5)) {
7552 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7553 }
7554 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7555 tmp = tcg_temp_new_i32();
838fa72d
AJ
7556 tcg_gen_trunc_i64_i32(tmp, tmp64);
7557 tcg_temp_free_i64(tmp64);
955a7dd5 7558 store_reg(s, rn, tmp);
9ee6e8bb
PB
7559 } else {
7560 if (insn & (1 << 5))
5e3f878a
PB
7561 gen_swap_half(tmp2);
7562 gen_smul_dual(tmp, tmp2);
5e3f878a 7563 if (insn & (1 << 6)) {
e1d177b9 7564 /* This subtraction cannot overflow. */
5e3f878a
PB
7565 tcg_gen_sub_i32(tmp, tmp, tmp2);
7566 } else {
e1d177b9
PM
7567 /* This addition cannot overflow 32 bits;
7568 * however it may overflow considered as a signed
7569 * operation, in which case we must set the Q flag.
7570 */
7571 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7572 }
7d1b0095 7573 tcg_temp_free_i32(tmp2);
9ee6e8bb 7574 if (insn & (1 << 22)) {
5e3f878a 7575 /* smlald, smlsld */
a7812ae4
PB
7576 tmp64 = tcg_temp_new_i64();
7577 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7578 tcg_temp_free_i32(tmp);
a7812ae4
PB
7579 gen_addq(s, tmp64, rd, rn);
7580 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7581 tcg_temp_free_i64(tmp64);
9ee6e8bb 7582 } else {
5e3f878a 7583 /* smuad, smusd, smlad, smlsd */
22478e79 7584 if (rd != 15)
9ee6e8bb 7585 {
22478e79 7586 tmp2 = load_reg(s, rd);
5e3f878a 7587 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7588 tcg_temp_free_i32(tmp2);
9ee6e8bb 7589 }
22478e79 7590 store_reg(s, rn, tmp);
9ee6e8bb
PB
7591 }
7592 }
7593 break;
7594 case 3:
7595 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7596 switch (op1) {
7597 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7598 ARCH(6);
7599 tmp = load_reg(s, rm);
7600 tmp2 = load_reg(s, rs);
7601 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7602 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7603 if (rd != 15) {
7604 tmp2 = load_reg(s, rd);
6ddbc6e4 7605 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7606 tcg_temp_free_i32(tmp2);
9ee6e8bb 7607 }
ded9d295 7608 store_reg(s, rn, tmp);
9ee6e8bb
PB
7609 break;
7610 case 0x20: case 0x24: case 0x28: case 0x2c:
7611 /* Bitfield insert/clear. */
7612 ARCH(6T2);
7613 shift = (insn >> 7) & 0x1f;
7614 i = (insn >> 16) & 0x1f;
7615 i = i + 1 - shift;
7616 if (rm == 15) {
7d1b0095 7617 tmp = tcg_temp_new_i32();
5e3f878a 7618 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7619 } else {
5e3f878a 7620 tmp = load_reg(s, rm);
9ee6e8bb
PB
7621 }
7622 if (i != 32) {
5e3f878a 7623 tmp2 = load_reg(s, rd);
8f8e3aa4 7624 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7625 tcg_temp_free_i32(tmp2);
9ee6e8bb 7626 }
5e3f878a 7627 store_reg(s, rd, tmp);
9ee6e8bb
PB
7628 break;
7629 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7630 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7631 ARCH(6T2);
5e3f878a 7632 tmp = load_reg(s, rm);
9ee6e8bb
PB
7633 shift = (insn >> 7) & 0x1f;
7634 i = ((insn >> 16) & 0x1f) + 1;
7635 if (shift + i > 32)
7636 goto illegal_op;
7637 if (i < 32) {
7638 if (op1 & 0x20) {
5e3f878a 7639 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7640 } else {
5e3f878a 7641 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7642 }
7643 }
5e3f878a 7644 store_reg(s, rd, tmp);
9ee6e8bb
PB
7645 break;
7646 default:
7647 goto illegal_op;
7648 }
7649 break;
7650 }
7651 break;
7652 }
7653 do_ldst:
7654 /* Check for undefined extension instructions
7655 * per the ARM Bible IE:
7656 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7657 */
7658 sh = (0xf << 20) | (0xf << 4);
7659 if (op1 == 0x7 && ((insn & sh) == sh))
7660 {
7661 goto illegal_op;
7662 }
7663 /* load/store byte/word */
7664 rn = (insn >> 16) & 0xf;
7665 rd = (insn >> 12) & 0xf;
b0109805 7666 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7667 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7668 if (insn & (1 << 24))
b0109805 7669 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7670 if (insn & (1 << 20)) {
7671 /* load */
9ee6e8bb 7672 if (insn & (1 << 22)) {
b0109805 7673 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7674 } else {
b0109805 7675 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7676 }
9ee6e8bb
PB
7677 } else {
7678 /* store */
b0109805 7679 tmp = load_reg(s, rd);
9ee6e8bb 7680 if (insn & (1 << 22))
b0109805 7681 gen_st8(tmp, tmp2, i);
9ee6e8bb 7682 else
b0109805 7683 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7684 }
7685 if (!(insn & (1 << 24))) {
b0109805
PB
7686 gen_add_data_offset(s, insn, tmp2);
7687 store_reg(s, rn, tmp2);
7688 } else if (insn & (1 << 21)) {
7689 store_reg(s, rn, tmp2);
7690 } else {
7d1b0095 7691 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7692 }
7693 if (insn & (1 << 20)) {
7694 /* Complete the load. */
be5e7a76 7695 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7696 }
7697 break;
7698 case 0x08:
7699 case 0x09:
7700 {
7701 int j, n, user, loaded_base;
b0109805 7702 TCGv loaded_var;
9ee6e8bb
PB
7703 /* load/store multiple words */
7704 /* XXX: store correct base if write back */
7705 user = 0;
7706 if (insn & (1 << 22)) {
7707 if (IS_USER(s))
7708 goto illegal_op; /* only usable in supervisor mode */
7709
7710 if ((insn & (1 << 15)) == 0)
7711 user = 1;
7712 }
7713 rn = (insn >> 16) & 0xf;
b0109805 7714 addr = load_reg(s, rn);
9ee6e8bb
PB
7715
7716 /* compute total size */
7717 loaded_base = 0;
a50f5b91 7718 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7719 n = 0;
7720 for(i=0;i<16;i++) {
7721 if (insn & (1 << i))
7722 n++;
7723 }
7724 /* XXX: test invalid n == 0 case ? */
7725 if (insn & (1 << 23)) {
7726 if (insn & (1 << 24)) {
7727 /* pre increment */
b0109805 7728 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7729 } else {
7730 /* post increment */
7731 }
7732 } else {
7733 if (insn & (1 << 24)) {
7734 /* pre decrement */
b0109805 7735 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7736 } else {
7737 /* post decrement */
7738 if (n != 1)
b0109805 7739 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7740 }
7741 }
7742 j = 0;
7743 for(i=0;i<16;i++) {
7744 if (insn & (1 << i)) {
7745 if (insn & (1 << 20)) {
7746 /* load */
b0109805 7747 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7748 if (user) {
b75263d6
JR
7749 tmp2 = tcg_const_i32(i);
7750 gen_helper_set_user_reg(tmp2, tmp);
7751 tcg_temp_free_i32(tmp2);
7d1b0095 7752 tcg_temp_free_i32(tmp);
9ee6e8bb 7753 } else if (i == rn) {
b0109805 7754 loaded_var = tmp;
9ee6e8bb
PB
7755 loaded_base = 1;
7756 } else {
be5e7a76 7757 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7758 }
7759 } else {
7760 /* store */
7761 if (i == 15) {
7762 /* special case: r15 = PC + 8 */
7763 val = (long)s->pc + 4;
7d1b0095 7764 tmp = tcg_temp_new_i32();
b0109805 7765 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7766 } else if (user) {
7d1b0095 7767 tmp = tcg_temp_new_i32();
b75263d6
JR
7768 tmp2 = tcg_const_i32(i);
7769 gen_helper_get_user_reg(tmp, tmp2);
7770 tcg_temp_free_i32(tmp2);
9ee6e8bb 7771 } else {
b0109805 7772 tmp = load_reg(s, i);
9ee6e8bb 7773 }
b0109805 7774 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7775 }
7776 j++;
7777 /* no need to add after the last transfer */
7778 if (j != n)
b0109805 7779 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7780 }
7781 }
7782 if (insn & (1 << 21)) {
7783 /* write back */
7784 if (insn & (1 << 23)) {
7785 if (insn & (1 << 24)) {
7786 /* pre increment */
7787 } else {
7788 /* post increment */
b0109805 7789 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7790 }
7791 } else {
7792 if (insn & (1 << 24)) {
7793 /* pre decrement */
7794 if (n != 1)
b0109805 7795 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7796 } else {
7797 /* post decrement */
b0109805 7798 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7799 }
7800 }
b0109805
PB
7801 store_reg(s, rn, addr);
7802 } else {
7d1b0095 7803 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7804 }
7805 if (loaded_base) {
b0109805 7806 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7807 }
7808 if ((insn & (1 << 22)) && !user) {
7809 /* Restore CPSR from SPSR. */
d9ba4830
PB
7810 tmp = load_cpu_field(spsr);
7811 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7812 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7813 s->is_jmp = DISAS_UPDATE;
7814 }
7815 }
7816 break;
7817 case 0xa:
7818 case 0xb:
7819 {
7820 int32_t offset;
7821
7822 /* branch (and link) */
7823 val = (int32_t)s->pc;
7824 if (insn & (1 << 24)) {
7d1b0095 7825 tmp = tcg_temp_new_i32();
5e3f878a
PB
7826 tcg_gen_movi_i32(tmp, val);
7827 store_reg(s, 14, tmp);
9ee6e8bb
PB
7828 }
7829 offset = (((int32_t)insn << 8) >> 8);
7830 val += (offset << 2) + 4;
7831 gen_jmp(s, val);
7832 }
7833 break;
7834 case 0xc:
7835 case 0xd:
7836 case 0xe:
7837 /* Coprocessor. */
7838 if (disas_coproc_insn(env, s, insn))
7839 goto illegal_op;
7840 break;
7841 case 0xf:
7842 /* swi */
5e3f878a 7843 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7844 s->is_jmp = DISAS_SWI;
7845 break;
7846 default:
7847 illegal_op:
bc4a0de0 7848 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7849 break;
7850 }
7851 }
7852}
7853
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 0..7 form the logical group of the Thumb-2 data-processing
       encoding (AND, BIC, ORR, ORN, EOR, ...); everything above is
       arithmetic.  */
    return op <= 7;
}
7860
7861/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7862 then set condition code flags based on the result of the operation.
7863 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7864 to the high bit of T1.
7865 Returns zero if the opcode is valid. */
7866
7867static int
396e467c 7868gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7869{
7870 int logic_cc;
7871
7872 logic_cc = 0;
7873 switch (op) {
7874 case 0: /* and */
396e467c 7875 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7876 logic_cc = conds;
7877 break;
7878 case 1: /* bic */
f669df27 7879 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7880 logic_cc = conds;
7881 break;
7882 case 2: /* orr */
396e467c 7883 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7884 logic_cc = conds;
7885 break;
7886 case 3: /* orn */
29501f1b 7887 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7888 logic_cc = conds;
7889 break;
7890 case 4: /* eor */
396e467c 7891 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7892 logic_cc = conds;
7893 break;
7894 case 8: /* add */
7895 if (conds)
396e467c 7896 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7897 else
396e467c 7898 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7899 break;
7900 case 10: /* adc */
7901 if (conds)
396e467c 7902 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7903 else
396e467c 7904 gen_adc(t0, t1);
9ee6e8bb
PB
7905 break;
7906 case 11: /* sbc */
7907 if (conds)
396e467c 7908 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7909 else
396e467c 7910 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7911 break;
7912 case 13: /* sub */
7913 if (conds)
396e467c 7914 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7915 else
396e467c 7916 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7917 break;
7918 case 14: /* rsb */
7919 if (conds)
396e467c 7920 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7921 else
396e467c 7922 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7923 break;
7924 default: /* 5, 6, 7, 9, 12, 15. */
7925 return 1;
7926 }
7927 if (logic_cc) {
396e467c 7928 gen_logic_CC(t0);
9ee6e8bb 7929 if (shifter_out)
396e467c 7930 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7931 }
7932 return 0;
7933}
7934
7935/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7936 is not legal. */
7937static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7938{
b0109805 7939 uint32_t insn, imm, shift, offset;
9ee6e8bb 7940 uint32_t rd, rn, rm, rs;
b26eefb6 7941 TCGv tmp;
6ddbc6e4
PB
7942 TCGv tmp2;
7943 TCGv tmp3;
b0109805 7944 TCGv addr;
a7812ae4 7945 TCGv_i64 tmp64;
9ee6e8bb
PB
7946 int op;
7947 int shiftop;
7948 int conds;
7949 int logic_cc;
7950
7951 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7952 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7953 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7954 16-bit instructions to get correct prefetch abort behavior. */
7955 insn = insn_hw1;
7956 if ((insn & (1 << 12)) == 0) {
be5e7a76 7957 ARCH(5);
9ee6e8bb
PB
7958 /* Second half of blx. */
7959 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7960 tmp = load_reg(s, 14);
7961 tcg_gen_addi_i32(tmp, tmp, offset);
7962 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7963
7d1b0095 7964 tmp2 = tcg_temp_new_i32();
b0109805 7965 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7966 store_reg(s, 14, tmp2);
7967 gen_bx(s, tmp);
9ee6e8bb
PB
7968 return 0;
7969 }
7970 if (insn & (1 << 11)) {
7971 /* Second half of bl. */
7972 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7973 tmp = load_reg(s, 14);
6a0d8a1d 7974 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7975
7d1b0095 7976 tmp2 = tcg_temp_new_i32();
b0109805 7977 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7978 store_reg(s, 14, tmp2);
7979 gen_bx(s, tmp);
9ee6e8bb
PB
7980 return 0;
7981 }
7982 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7983 /* Instruction spans a page boundary. Implement it as two
7984 16-bit instructions in case the second half causes an
7985 prefetch abort. */
7986 offset = ((int32_t)insn << 21) >> 9;
396e467c 7987 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7988 return 0;
7989 }
7990 /* Fall through to 32-bit decode. */
7991 }
7992
7993 insn = lduw_code(s->pc);
7994 s->pc += 2;
7995 insn |= (uint32_t)insn_hw1 << 16;
7996
7997 if ((insn & 0xf800e800) != 0xf000e800) {
7998 ARCH(6T2);
7999 }
8000
8001 rn = (insn >> 16) & 0xf;
8002 rs = (insn >> 12) & 0xf;
8003 rd = (insn >> 8) & 0xf;
8004 rm = insn & 0xf;
8005 switch ((insn >> 25) & 0xf) {
8006 case 0: case 1: case 2: case 3:
8007 /* 16-bit instructions. Should never happen. */
8008 abort();
8009 case 4:
8010 if (insn & (1 << 22)) {
8011 /* Other load/store, table branch. */
8012 if (insn & 0x01200000) {
8013 /* Load/store doubleword. */
8014 if (rn == 15) {
7d1b0095 8015 addr = tcg_temp_new_i32();
b0109805 8016 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8017 } else {
b0109805 8018 addr = load_reg(s, rn);
9ee6e8bb
PB
8019 }
8020 offset = (insn & 0xff) * 4;
8021 if ((insn & (1 << 23)) == 0)
8022 offset = -offset;
8023 if (insn & (1 << 24)) {
b0109805 8024 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8025 offset = 0;
8026 }
8027 if (insn & (1 << 20)) {
8028 /* ldrd */
b0109805
PB
8029 tmp = gen_ld32(addr, IS_USER(s));
8030 store_reg(s, rs, tmp);
8031 tcg_gen_addi_i32(addr, addr, 4);
8032 tmp = gen_ld32(addr, IS_USER(s));
8033 store_reg(s, rd, tmp);
9ee6e8bb
PB
8034 } else {
8035 /* strd */
b0109805
PB
8036 tmp = load_reg(s, rs);
8037 gen_st32(tmp, addr, IS_USER(s));
8038 tcg_gen_addi_i32(addr, addr, 4);
8039 tmp = load_reg(s, rd);
8040 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8041 }
8042 if (insn & (1 << 21)) {
8043 /* Base writeback. */
8044 if (rn == 15)
8045 goto illegal_op;
b0109805
PB
8046 tcg_gen_addi_i32(addr, addr, offset - 4);
8047 store_reg(s, rn, addr);
8048 } else {
7d1b0095 8049 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8050 }
8051 } else if ((insn & (1 << 23)) == 0) {
8052 /* Load/store exclusive word. */
3174f8e9 8053 addr = tcg_temp_local_new();
98a46317 8054 load_reg_var(s, addr, rn);
426f5abc 8055 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8056 if (insn & (1 << 20)) {
426f5abc 8057 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8058 } else {
426f5abc 8059 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8060 }
3174f8e9 8061 tcg_temp_free(addr);
9ee6e8bb
PB
8062 } else if ((insn & (1 << 6)) == 0) {
8063 /* Table Branch. */
8064 if (rn == 15) {
7d1b0095 8065 addr = tcg_temp_new_i32();
b0109805 8066 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8067 } else {
b0109805 8068 addr = load_reg(s, rn);
9ee6e8bb 8069 }
b26eefb6 8070 tmp = load_reg(s, rm);
b0109805 8071 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8072 if (insn & (1 << 4)) {
8073 /* tbh */
b0109805 8074 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8075 tcg_temp_free_i32(tmp);
b0109805 8076 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8077 } else { /* tbb */
7d1b0095 8078 tcg_temp_free_i32(tmp);
b0109805 8079 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8080 }
7d1b0095 8081 tcg_temp_free_i32(addr);
b0109805
PB
8082 tcg_gen_shli_i32(tmp, tmp, 1);
8083 tcg_gen_addi_i32(tmp, tmp, s->pc);
8084 store_reg(s, 15, tmp);
9ee6e8bb
PB
8085 } else {
8086 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8087 ARCH(7);
9ee6e8bb 8088 op = (insn >> 4) & 0x3;
426f5abc
PB
8089 if (op == 2) {
8090 goto illegal_op;
8091 }
3174f8e9 8092 addr = tcg_temp_local_new();
98a46317 8093 load_reg_var(s, addr, rn);
9ee6e8bb 8094 if (insn & (1 << 20)) {
426f5abc 8095 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8096 } else {
426f5abc 8097 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8098 }
3174f8e9 8099 tcg_temp_free(addr);
9ee6e8bb
PB
8100 }
8101 } else {
8102 /* Load/store multiple, RFE, SRS. */
8103 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8104 /* Not available in user mode. */
b0109805 8105 if (IS_USER(s))
9ee6e8bb
PB
8106 goto illegal_op;
8107 if (insn & (1 << 20)) {
8108 /* rfe */
b0109805
PB
8109 addr = load_reg(s, rn);
8110 if ((insn & (1 << 24)) == 0)
8111 tcg_gen_addi_i32(addr, addr, -8);
8112 /* Load PC into tmp and CPSR into tmp2. */
8113 tmp = gen_ld32(addr, 0);
8114 tcg_gen_addi_i32(addr, addr, 4);
8115 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8116 if (insn & (1 << 21)) {
8117 /* Base writeback. */
b0109805
PB
8118 if (insn & (1 << 24)) {
8119 tcg_gen_addi_i32(addr, addr, 4);
8120 } else {
8121 tcg_gen_addi_i32(addr, addr, -4);
8122 }
8123 store_reg(s, rn, addr);
8124 } else {
7d1b0095 8125 tcg_temp_free_i32(addr);
9ee6e8bb 8126 }
b0109805 8127 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8128 } else {
8129 /* srs */
8130 op = (insn & 0x1f);
7d1b0095 8131 addr = tcg_temp_new_i32();
39ea3d4e
PM
8132 tmp = tcg_const_i32(op);
8133 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8134 tcg_temp_free_i32(tmp);
9ee6e8bb 8135 if ((insn & (1 << 24)) == 0) {
b0109805 8136 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8137 }
b0109805
PB
8138 tmp = load_reg(s, 14);
8139 gen_st32(tmp, addr, 0);
8140 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8141 tmp = tcg_temp_new_i32();
b0109805
PB
8142 gen_helper_cpsr_read(tmp);
8143 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8144 if (insn & (1 << 21)) {
8145 if ((insn & (1 << 24)) == 0) {
b0109805 8146 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8147 } else {
b0109805 8148 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8149 }
39ea3d4e
PM
8150 tmp = tcg_const_i32(op);
8151 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8152 tcg_temp_free_i32(tmp);
b0109805 8153 } else {
7d1b0095 8154 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8155 }
8156 }
8157 } else {
5856d44e
YO
8158 int i, loaded_base = 0;
8159 TCGv loaded_var;
9ee6e8bb 8160 /* Load/store multiple. */
b0109805 8161 addr = load_reg(s, rn);
9ee6e8bb
PB
8162 offset = 0;
8163 for (i = 0; i < 16; i++) {
8164 if (insn & (1 << i))
8165 offset += 4;
8166 }
8167 if (insn & (1 << 24)) {
b0109805 8168 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8169 }
8170
5856d44e 8171 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8172 for (i = 0; i < 16; i++) {
8173 if ((insn & (1 << i)) == 0)
8174 continue;
8175 if (insn & (1 << 20)) {
8176 /* Load. */
b0109805 8177 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8178 if (i == 15) {
b0109805 8179 gen_bx(s, tmp);
5856d44e
YO
8180 } else if (i == rn) {
8181 loaded_var = tmp;
8182 loaded_base = 1;
9ee6e8bb 8183 } else {
b0109805 8184 store_reg(s, i, tmp);
9ee6e8bb
PB
8185 }
8186 } else {
8187 /* Store. */
b0109805
PB
8188 tmp = load_reg(s, i);
8189 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8190 }
b0109805 8191 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8192 }
5856d44e
YO
8193 if (loaded_base) {
8194 store_reg(s, rn, loaded_var);
8195 }
9ee6e8bb
PB
8196 if (insn & (1 << 21)) {
8197 /* Base register writeback. */
8198 if (insn & (1 << 24)) {
b0109805 8199 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8200 }
8201 /* Fault if writeback register is in register list. */
8202 if (insn & (1 << rn))
8203 goto illegal_op;
b0109805
PB
8204 store_reg(s, rn, addr);
8205 } else {
7d1b0095 8206 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8207 }
8208 }
8209 }
8210 break;
2af9ab77
JB
8211 case 5:
8212
9ee6e8bb 8213 op = (insn >> 21) & 0xf;
2af9ab77
JB
8214 if (op == 6) {
8215 /* Halfword pack. */
8216 tmp = load_reg(s, rn);
8217 tmp2 = load_reg(s, rm);
8218 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8219 if (insn & (1 << 5)) {
8220 /* pkhtb */
8221 if (shift == 0)
8222 shift = 31;
8223 tcg_gen_sari_i32(tmp2, tmp2, shift);
8224 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8225 tcg_gen_ext16u_i32(tmp2, tmp2);
8226 } else {
8227 /* pkhbt */
8228 if (shift)
8229 tcg_gen_shli_i32(tmp2, tmp2, shift);
8230 tcg_gen_ext16u_i32(tmp, tmp);
8231 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8232 }
8233 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8234 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8235 store_reg(s, rd, tmp);
8236 } else {
2af9ab77
JB
8237 /* Data processing register constant shift. */
8238 if (rn == 15) {
7d1b0095 8239 tmp = tcg_temp_new_i32();
2af9ab77
JB
8240 tcg_gen_movi_i32(tmp, 0);
8241 } else {
8242 tmp = load_reg(s, rn);
8243 }
8244 tmp2 = load_reg(s, rm);
8245
8246 shiftop = (insn >> 4) & 3;
8247 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8248 conds = (insn & (1 << 20)) != 0;
8249 logic_cc = (conds && thumb2_logic_op(op));
8250 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8251 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8252 goto illegal_op;
7d1b0095 8253 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8254 if (rd != 15) {
8255 store_reg(s, rd, tmp);
8256 } else {
7d1b0095 8257 tcg_temp_free_i32(tmp);
2af9ab77 8258 }
3174f8e9 8259 }
9ee6e8bb
PB
8260 break;
8261 case 13: /* Misc data processing. */
8262 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8263 if (op < 4 && (insn & 0xf000) != 0xf000)
8264 goto illegal_op;
8265 switch (op) {
8266 case 0: /* Register controlled shift. */
8984bd2e
PB
8267 tmp = load_reg(s, rn);
8268 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8269 if ((insn & 0x70) != 0)
8270 goto illegal_op;
8271 op = (insn >> 21) & 3;
8984bd2e
PB
8272 logic_cc = (insn & (1 << 20)) != 0;
8273 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8274 if (logic_cc)
8275 gen_logic_CC(tmp);
21aeb343 8276 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8277 break;
8278 case 1: /* Sign/zero extend. */
5e3f878a 8279 tmp = load_reg(s, rm);
9ee6e8bb 8280 shift = (insn >> 4) & 3;
1301f322 8281 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8282 rotate, a shift is sufficient. */
8283 if (shift != 0)
f669df27 8284 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8285 op = (insn >> 20) & 7;
8286 switch (op) {
5e3f878a
PB
8287 case 0: gen_sxth(tmp); break;
8288 case 1: gen_uxth(tmp); break;
8289 case 2: gen_sxtb16(tmp); break;
8290 case 3: gen_uxtb16(tmp); break;
8291 case 4: gen_sxtb(tmp); break;
8292 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8293 default: goto illegal_op;
8294 }
8295 if (rn != 15) {
5e3f878a 8296 tmp2 = load_reg(s, rn);
9ee6e8bb 8297 if ((op >> 1) == 1) {
5e3f878a 8298 gen_add16(tmp, tmp2);
9ee6e8bb 8299 } else {
5e3f878a 8300 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8301 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8302 }
8303 }
5e3f878a 8304 store_reg(s, rd, tmp);
9ee6e8bb
PB
8305 break;
8306 case 2: /* SIMD add/subtract. */
8307 op = (insn >> 20) & 7;
8308 shift = (insn >> 4) & 7;
8309 if ((op & 3) == 3 || (shift & 3) == 3)
8310 goto illegal_op;
6ddbc6e4
PB
8311 tmp = load_reg(s, rn);
8312 tmp2 = load_reg(s, rm);
8313 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8314 tcg_temp_free_i32(tmp2);
6ddbc6e4 8315 store_reg(s, rd, tmp);
9ee6e8bb
PB
8316 break;
8317 case 3: /* Other data processing. */
8318 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8319 if (op < 4) {
8320 /* Saturating add/subtract. */
d9ba4830
PB
8321 tmp = load_reg(s, rn);
8322 tmp2 = load_reg(s, rm);
9ee6e8bb 8323 if (op & 1)
4809c612
JB
8324 gen_helper_double_saturate(tmp, tmp);
8325 if (op & 2)
d9ba4830 8326 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8327 else
d9ba4830 8328 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8329 tcg_temp_free_i32(tmp2);
9ee6e8bb 8330 } else {
d9ba4830 8331 tmp = load_reg(s, rn);
9ee6e8bb
PB
8332 switch (op) {
8333 case 0x0a: /* rbit */
d9ba4830 8334 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8335 break;
8336 case 0x08: /* rev */
66896cb8 8337 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8338 break;
8339 case 0x09: /* rev16 */
d9ba4830 8340 gen_rev16(tmp);
9ee6e8bb
PB
8341 break;
8342 case 0x0b: /* revsh */
d9ba4830 8343 gen_revsh(tmp);
9ee6e8bb
PB
8344 break;
8345 case 0x10: /* sel */
d9ba4830 8346 tmp2 = load_reg(s, rm);
7d1b0095 8347 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8348 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8349 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8350 tcg_temp_free_i32(tmp3);
8351 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8352 break;
8353 case 0x18: /* clz */
d9ba4830 8354 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8355 break;
8356 default:
8357 goto illegal_op;
8358 }
8359 }
d9ba4830 8360 store_reg(s, rd, tmp);
9ee6e8bb
PB
8361 break;
8362 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8363 op = (insn >> 4) & 0xf;
d9ba4830
PB
8364 tmp = load_reg(s, rn);
8365 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8366 switch ((insn >> 20) & 7) {
8367 case 0: /* 32 x 32 -> 32 */
d9ba4830 8368 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8369 tcg_temp_free_i32(tmp2);
9ee6e8bb 8370 if (rs != 15) {
d9ba4830 8371 tmp2 = load_reg(s, rs);
9ee6e8bb 8372 if (op)
d9ba4830 8373 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8374 else
d9ba4830 8375 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8376 tcg_temp_free_i32(tmp2);
9ee6e8bb 8377 }
9ee6e8bb
PB
8378 break;
8379 case 1: /* 16 x 16 -> 32 */
d9ba4830 8380 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8381 tcg_temp_free_i32(tmp2);
9ee6e8bb 8382 if (rs != 15) {
d9ba4830
PB
8383 tmp2 = load_reg(s, rs);
8384 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8385 tcg_temp_free_i32(tmp2);
9ee6e8bb 8386 }
9ee6e8bb
PB
8387 break;
8388 case 2: /* Dual multiply add. */
8389 case 4: /* Dual multiply subtract. */
8390 if (op)
d9ba4830
PB
8391 gen_swap_half(tmp2);
8392 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8393 if (insn & (1 << 22)) {
e1d177b9 8394 /* This subtraction cannot overflow. */
d9ba4830 8395 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8396 } else {
e1d177b9
PM
8397 /* This addition cannot overflow 32 bits;
8398 * however it may overflow considered as a signed
8399 * operation, in which case we must set the Q flag.
8400 */
8401 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8402 }
7d1b0095 8403 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8404 if (rs != 15)
8405 {
d9ba4830
PB
8406 tmp2 = load_reg(s, rs);
8407 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8408 tcg_temp_free_i32(tmp2);
9ee6e8bb 8409 }
9ee6e8bb
PB
8410 break;
8411 case 3: /* 32 * 16 -> 32msb */
8412 if (op)
d9ba4830 8413 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8414 else
d9ba4830 8415 gen_sxth(tmp2);
a7812ae4
PB
8416 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8417 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8418 tmp = tcg_temp_new_i32();
a7812ae4 8419 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8420 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8421 if (rs != 15)
8422 {
d9ba4830
PB
8423 tmp2 = load_reg(s, rs);
8424 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8425 tcg_temp_free_i32(tmp2);
9ee6e8bb 8426 }
9ee6e8bb 8427 break;
838fa72d
AJ
8428 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8429 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8430 if (rs != 15) {
838fa72d
AJ
8431 tmp = load_reg(s, rs);
8432 if (insn & (1 << 20)) {
8433 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8434 } else {
838fa72d 8435 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8436 }
2c0262af 8437 }
838fa72d
AJ
8438 if (insn & (1 << 4)) {
8439 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8440 }
8441 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8442 tmp = tcg_temp_new_i32();
838fa72d
AJ
8443 tcg_gen_trunc_i64_i32(tmp, tmp64);
8444 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8445 break;
8446 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8447 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8448 tcg_temp_free_i32(tmp2);
9ee6e8bb 8449 if (rs != 15) {
d9ba4830
PB
8450 tmp2 = load_reg(s, rs);
8451 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8452 tcg_temp_free_i32(tmp2);
5fd46862 8453 }
9ee6e8bb 8454 break;
2c0262af 8455 }
d9ba4830 8456 store_reg(s, rd, tmp);
2c0262af 8457 break;
9ee6e8bb
PB
8458 case 6: case 7: /* 64-bit multiply, Divide. */
8459 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8460 tmp = load_reg(s, rn);
8461 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8462 if ((op & 0x50) == 0x10) {
8463 /* sdiv, udiv */
8464 if (!arm_feature(env, ARM_FEATURE_DIV))
8465 goto illegal_op;
8466 if (op & 0x20)
5e3f878a 8467 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8468 else
5e3f878a 8469 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8470 tcg_temp_free_i32(tmp2);
5e3f878a 8471 store_reg(s, rd, tmp);
9ee6e8bb
PB
8472 } else if ((op & 0xe) == 0xc) {
8473 /* Dual multiply accumulate long. */
8474 if (op & 1)
5e3f878a
PB
8475 gen_swap_half(tmp2);
8476 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8477 if (op & 0x10) {
5e3f878a 8478 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8479 } else {
5e3f878a 8480 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8481 }
7d1b0095 8482 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8483 /* BUGFIX */
8484 tmp64 = tcg_temp_new_i64();
8485 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8486 tcg_temp_free_i32(tmp);
a7812ae4
PB
8487 gen_addq(s, tmp64, rs, rd);
8488 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8489 tcg_temp_free_i64(tmp64);
2c0262af 8490 } else {
9ee6e8bb
PB
8491 if (op & 0x20) {
8492 /* Unsigned 64-bit multiply */
a7812ae4 8493 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8494 } else {
9ee6e8bb
PB
8495 if (op & 8) {
8496 /* smlalxy */
5e3f878a 8497 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8498 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8499 tmp64 = tcg_temp_new_i64();
8500 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8501 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8502 } else {
8503 /* Signed 64-bit multiply */
a7812ae4 8504 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8505 }
b5ff1b31 8506 }
9ee6e8bb
PB
8507 if (op & 4) {
8508 /* umaal */
a7812ae4
PB
8509 gen_addq_lo(s, tmp64, rs);
8510 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8511 } else if (op & 0x40) {
8512 /* 64-bit accumulate. */
a7812ae4 8513 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8514 }
a7812ae4 8515 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8516 tcg_temp_free_i64(tmp64);
5fd46862 8517 }
2c0262af 8518 break;
9ee6e8bb
PB
8519 }
8520 break;
8521 case 6: case 7: case 14: case 15:
8522 /* Coprocessor. */
8523 if (((insn >> 24) & 3) == 3) {
8524 /* Translate into the equivalent ARM encoding. */
f06053e3 8525 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8526 if (disas_neon_data_insn(env, s, insn))
8527 goto illegal_op;
8528 } else {
8529 if (insn & (1 << 28))
8530 goto illegal_op;
8531 if (disas_coproc_insn (env, s, insn))
8532 goto illegal_op;
8533 }
8534 break;
8535 case 8: case 9: case 10: case 11:
8536 if (insn & (1 << 15)) {
8537 /* Branches, misc control. */
8538 if (insn & 0x5000) {
8539 /* Unconditional branch. */
8540 /* signextend(hw1[10:0]) -> offset[:12]. */
8541 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8542 /* hw1[10:0] -> offset[11:1]. */
8543 offset |= (insn & 0x7ff) << 1;
8544 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8545 offset[24:22] already have the same value because of the
8546 sign extension above. */
8547 offset ^= ((~insn) & (1 << 13)) << 10;
8548 offset ^= ((~insn) & (1 << 11)) << 11;
8549
9ee6e8bb
PB
8550 if (insn & (1 << 14)) {
8551 /* Branch and link. */
3174f8e9 8552 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8553 }
3b46e624 8554
b0109805 8555 offset += s->pc;
9ee6e8bb
PB
8556 if (insn & (1 << 12)) {
8557 /* b/bl */
b0109805 8558 gen_jmp(s, offset);
9ee6e8bb
PB
8559 } else {
8560 /* blx */
b0109805 8561 offset &= ~(uint32_t)2;
be5e7a76 8562 /* thumb2 bx, no need to check */
b0109805 8563 gen_bx_im(s, offset);
2c0262af 8564 }
9ee6e8bb
PB
8565 } else if (((insn >> 23) & 7) == 7) {
8566 /* Misc control */
8567 if (insn & (1 << 13))
8568 goto illegal_op;
8569
8570 if (insn & (1 << 26)) {
8571 /* Secure monitor call (v6Z) */
8572 goto illegal_op; /* not implemented. */
2c0262af 8573 } else {
9ee6e8bb
PB
8574 op = (insn >> 20) & 7;
8575 switch (op) {
8576 case 0: /* msr cpsr. */
8577 if (IS_M(env)) {
8984bd2e
PB
8578 tmp = load_reg(s, rn);
8579 addr = tcg_const_i32(insn & 0xff);
8580 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8581 tcg_temp_free_i32(addr);
7d1b0095 8582 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8583 gen_lookup_tb(s);
8584 break;
8585 }
8586 /* fall through */
8587 case 1: /* msr spsr. */
8588 if (IS_M(env))
8589 goto illegal_op;
2fbac54b
FN
8590 tmp = load_reg(s, rn);
8591 if (gen_set_psr(s,
9ee6e8bb 8592 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8593 op == 1, tmp))
9ee6e8bb
PB
8594 goto illegal_op;
8595 break;
8596 case 2: /* cps, nop-hint. */
8597 if (((insn >> 8) & 7) == 0) {
8598 gen_nop_hint(s, insn & 0xff);
8599 }
8600 /* Implemented as NOP in user mode. */
8601 if (IS_USER(s))
8602 break;
8603 offset = 0;
8604 imm = 0;
8605 if (insn & (1 << 10)) {
8606 if (insn & (1 << 7))
8607 offset |= CPSR_A;
8608 if (insn & (1 << 6))
8609 offset |= CPSR_I;
8610 if (insn & (1 << 5))
8611 offset |= CPSR_F;
8612 if (insn & (1 << 9))
8613 imm = CPSR_A | CPSR_I | CPSR_F;
8614 }
8615 if (insn & (1 << 8)) {
8616 offset |= 0x1f;
8617 imm |= (insn & 0x1f);
8618 }
8619 if (offset) {
2fbac54b 8620 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8621 }
8622 break;
8623 case 3: /* Special control operations. */
426f5abc 8624 ARCH(7);
9ee6e8bb
PB
8625 op = (insn >> 4) & 0xf;
8626 switch (op) {
8627 case 2: /* clrex */
426f5abc 8628 gen_clrex(s);
9ee6e8bb
PB
8629 break;
8630 case 4: /* dsb */
8631 case 5: /* dmb */
8632 case 6: /* isb */
8633 /* These execute as NOPs. */
9ee6e8bb
PB
8634 break;
8635 default:
8636 goto illegal_op;
8637 }
8638 break;
8639 case 4: /* bxj */
8640 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8641 tmp = load_reg(s, rn);
8642 gen_bx(s, tmp);
9ee6e8bb
PB
8643 break;
8644 case 5: /* Exception return. */
b8b45b68
RV
8645 if (IS_USER(s)) {
8646 goto illegal_op;
8647 }
8648 if (rn != 14 || rd != 15) {
8649 goto illegal_op;
8650 }
8651 tmp = load_reg(s, rn);
8652 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8653 gen_exception_return(s, tmp);
8654 break;
9ee6e8bb 8655 case 6: /* mrs cpsr. */
7d1b0095 8656 tmp = tcg_temp_new_i32();
9ee6e8bb 8657 if (IS_M(env)) {
8984bd2e
PB
8658 addr = tcg_const_i32(insn & 0xff);
8659 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8660 tcg_temp_free_i32(addr);
9ee6e8bb 8661 } else {
8984bd2e 8662 gen_helper_cpsr_read(tmp);
9ee6e8bb 8663 }
8984bd2e 8664 store_reg(s, rd, tmp);
9ee6e8bb
PB
8665 break;
8666 case 7: /* mrs spsr. */
8667 /* Not accessible in user mode. */
8668 if (IS_USER(s) || IS_M(env))
8669 goto illegal_op;
d9ba4830
PB
8670 tmp = load_cpu_field(spsr);
8671 store_reg(s, rd, tmp);
9ee6e8bb 8672 break;
2c0262af
FB
8673 }
8674 }
9ee6e8bb
PB
8675 } else {
8676 /* Conditional branch. */
8677 op = (insn >> 22) & 0xf;
8678 /* Generate a conditional jump to next instruction. */
8679 s->condlabel = gen_new_label();
d9ba4830 8680 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8681 s->condjmp = 1;
8682
8683 /* offset[11:1] = insn[10:0] */
8684 offset = (insn & 0x7ff) << 1;
8685 /* offset[17:12] = insn[21:16]. */
8686 offset |= (insn & 0x003f0000) >> 4;
8687 /* offset[31:20] = insn[26]. */
8688 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8689 /* offset[18] = insn[13]. */
8690 offset |= (insn & (1 << 13)) << 5;
8691 /* offset[19] = insn[11]. */
8692 offset |= (insn & (1 << 11)) << 8;
8693
8694 /* jump to the offset */
b0109805 8695 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8696 }
8697 } else {
8698 /* Data processing immediate. */
8699 if (insn & (1 << 25)) {
8700 if (insn & (1 << 24)) {
8701 if (insn & (1 << 20))
8702 goto illegal_op;
8703 /* Bitfield/Saturate. */
8704 op = (insn >> 21) & 7;
8705 imm = insn & 0x1f;
8706 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8707 if (rn == 15) {
7d1b0095 8708 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8709 tcg_gen_movi_i32(tmp, 0);
8710 } else {
8711 tmp = load_reg(s, rn);
8712 }
9ee6e8bb
PB
8713 switch (op) {
8714 case 2: /* Signed bitfield extract. */
8715 imm++;
8716 if (shift + imm > 32)
8717 goto illegal_op;
8718 if (imm < 32)
6ddbc6e4 8719 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8720 break;
8721 case 6: /* Unsigned bitfield extract. */
8722 imm++;
8723 if (shift + imm > 32)
8724 goto illegal_op;
8725 if (imm < 32)
6ddbc6e4 8726 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8727 break;
8728 case 3: /* Bitfield insert/clear. */
8729 if (imm < shift)
8730 goto illegal_op;
8731 imm = imm + 1 - shift;
8732 if (imm != 32) {
6ddbc6e4 8733 tmp2 = load_reg(s, rd);
8f8e3aa4 8734 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8735 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8736 }
8737 break;
8738 case 7:
8739 goto illegal_op;
8740 default: /* Saturate. */
9ee6e8bb
PB
8741 if (shift) {
8742 if (op & 1)
6ddbc6e4 8743 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8744 else
6ddbc6e4 8745 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8746 }
6ddbc6e4 8747 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8748 if (op & 4) {
8749 /* Unsigned. */
9ee6e8bb 8750 if ((op & 1) && shift == 0)
6ddbc6e4 8751 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8752 else
6ddbc6e4 8753 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8754 } else {
9ee6e8bb 8755 /* Signed. */
9ee6e8bb 8756 if ((op & 1) && shift == 0)
6ddbc6e4 8757 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8758 else
6ddbc6e4 8759 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8760 }
b75263d6 8761 tcg_temp_free_i32(tmp2);
9ee6e8bb 8762 break;
2c0262af 8763 }
6ddbc6e4 8764 store_reg(s, rd, tmp);
9ee6e8bb
PB
8765 } else {
8766 imm = ((insn & 0x04000000) >> 15)
8767 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8768 if (insn & (1 << 22)) {
8769 /* 16-bit immediate. */
8770 imm |= (insn >> 4) & 0xf000;
8771 if (insn & (1 << 23)) {
8772 /* movt */
5e3f878a 8773 tmp = load_reg(s, rd);
86831435 8774 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8775 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8776 } else {
9ee6e8bb 8777 /* movw */
7d1b0095 8778 tmp = tcg_temp_new_i32();
5e3f878a 8779 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8780 }
8781 } else {
9ee6e8bb
PB
8782 /* Add/sub 12-bit immediate. */
8783 if (rn == 15) {
b0109805 8784 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8785 if (insn & (1 << 23))
b0109805 8786 offset -= imm;
9ee6e8bb 8787 else
b0109805 8788 offset += imm;
7d1b0095 8789 tmp = tcg_temp_new_i32();
5e3f878a 8790 tcg_gen_movi_i32(tmp, offset);
2c0262af 8791 } else {
5e3f878a 8792 tmp = load_reg(s, rn);
9ee6e8bb 8793 if (insn & (1 << 23))
5e3f878a 8794 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8795 else
5e3f878a 8796 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8797 }
9ee6e8bb 8798 }
5e3f878a 8799 store_reg(s, rd, tmp);
191abaa2 8800 }
9ee6e8bb
PB
8801 } else {
8802 int shifter_out = 0;
8803 /* modified 12-bit immediate. */
8804 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8805 imm = (insn & 0xff);
8806 switch (shift) {
8807 case 0: /* XY */
8808 /* Nothing to do. */
8809 break;
8810 case 1: /* 00XY00XY */
8811 imm |= imm << 16;
8812 break;
8813 case 2: /* XY00XY00 */
8814 imm |= imm << 16;
8815 imm <<= 8;
8816 break;
8817 case 3: /* XYXYXYXY */
8818 imm |= imm << 16;
8819 imm |= imm << 8;
8820 break;
8821 default: /* Rotated constant. */
8822 shift = (shift << 1) | (imm >> 7);
8823 imm |= 0x80;
8824 imm = imm << (32 - shift);
8825 shifter_out = 1;
8826 break;
b5ff1b31 8827 }
7d1b0095 8828 tmp2 = tcg_temp_new_i32();
3174f8e9 8829 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8830 rn = (insn >> 16) & 0xf;
3174f8e9 8831 if (rn == 15) {
7d1b0095 8832 tmp = tcg_temp_new_i32();
3174f8e9
FN
8833 tcg_gen_movi_i32(tmp, 0);
8834 } else {
8835 tmp = load_reg(s, rn);
8836 }
9ee6e8bb
PB
8837 op = (insn >> 21) & 0xf;
8838 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8839 shifter_out, tmp, tmp2))
9ee6e8bb 8840 goto illegal_op;
7d1b0095 8841 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8842 rd = (insn >> 8) & 0xf;
8843 if (rd != 15) {
3174f8e9
FN
8844 store_reg(s, rd, tmp);
8845 } else {
7d1b0095 8846 tcg_temp_free_i32(tmp);
2c0262af 8847 }
2c0262af 8848 }
9ee6e8bb
PB
8849 }
8850 break;
8851 case 12: /* Load/store single data item. */
8852 {
8853 int postinc = 0;
8854 int writeback = 0;
b0109805 8855 int user;
9ee6e8bb
PB
8856 if ((insn & 0x01100000) == 0x01000000) {
8857 if (disas_neon_ls_insn(env, s, insn))
c1713132 8858 goto illegal_op;
9ee6e8bb
PB
8859 break;
8860 }
a2fdc890
PM
8861 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8862 if (rs == 15) {
8863 if (!(insn & (1 << 20))) {
8864 goto illegal_op;
8865 }
8866 if (op != 2) {
8867 /* Byte or halfword load space with dest == r15 : memory hints.
8868 * Catch them early so we don't emit pointless addressing code.
8869 * This space is a mix of:
8870 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8871 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8872 * cores)
8873 * unallocated hints, which must be treated as NOPs
8874 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8875 * which is easiest for the decoding logic
8876 * Some space which must UNDEF
8877 */
8878 int op1 = (insn >> 23) & 3;
8879 int op2 = (insn >> 6) & 0x3f;
8880 if (op & 2) {
8881 goto illegal_op;
8882 }
8883 if (rn == 15) {
8884 /* UNPREDICTABLE or unallocated hint */
8885 return 0;
8886 }
8887 if (op1 & 1) {
8888 return 0; /* PLD* or unallocated hint */
8889 }
8890 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8891 return 0; /* PLD* or unallocated hint */
8892 }
8893 /* UNDEF space, or an UNPREDICTABLE */
8894 return 1;
8895 }
8896 }
b0109805 8897 user = IS_USER(s);
9ee6e8bb 8898 if (rn == 15) {
7d1b0095 8899 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8900 /* PC relative. */
8901 /* s->pc has already been incremented by 4. */
8902 imm = s->pc & 0xfffffffc;
8903 if (insn & (1 << 23))
8904 imm += insn & 0xfff;
8905 else
8906 imm -= insn & 0xfff;
b0109805 8907 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8908 } else {
b0109805 8909 addr = load_reg(s, rn);
9ee6e8bb
PB
8910 if (insn & (1 << 23)) {
8911 /* Positive offset. */
8912 imm = insn & 0xfff;
b0109805 8913 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8914 } else {
9ee6e8bb 8915 imm = insn & 0xff;
2a0308c5
PM
8916 switch ((insn >> 8) & 0xf) {
8917 case 0x0: /* Shifted Register. */
9ee6e8bb 8918 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8919 if (shift > 3) {
8920 tcg_temp_free_i32(addr);
18c9b560 8921 goto illegal_op;
2a0308c5 8922 }
b26eefb6 8923 tmp = load_reg(s, rm);
9ee6e8bb 8924 if (shift)
b26eefb6 8925 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8926 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8927 tcg_temp_free_i32(tmp);
9ee6e8bb 8928 break;
2a0308c5 8929 case 0xc: /* Negative offset. */
b0109805 8930 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8931 break;
2a0308c5 8932 case 0xe: /* User privilege. */
b0109805
PB
8933 tcg_gen_addi_i32(addr, addr, imm);
8934 user = 1;
9ee6e8bb 8935 break;
2a0308c5 8936 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8937 imm = -imm;
8938 /* Fall through. */
2a0308c5 8939 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8940 postinc = 1;
8941 writeback = 1;
8942 break;
2a0308c5 8943 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8944 imm = -imm;
8945 /* Fall through. */
2a0308c5 8946 case 0xf: /* Pre-increment. */
b0109805 8947 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8948 writeback = 1;
8949 break;
8950 default:
2a0308c5 8951 tcg_temp_free_i32(addr);
b7bcbe95 8952 goto illegal_op;
9ee6e8bb
PB
8953 }
8954 }
8955 }
9ee6e8bb
PB
8956 if (insn & (1 << 20)) {
8957 /* Load. */
a2fdc890
PM
8958 switch (op) {
8959 case 0: tmp = gen_ld8u(addr, user); break;
8960 case 4: tmp = gen_ld8s(addr, user); break;
8961 case 1: tmp = gen_ld16u(addr, user); break;
8962 case 5: tmp = gen_ld16s(addr, user); break;
8963 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8964 default:
8965 tcg_temp_free_i32(addr);
8966 goto illegal_op;
a2fdc890
PM
8967 }
8968 if (rs == 15) {
8969 gen_bx(s, tmp);
9ee6e8bb 8970 } else {
a2fdc890 8971 store_reg(s, rs, tmp);
9ee6e8bb
PB
8972 }
8973 } else {
8974 /* Store. */
b0109805 8975 tmp = load_reg(s, rs);
9ee6e8bb 8976 switch (op) {
b0109805
PB
8977 case 0: gen_st8(tmp, addr, user); break;
8978 case 1: gen_st16(tmp, addr, user); break;
8979 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8980 default:
8981 tcg_temp_free_i32(addr);
8982 goto illegal_op;
b7bcbe95 8983 }
2c0262af 8984 }
9ee6e8bb 8985 if (postinc)
b0109805
PB
8986 tcg_gen_addi_i32(addr, addr, imm);
8987 if (writeback) {
8988 store_reg(s, rn, addr);
8989 } else {
7d1b0095 8990 tcg_temp_free_i32(addr);
b0109805 8991 }
9ee6e8bb
PB
8992 }
8993 break;
8994 default:
8995 goto illegal_op;
2c0262af 8996 }
9ee6e8bb
PB
8997 return 0;
8998illegal_op:
8999 return 1;
2c0262af
FB
9000}
9001
9ee6e8bb 9002static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
9003{
9004 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9005 int32_t offset;
9006 int i;
b26eefb6 9007 TCGv tmp;
d9ba4830 9008 TCGv tmp2;
b0109805 9009 TCGv addr;
99c475ab 9010
9ee6e8bb
PB
9011 if (s->condexec_mask) {
9012 cond = s->condexec_cond;
bedd2912
JB
9013 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9014 s->condlabel = gen_new_label();
9015 gen_test_cc(cond ^ 1, s->condlabel);
9016 s->condjmp = 1;
9017 }
9ee6e8bb
PB
9018 }
9019
b5ff1b31 9020 insn = lduw_code(s->pc);
99c475ab 9021 s->pc += 2;
b5ff1b31 9022
99c475ab
FB
9023 switch (insn >> 12) {
9024 case 0: case 1:
396e467c 9025
99c475ab
FB
9026 rd = insn & 7;
9027 op = (insn >> 11) & 3;
9028 if (op == 3) {
9029 /* add/subtract */
9030 rn = (insn >> 3) & 7;
396e467c 9031 tmp = load_reg(s, rn);
99c475ab
FB
9032 if (insn & (1 << 10)) {
9033 /* immediate */
7d1b0095 9034 tmp2 = tcg_temp_new_i32();
396e467c 9035 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9036 } else {
9037 /* reg */
9038 rm = (insn >> 6) & 7;
396e467c 9039 tmp2 = load_reg(s, rm);
99c475ab 9040 }
9ee6e8bb
PB
9041 if (insn & (1 << 9)) {
9042 if (s->condexec_mask)
396e467c 9043 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9044 else
396e467c 9045 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
9046 } else {
9047 if (s->condexec_mask)
396e467c 9048 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9049 else
396e467c 9050 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 9051 }
7d1b0095 9052 tcg_temp_free_i32(tmp2);
396e467c 9053 store_reg(s, rd, tmp);
99c475ab
FB
9054 } else {
9055 /* shift immediate */
9056 rm = (insn >> 3) & 7;
9057 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9058 tmp = load_reg(s, rm);
9059 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9060 if (!s->condexec_mask)
9061 gen_logic_CC(tmp);
9062 store_reg(s, rd, tmp);
99c475ab
FB
9063 }
9064 break;
9065 case 2: case 3:
9066 /* arithmetic large immediate */
9067 op = (insn >> 11) & 3;
9068 rd = (insn >> 8) & 0x7;
396e467c 9069 if (op == 0) { /* mov */
7d1b0095 9070 tmp = tcg_temp_new_i32();
396e467c 9071 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9072 if (!s->condexec_mask)
396e467c
FN
9073 gen_logic_CC(tmp);
9074 store_reg(s, rd, tmp);
9075 } else {
9076 tmp = load_reg(s, rd);
7d1b0095 9077 tmp2 = tcg_temp_new_i32();
396e467c
FN
9078 tcg_gen_movi_i32(tmp2, insn & 0xff);
9079 switch (op) {
9080 case 1: /* cmp */
9081 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9082 tcg_temp_free_i32(tmp);
9083 tcg_temp_free_i32(tmp2);
396e467c
FN
9084 break;
9085 case 2: /* add */
9086 if (s->condexec_mask)
9087 tcg_gen_add_i32(tmp, tmp, tmp2);
9088 else
9089 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 9090 tcg_temp_free_i32(tmp2);
396e467c
FN
9091 store_reg(s, rd, tmp);
9092 break;
9093 case 3: /* sub */
9094 if (s->condexec_mask)
9095 tcg_gen_sub_i32(tmp, tmp, tmp2);
9096 else
9097 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 9098 tcg_temp_free_i32(tmp2);
396e467c
FN
9099 store_reg(s, rd, tmp);
9100 break;
9101 }
99c475ab 9102 }
99c475ab
FB
9103 break;
9104 case 4:
9105 if (insn & (1 << 11)) {
9106 rd = (insn >> 8) & 7;
5899f386
FB
9107 /* load pc-relative. Bit 1 of PC is ignored. */
9108 val = s->pc + 2 + ((insn & 0xff) * 4);
9109 val &= ~(uint32_t)2;
7d1b0095 9110 addr = tcg_temp_new_i32();
b0109805
PB
9111 tcg_gen_movi_i32(addr, val);
9112 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9113 tcg_temp_free_i32(addr);
b0109805 9114 store_reg(s, rd, tmp);
99c475ab
FB
9115 break;
9116 }
9117 if (insn & (1 << 10)) {
9118 /* data processing extended or blx */
9119 rd = (insn & 7) | ((insn >> 4) & 8);
9120 rm = (insn >> 3) & 0xf;
9121 op = (insn >> 8) & 3;
9122 switch (op) {
9123 case 0: /* add */
396e467c
FN
9124 tmp = load_reg(s, rd);
9125 tmp2 = load_reg(s, rm);
9126 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9127 tcg_temp_free_i32(tmp2);
396e467c 9128 store_reg(s, rd, tmp);
99c475ab
FB
9129 break;
9130 case 1: /* cmp */
396e467c
FN
9131 tmp = load_reg(s, rd);
9132 tmp2 = load_reg(s, rm);
9133 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9134 tcg_temp_free_i32(tmp2);
9135 tcg_temp_free_i32(tmp);
99c475ab
FB
9136 break;
9137 case 2: /* mov/cpy */
396e467c
FN
9138 tmp = load_reg(s, rm);
9139 store_reg(s, rd, tmp);
99c475ab
FB
9140 break;
9141 case 3:/* branch [and link] exchange thumb register */
b0109805 9142 tmp = load_reg(s, rm);
99c475ab 9143 if (insn & (1 << 7)) {
be5e7a76 9144 ARCH(5);
99c475ab 9145 val = (uint32_t)s->pc | 1;
7d1b0095 9146 tmp2 = tcg_temp_new_i32();
b0109805
PB
9147 tcg_gen_movi_i32(tmp2, val);
9148 store_reg(s, 14, tmp2);
99c475ab 9149 }
be5e7a76 9150 /* already thumb, no need to check */
d9ba4830 9151 gen_bx(s, tmp);
99c475ab
FB
9152 break;
9153 }
9154 break;
9155 }
9156
9157 /* data processing register */
9158 rd = insn & 7;
9159 rm = (insn >> 3) & 7;
9160 op = (insn >> 6) & 0xf;
9161 if (op == 2 || op == 3 || op == 4 || op == 7) {
9162 /* the shift/rotate ops want the operands backwards */
9163 val = rm;
9164 rm = rd;
9165 rd = val;
9166 val = 1;
9167 } else {
9168 val = 0;
9169 }
9170
396e467c 9171 if (op == 9) { /* neg */
7d1b0095 9172 tmp = tcg_temp_new_i32();
396e467c
FN
9173 tcg_gen_movi_i32(tmp, 0);
9174 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9175 tmp = load_reg(s, rd);
9176 } else {
9177 TCGV_UNUSED(tmp);
9178 }
99c475ab 9179
396e467c 9180 tmp2 = load_reg(s, rm);
5899f386 9181 switch (op) {
99c475ab 9182 case 0x0: /* and */
396e467c 9183 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9184 if (!s->condexec_mask)
396e467c 9185 gen_logic_CC(tmp);
99c475ab
FB
9186 break;
9187 case 0x1: /* eor */
396e467c 9188 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9189 if (!s->condexec_mask)
396e467c 9190 gen_logic_CC(tmp);
99c475ab
FB
9191 break;
9192 case 0x2: /* lsl */
9ee6e8bb 9193 if (s->condexec_mask) {
396e467c 9194 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9195 } else {
396e467c
FN
9196 gen_helper_shl_cc(tmp2, tmp2, tmp);
9197 gen_logic_CC(tmp2);
9ee6e8bb 9198 }
99c475ab
FB
9199 break;
9200 case 0x3: /* lsr */
9ee6e8bb 9201 if (s->condexec_mask) {
396e467c 9202 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9203 } else {
396e467c
FN
9204 gen_helper_shr_cc(tmp2, tmp2, tmp);
9205 gen_logic_CC(tmp2);
9ee6e8bb 9206 }
99c475ab
FB
9207 break;
9208 case 0x4: /* asr */
9ee6e8bb 9209 if (s->condexec_mask) {
396e467c 9210 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9211 } else {
396e467c
FN
9212 gen_helper_sar_cc(tmp2, tmp2, tmp);
9213 gen_logic_CC(tmp2);
9ee6e8bb 9214 }
99c475ab
FB
9215 break;
9216 case 0x5: /* adc */
9ee6e8bb 9217 if (s->condexec_mask)
396e467c 9218 gen_adc(tmp, tmp2);
9ee6e8bb 9219 else
396e467c 9220 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9221 break;
9222 case 0x6: /* sbc */
9ee6e8bb 9223 if (s->condexec_mask)
396e467c 9224 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9225 else
396e467c 9226 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9227 break;
9228 case 0x7: /* ror */
9ee6e8bb 9229 if (s->condexec_mask) {
f669df27
AJ
9230 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9231 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9232 } else {
396e467c
FN
9233 gen_helper_ror_cc(tmp2, tmp2, tmp);
9234 gen_logic_CC(tmp2);
9ee6e8bb 9235 }
99c475ab
FB
9236 break;
9237 case 0x8: /* tst */
396e467c
FN
9238 tcg_gen_and_i32(tmp, tmp, tmp2);
9239 gen_logic_CC(tmp);
99c475ab 9240 rd = 16;
5899f386 9241 break;
99c475ab 9242 case 0x9: /* neg */
9ee6e8bb 9243 if (s->condexec_mask)
396e467c 9244 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9245 else
396e467c 9246 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9247 break;
9248 case 0xa: /* cmp */
396e467c 9249 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9250 rd = 16;
9251 break;
9252 case 0xb: /* cmn */
396e467c 9253 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9254 rd = 16;
9255 break;
9256 case 0xc: /* orr */
396e467c 9257 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9258 if (!s->condexec_mask)
396e467c 9259 gen_logic_CC(tmp);
99c475ab
FB
9260 break;
9261 case 0xd: /* mul */
7b2919a0 9262 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9263 if (!s->condexec_mask)
396e467c 9264 gen_logic_CC(tmp);
99c475ab
FB
9265 break;
9266 case 0xe: /* bic */
f669df27 9267 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9268 if (!s->condexec_mask)
396e467c 9269 gen_logic_CC(tmp);
99c475ab
FB
9270 break;
9271 case 0xf: /* mvn */
396e467c 9272 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9273 if (!s->condexec_mask)
396e467c 9274 gen_logic_CC(tmp2);
99c475ab 9275 val = 1;
5899f386 9276 rm = rd;
99c475ab
FB
9277 break;
9278 }
9279 if (rd != 16) {
396e467c
FN
9280 if (val) {
9281 store_reg(s, rm, tmp2);
9282 if (op != 0xf)
7d1b0095 9283 tcg_temp_free_i32(tmp);
396e467c
FN
9284 } else {
9285 store_reg(s, rd, tmp);
7d1b0095 9286 tcg_temp_free_i32(tmp2);
396e467c
FN
9287 }
9288 } else {
7d1b0095
PM
9289 tcg_temp_free_i32(tmp);
9290 tcg_temp_free_i32(tmp2);
99c475ab
FB
9291 }
9292 break;
9293
9294 case 5:
9295 /* load/store register offset. */
9296 rd = insn & 7;
9297 rn = (insn >> 3) & 7;
9298 rm = (insn >> 6) & 7;
9299 op = (insn >> 9) & 7;
b0109805 9300 addr = load_reg(s, rn);
b26eefb6 9301 tmp = load_reg(s, rm);
b0109805 9302 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9303 tcg_temp_free_i32(tmp);
99c475ab
FB
9304
9305 if (op < 3) /* store */
b0109805 9306 tmp = load_reg(s, rd);
99c475ab
FB
9307
9308 switch (op) {
9309 case 0: /* str */
b0109805 9310 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9311 break;
9312 case 1: /* strh */
b0109805 9313 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9314 break;
9315 case 2: /* strb */
b0109805 9316 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9317 break;
9318 case 3: /* ldrsb */
b0109805 9319 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9320 break;
9321 case 4: /* ldr */
b0109805 9322 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9323 break;
9324 case 5: /* ldrh */
b0109805 9325 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9326 break;
9327 case 6: /* ldrb */
b0109805 9328 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9329 break;
9330 case 7: /* ldrsh */
b0109805 9331 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9332 break;
9333 }
9334 if (op >= 3) /* load */
b0109805 9335 store_reg(s, rd, tmp);
7d1b0095 9336 tcg_temp_free_i32(addr);
99c475ab
FB
9337 break;
9338
9339 case 6:
9340 /* load/store word immediate offset */
9341 rd = insn & 7;
9342 rn = (insn >> 3) & 7;
b0109805 9343 addr = load_reg(s, rn);
99c475ab 9344 val = (insn >> 4) & 0x7c;
b0109805 9345 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9346
9347 if (insn & (1 << 11)) {
9348 /* load */
b0109805
PB
9349 tmp = gen_ld32(addr, IS_USER(s));
9350 store_reg(s, rd, tmp);
99c475ab
FB
9351 } else {
9352 /* store */
b0109805
PB
9353 tmp = load_reg(s, rd);
9354 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9355 }
7d1b0095 9356 tcg_temp_free_i32(addr);
99c475ab
FB
9357 break;
9358
9359 case 7:
9360 /* load/store byte immediate offset */
9361 rd = insn & 7;
9362 rn = (insn >> 3) & 7;
b0109805 9363 addr = load_reg(s, rn);
99c475ab 9364 val = (insn >> 6) & 0x1f;
b0109805 9365 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9366
9367 if (insn & (1 << 11)) {
9368 /* load */
b0109805
PB
9369 tmp = gen_ld8u(addr, IS_USER(s));
9370 store_reg(s, rd, tmp);
99c475ab
FB
9371 } else {
9372 /* store */
b0109805
PB
9373 tmp = load_reg(s, rd);
9374 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9375 }
7d1b0095 9376 tcg_temp_free_i32(addr);
99c475ab
FB
9377 break;
9378
9379 case 8:
9380 /* load/store halfword immediate offset */
9381 rd = insn & 7;
9382 rn = (insn >> 3) & 7;
b0109805 9383 addr = load_reg(s, rn);
99c475ab 9384 val = (insn >> 5) & 0x3e;
b0109805 9385 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9386
9387 if (insn & (1 << 11)) {
9388 /* load */
b0109805
PB
9389 tmp = gen_ld16u(addr, IS_USER(s));
9390 store_reg(s, rd, tmp);
99c475ab
FB
9391 } else {
9392 /* store */
b0109805
PB
9393 tmp = load_reg(s, rd);
9394 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9395 }
7d1b0095 9396 tcg_temp_free_i32(addr);
99c475ab
FB
9397 break;
9398
9399 case 9:
9400 /* load/store from stack */
9401 rd = (insn >> 8) & 7;
b0109805 9402 addr = load_reg(s, 13);
99c475ab 9403 val = (insn & 0xff) * 4;
b0109805 9404 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9405
9406 if (insn & (1 << 11)) {
9407 /* load */
b0109805
PB
9408 tmp = gen_ld32(addr, IS_USER(s));
9409 store_reg(s, rd, tmp);
99c475ab
FB
9410 } else {
9411 /* store */
b0109805
PB
9412 tmp = load_reg(s, rd);
9413 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9414 }
7d1b0095 9415 tcg_temp_free_i32(addr);
99c475ab
FB
9416 break;
9417
9418 case 10:
9419 /* add to high reg */
9420 rd = (insn >> 8) & 7;
5899f386
FB
9421 if (insn & (1 << 11)) {
9422 /* SP */
5e3f878a 9423 tmp = load_reg(s, 13);
5899f386
FB
9424 } else {
9425 /* PC. bit 1 is ignored. */
7d1b0095 9426 tmp = tcg_temp_new_i32();
5e3f878a 9427 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9428 }
99c475ab 9429 val = (insn & 0xff) * 4;
5e3f878a
PB
9430 tcg_gen_addi_i32(tmp, tmp, val);
9431 store_reg(s, rd, tmp);
99c475ab
FB
9432 break;
9433
9434 case 11:
9435 /* misc */
9436 op = (insn >> 8) & 0xf;
9437 switch (op) {
9438 case 0:
9439 /* adjust stack pointer */
b26eefb6 9440 tmp = load_reg(s, 13);
99c475ab
FB
9441 val = (insn & 0x7f) * 4;
9442 if (insn & (1 << 7))
6a0d8a1d 9443 val = -(int32_t)val;
b26eefb6
PB
9444 tcg_gen_addi_i32(tmp, tmp, val);
9445 store_reg(s, 13, tmp);
99c475ab
FB
9446 break;
9447
9ee6e8bb
PB
9448 case 2: /* sign/zero extend. */
9449 ARCH(6);
9450 rd = insn & 7;
9451 rm = (insn >> 3) & 7;
b0109805 9452 tmp = load_reg(s, rm);
9ee6e8bb 9453 switch ((insn >> 6) & 3) {
b0109805
PB
9454 case 0: gen_sxth(tmp); break;
9455 case 1: gen_sxtb(tmp); break;
9456 case 2: gen_uxth(tmp); break;
9457 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9458 }
b0109805 9459 store_reg(s, rd, tmp);
9ee6e8bb 9460 break;
99c475ab
FB
9461 case 4: case 5: case 0xc: case 0xd:
9462 /* push/pop */
b0109805 9463 addr = load_reg(s, 13);
5899f386
FB
9464 if (insn & (1 << 8))
9465 offset = 4;
99c475ab 9466 else
5899f386
FB
9467 offset = 0;
9468 for (i = 0; i < 8; i++) {
9469 if (insn & (1 << i))
9470 offset += 4;
9471 }
9472 if ((insn & (1 << 11)) == 0) {
b0109805 9473 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9474 }
99c475ab
FB
9475 for (i = 0; i < 8; i++) {
9476 if (insn & (1 << i)) {
9477 if (insn & (1 << 11)) {
9478 /* pop */
b0109805
PB
9479 tmp = gen_ld32(addr, IS_USER(s));
9480 store_reg(s, i, tmp);
99c475ab
FB
9481 } else {
9482 /* push */
b0109805
PB
9483 tmp = load_reg(s, i);
9484 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9485 }
5899f386 9486 /* advance to the next address. */
b0109805 9487 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9488 }
9489 }
a50f5b91 9490 TCGV_UNUSED(tmp);
99c475ab
FB
9491 if (insn & (1 << 8)) {
9492 if (insn & (1 << 11)) {
9493 /* pop pc */
b0109805 9494 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9495 /* don't set the pc until the rest of the instruction
9496 has completed */
9497 } else {
9498 /* push lr */
b0109805
PB
9499 tmp = load_reg(s, 14);
9500 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9501 }
b0109805 9502 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9503 }
5899f386 9504 if ((insn & (1 << 11)) == 0) {
b0109805 9505 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9506 }
99c475ab 9507 /* write back the new stack pointer */
b0109805 9508 store_reg(s, 13, addr);
99c475ab 9509 /* set the new PC value */
be5e7a76
DES
9510 if ((insn & 0x0900) == 0x0900) {
9511 store_reg_from_load(env, s, 15, tmp);
9512 }
99c475ab
FB
9513 break;
9514
9ee6e8bb
PB
9515 case 1: case 3: case 9: case 11: /* czb */
9516 rm = insn & 7;
d9ba4830 9517 tmp = load_reg(s, rm);
9ee6e8bb
PB
9518 s->condlabel = gen_new_label();
9519 s->condjmp = 1;
9520 if (insn & (1 << 11))
cb63669a 9521 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9522 else
cb63669a 9523 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9524 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9525 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9526 val = (uint32_t)s->pc + 2;
9527 val += offset;
9528 gen_jmp(s, val);
9529 break;
9530
9531 case 15: /* IT, nop-hint. */
9532 if ((insn & 0xf) == 0) {
9533 gen_nop_hint(s, (insn >> 4) & 0xf);
9534 break;
9535 }
9536 /* If Then. */
9537 s->condexec_cond = (insn >> 4) & 0xe;
9538 s->condexec_mask = insn & 0x1f;
9539 /* No actual code generated for this insn, just setup state. */
9540 break;
9541
06c949e6 9542 case 0xe: /* bkpt */
be5e7a76 9543 ARCH(5);
bc4a0de0 9544 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9545 break;
9546
9ee6e8bb
PB
9547 case 0xa: /* rev */
9548 ARCH(6);
9549 rn = (insn >> 3) & 0x7;
9550 rd = insn & 0x7;
b0109805 9551 tmp = load_reg(s, rn);
9ee6e8bb 9552 switch ((insn >> 6) & 3) {
66896cb8 9553 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9554 case 1: gen_rev16(tmp); break;
9555 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9556 default: goto illegal_op;
9557 }
b0109805 9558 store_reg(s, rd, tmp);
9ee6e8bb
PB
9559 break;
9560
9561 case 6: /* cps */
9562 ARCH(6);
9563 if (IS_USER(s))
9564 break;
9565 if (IS_M(env)) {
8984bd2e 9566 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9567 /* PRIMASK */
8984bd2e
PB
9568 if (insn & 1) {
9569 addr = tcg_const_i32(16);
9570 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9571 tcg_temp_free_i32(addr);
8984bd2e 9572 }
9ee6e8bb 9573 /* FAULTMASK */
8984bd2e
PB
9574 if (insn & 2) {
9575 addr = tcg_const_i32(17);
9576 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9577 tcg_temp_free_i32(addr);
8984bd2e 9578 }
b75263d6 9579 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9580 gen_lookup_tb(s);
9581 } else {
9582 if (insn & (1 << 4))
9583 shift = CPSR_A | CPSR_I | CPSR_F;
9584 else
9585 shift = 0;
fa26df03 9586 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9587 }
9588 break;
9589
99c475ab
FB
9590 default:
9591 goto undef;
9592 }
9593 break;
9594
9595 case 12:
a7d3970d 9596 {
99c475ab 9597 /* load/store multiple */
a7d3970d
PM
9598 TCGv loaded_var;
9599 TCGV_UNUSED(loaded_var);
99c475ab 9600 rn = (insn >> 8) & 0x7;
b0109805 9601 addr = load_reg(s, rn);
99c475ab
FB
9602 for (i = 0; i < 8; i++) {
9603 if (insn & (1 << i)) {
99c475ab
FB
9604 if (insn & (1 << 11)) {
9605 /* load */
b0109805 9606 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9607 if (i == rn) {
9608 loaded_var = tmp;
9609 } else {
9610 store_reg(s, i, tmp);
9611 }
99c475ab
FB
9612 } else {
9613 /* store */
b0109805
PB
9614 tmp = load_reg(s, i);
9615 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9616 }
5899f386 9617 /* advance to the next address */
b0109805 9618 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9619 }
9620 }
b0109805 9621 if ((insn & (1 << rn)) == 0) {
a7d3970d 9622 /* base reg not in list: base register writeback */
b0109805
PB
9623 store_reg(s, rn, addr);
9624 } else {
a7d3970d
PM
9625 /* base reg in list: if load, complete it now */
9626 if (insn & (1 << 11)) {
9627 store_reg(s, rn, loaded_var);
9628 }
7d1b0095 9629 tcg_temp_free_i32(addr);
b0109805 9630 }
99c475ab 9631 break;
a7d3970d 9632 }
99c475ab
FB
9633 case 13:
9634 /* conditional branch or swi */
9635 cond = (insn >> 8) & 0xf;
9636 if (cond == 0xe)
9637 goto undef;
9638
9639 if (cond == 0xf) {
9640 /* swi */
422ebf69 9641 gen_set_pc_im(s->pc);
9ee6e8bb 9642 s->is_jmp = DISAS_SWI;
99c475ab
FB
9643 break;
9644 }
9645 /* generate a conditional jump to next instruction */
e50e6a20 9646 s->condlabel = gen_new_label();
d9ba4830 9647 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9648 s->condjmp = 1;
99c475ab
FB
9649
9650 /* jump to the offset */
5899f386 9651 val = (uint32_t)s->pc + 2;
99c475ab 9652 offset = ((int32_t)insn << 24) >> 24;
5899f386 9653 val += offset << 1;
8aaca4c0 9654 gen_jmp(s, val);
99c475ab
FB
9655 break;
9656
9657 case 14:
358bf29e 9658 if (insn & (1 << 11)) {
9ee6e8bb
PB
9659 if (disas_thumb2_insn(env, s, insn))
9660 goto undef32;
358bf29e
PB
9661 break;
9662 }
9ee6e8bb 9663 /* unconditional branch */
99c475ab
FB
9664 val = (uint32_t)s->pc;
9665 offset = ((int32_t)insn << 21) >> 21;
9666 val += (offset << 1) + 2;
8aaca4c0 9667 gen_jmp(s, val);
99c475ab
FB
9668 break;
9669
9670 case 15:
9ee6e8bb 9671 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9672 goto undef32;
9ee6e8bb 9673 break;
99c475ab
FB
9674 }
9675 return;
9ee6e8bb 9676undef32:
bc4a0de0 9677 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9678 return;
9679illegal_op:
99c475ab 9680undef:
bc4a0de0 9681 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9682}
9683
2c0262af
FB
9684/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9685 basic block 'tb'. If search_pc is TRUE, also generate PC
9686 information for each intermediate instruction. */
2cfc5f17
TS
9687static inline void gen_intermediate_code_internal(CPUState *env,
9688 TranslationBlock *tb,
9689 int search_pc)
2c0262af
FB
9690{
9691 DisasContext dc1, *dc = &dc1;
a1d1bb31 9692 CPUBreakpoint *bp;
2c0262af
FB
9693 uint16_t *gen_opc_end;
9694 int j, lj;
0fa85d43 9695 target_ulong pc_start;
b5ff1b31 9696 uint32_t next_page_start;
2e70f6ef
PB
9697 int num_insns;
9698 int max_insns;
3b46e624 9699
2c0262af 9700 /* generate intermediate code */
0fa85d43 9701 pc_start = tb->pc;
3b46e624 9702
2c0262af
FB
9703 dc->tb = tb;
9704
2c0262af 9705 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9706
9707 dc->is_jmp = DISAS_NEXT;
9708 dc->pc = pc_start;
8aaca4c0 9709 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9710 dc->condjmp = 0;
7204ab88 9711 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9712 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9713 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9714#if !defined(CONFIG_USER_ONLY)
61f74d6a 9715 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9716#endif
5df8bac1 9717 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9718 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9719 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9720 cpu_F0s = tcg_temp_new_i32();
9721 cpu_F1s = tcg_temp_new_i32();
9722 cpu_F0d = tcg_temp_new_i64();
9723 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9724 cpu_V0 = cpu_F0d;
9725 cpu_V1 = cpu_F1d;
e677137d 9726 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9727 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9728 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9729 lj = -1;
2e70f6ef
PB
9730 num_insns = 0;
9731 max_insns = tb->cflags & CF_COUNT_MASK;
9732 if (max_insns == 0)
9733 max_insns = CF_COUNT_MASK;
9734
9735 gen_icount_start();
e12ce78d 9736
3849902c
PM
9737 tcg_clear_temp_count();
9738
e12ce78d
PM
9739 /* A note on handling of the condexec (IT) bits:
9740 *
9741 * We want to avoid the overhead of having to write the updated condexec
9742 * bits back to the CPUState for every instruction in an IT block. So:
9743 * (1) if the condexec bits are not already zero then we write
9744 * zero back into the CPUState now. This avoids complications trying
9745 * to do it at the end of the block. (For example if we don't do this
9746 * it's hard to identify whether we can safely skip writing condexec
9747 * at the end of the TB, which we definitely want to do for the case
9748 * where a TB doesn't do anything with the IT state at all.)
9749 * (2) if we are going to leave the TB then we call gen_set_condexec()
9750 * which will write the correct value into CPUState if zero is wrong.
9751 * This is done both for leaving the TB at the end, and for leaving
9752 * it because of an exception we know will happen, which is done in
9753 * gen_exception_insn(). The latter is necessary because we need to
9754 * leave the TB with the PC/IT state just prior to execution of the
9755 * instruction which caused the exception.
9756 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9757 * then the CPUState will be wrong and we need to reset it.
9758 * This is handled in the same way as restoration of the
9759 * PC in these situations: we will be called again with search_pc=1
9760 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9761 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9762 * this to restore the condexec bits.
e12ce78d
PM
9763 *
9764 * Note that there are no instructions which can read the condexec
9765 * bits, and none which can write non-static values to them, so
9766 * we don't need to care about whether CPUState is correct in the
9767 * middle of a TB.
9768 */
9769
9ee6e8bb
PB
9770 /* Reset the conditional execution bits immediately. This avoids
9771 complications trying to do it at the end of the block. */
98eac7ca 9772 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9773 {
7d1b0095 9774 TCGv tmp = tcg_temp_new_i32();
8f01245e 9775 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9776 store_cpu_field(tmp, condexec_bits);
8f01245e 9777 }
2c0262af 9778 do {
fbb4a2e3
PB
9779#ifdef CONFIG_USER_ONLY
9780 /* Intercept jump to the magic kernel page. */
9781 if (dc->pc >= 0xffff0000) {
9782 /* We always get here via a jump, so know we are not in a
9783 conditional execution block. */
9784 gen_exception(EXCP_KERNEL_TRAP);
9785 dc->is_jmp = DISAS_UPDATE;
9786 break;
9787 }
9788#else
9ee6e8bb
PB
9789 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9790 /* We always get here via a jump, so know we are not in a
9791 conditional execution block. */
d9ba4830 9792 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9793 dc->is_jmp = DISAS_UPDATE;
9794 break;
9ee6e8bb
PB
9795 }
9796#endif
9797
72cf2d4f
BS
9798 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9799 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9800 if (bp->pc == dc->pc) {
bc4a0de0 9801 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9802 /* Advance PC so that clearing the breakpoint will
9803 invalidate this TB. */
9804 dc->pc += 2;
9805 goto done_generating;
1fddef4b
FB
9806 break;
9807 }
9808 }
9809 }
2c0262af
FB
9810 if (search_pc) {
9811 j = gen_opc_ptr - gen_opc_buf;
9812 if (lj < j) {
9813 lj++;
9814 while (lj < j)
9815 gen_opc_instr_start[lj++] = 0;
9816 }
0fa85d43 9817 gen_opc_pc[lj] = dc->pc;
e12ce78d 9818 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9819 gen_opc_instr_start[lj] = 1;
2e70f6ef 9820 gen_opc_icount[lj] = num_insns;
2c0262af 9821 }
e50e6a20 9822
2e70f6ef
PB
9823 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9824 gen_io_start();
9825
5642463a
PM
9826 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9827 tcg_gen_debug_insn_start(dc->pc);
9828 }
9829
7204ab88 9830 if (dc->thumb) {
9ee6e8bb
PB
9831 disas_thumb_insn(env, dc);
9832 if (dc->condexec_mask) {
9833 dc->condexec_cond = (dc->condexec_cond & 0xe)
9834 | ((dc->condexec_mask >> 4) & 1);
9835 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9836 if (dc->condexec_mask == 0) {
9837 dc->condexec_cond = 0;
9838 }
9839 }
9840 } else {
9841 disas_arm_insn(env, dc);
9842 }
e50e6a20
FB
9843
9844 if (dc->condjmp && !dc->is_jmp) {
9845 gen_set_label(dc->condlabel);
9846 dc->condjmp = 0;
9847 }
3849902c
PM
9848
9849 if (tcg_check_temp_count()) {
9850 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9851 }
9852
aaf2d97d 9853 /* Translation stops when a conditional branch is encountered.
e50e6a20 9854 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9855 * Also stop translation when a page boundary is reached. This
bf20dc07 9856 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9857 num_insns ++;
1fddef4b
FB
9858 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9859 !env->singlestep_enabled &&
1b530a6d 9860 !singlestep &&
2e70f6ef
PB
9861 dc->pc < next_page_start &&
9862 num_insns < max_insns);
9863
9864 if (tb->cflags & CF_LAST_IO) {
9865 if (dc->condjmp) {
9866 /* FIXME: This can theoretically happen with self-modifying
9867 code. */
9868 cpu_abort(env, "IO on conditional branch instruction");
9869 }
9870 gen_io_end();
9871 }
9ee6e8bb 9872
b5ff1b31 9873 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9874 instruction was a conditional branch or trap, and the PC has
9875 already been written. */
551bd27f 9876 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9877 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9878 if (dc->condjmp) {
9ee6e8bb
PB
9879 gen_set_condexec(dc);
9880 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9881 gen_exception(EXCP_SWI);
9ee6e8bb 9882 } else {
d9ba4830 9883 gen_exception(EXCP_DEBUG);
9ee6e8bb 9884 }
e50e6a20
FB
9885 gen_set_label(dc->condlabel);
9886 }
9887 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9888 gen_set_pc_im(dc->pc);
e50e6a20 9889 dc->condjmp = 0;
8aaca4c0 9890 }
9ee6e8bb
PB
9891 gen_set_condexec(dc);
9892 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9893 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9894 } else {
9895 /* FIXME: Single stepping a WFI insn will not halt
9896 the CPU. */
d9ba4830 9897 gen_exception(EXCP_DEBUG);
9ee6e8bb 9898 }
8aaca4c0 9899 } else {
9ee6e8bb
PB
9900 /* While branches must always occur at the end of an IT block,
9901 there are a few other things that can cause us to terminate
9902 the TB in the middel of an IT block:
9903 - Exception generating instructions (bkpt, swi, undefined).
9904 - Page boundaries.
9905 - Hardware watchpoints.
9906 Hardware breakpoints have already been handled and skip this code.
9907 */
9908 gen_set_condexec(dc);
8aaca4c0 9909 switch(dc->is_jmp) {
8aaca4c0 9910 case DISAS_NEXT:
6e256c93 9911 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9912 break;
9913 default:
9914 case DISAS_JUMP:
9915 case DISAS_UPDATE:
9916 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9917 tcg_gen_exit_tb(0);
8aaca4c0
FB
9918 break;
9919 case DISAS_TB_JUMP:
9920 /* nothing more to generate */
9921 break;
9ee6e8bb 9922 case DISAS_WFI:
d9ba4830 9923 gen_helper_wfi();
9ee6e8bb
PB
9924 break;
9925 case DISAS_SWI:
d9ba4830 9926 gen_exception(EXCP_SWI);
9ee6e8bb 9927 break;
8aaca4c0 9928 }
e50e6a20
FB
9929 if (dc->condjmp) {
9930 gen_set_label(dc->condlabel);
9ee6e8bb 9931 gen_set_condexec(dc);
6e256c93 9932 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9933 dc->condjmp = 0;
9934 }
2c0262af 9935 }
2e70f6ef 9936
9ee6e8bb 9937done_generating:
2e70f6ef 9938 gen_icount_end(tb, num_insns);
2c0262af
FB
9939 *gen_opc_ptr = INDEX_op_end;
9940
9941#ifdef DEBUG_DISAS
8fec2b8c 9942 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9943 qemu_log("----------------\n");
9944 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9945 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9946 qemu_log("\n");
2c0262af
FB
9947 }
9948#endif
b5ff1b31
FB
9949 if (search_pc) {
9950 j = gen_opc_ptr - gen_opc_buf;
9951 lj++;
9952 while (lj <= j)
9953 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9954 } else {
2c0262af 9955 tb->size = dc->pc - pc_start;
2e70f6ef 9956 tb->icount = num_insns;
b5ff1b31 9957 }
2c0262af
FB
9958}
9959
/* Translate a basic block without recording per-op PC information.  */
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
9964
/* Translate a basic block, additionally filling the gen_opc_* tables
   used by restore_state_to_opc() to recover guest state mid-TB.  */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
9969
b5ff1b31
FB
/* Human-readable names for the CPSR mode field, indexed by its low four
   bits (see cpu_dump_state: cpu_mode_names[psr & 0xf]).  "???" marks
   encodings this table does not name.  */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 9974
/* Dump the CPU register state to stream @f for debugging: R0..R15
   (four per line), then the PSR with NZCV flags, Thumb/ARM state
   ('T' or 'A'), mode name and 32/26-bit mode indicator.
   @flags is currently unused.  The #if 0 sections are a disabled
   VFP register dump.  */
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
a6b025d3 10026
/* Restore guest CPU state for the op at index @pc_pos, using the tables
   filled in by the search_pc translation pass: the PC (r15) and the
   IT/condexec bits (see the condexec note in
   gen_intermediate_code_internal).  */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}