/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J 0
#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

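/* Condexec (IT) bits recorded for each generated opcode, used when the
   CPU state has to be reconstructed for an instruction mid-TB. */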
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion. Insert val into base. Clobbers base and val. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

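/* Signed 32x32->64 multiply. Marks inputs as dead. */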
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF. */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}

/* FIXME: Implement this natively. */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) { \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) { \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

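/* Generate a branch to 'label' if condition 'cc' (an ARM condition code)
   holds, based on the cached NF/ZF/CF/VF flag fields. */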
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}

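/* Nonzero for the data processing ops that set the condition codes as a
   logical operation (N and Z from the result) rather than arithmetically. */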
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

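/* Add the immediate or shifted-register offset of a word/byte load/store
   instruction to the base address in 'var'. */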
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

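/* As gen_add_data_offset, but for the halfword/doubleword transfer
   encodings; 'extra' is folded into the immediate offset. */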
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

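/* Return the offset into CPUARMState of VFP register 'reg'; 'dp' selects
   the double-precision view of the register file. */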
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

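/* Move a single 32-bit pass of a NEON register to or from a temporary. */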
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_SIZE(name) \
IWMMXT_OP(name##b) \
IWMMXT_OP(name##w) \
IWMMXT_OP(name##l)

#define IWMMXT_OP_1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_SIZE(unpackl)
IWMMXT_OP_SIZE(unpackh)

IWMMXT_OP_1(unpacklub)
IWMMXT_OP_1(unpackluw)
IWMMXT_OP_1(unpacklul)
IWMMXT_OP_1(unpackhub)
IWMMXT_OP_1(unpackhuw)
IWMMXT_OP_1(unpackhul)
IWMMXT_OP_1(unpacklsb)
IWMMXT_OP_1(unpacklsw)
IWMMXT_OP_1(unpacklsl)
IWMMXT_OP_1(unpackhsb)
IWMMXT_OP_1(unpackhsw)
IWMMXT_OP_1(unpackhsl)

IWMMXT_OP_SIZE(cmpeq)
IWMMXT_OP_SIZE(cmpgtu)
IWMMXT_OP_SIZE(cmpgts)

IWMMXT_OP_SIZE(mins)
IWMMXT_OP_SIZE(minu)
IWMMXT_OP_SIZE(maxs)
IWMMXT_OP_SIZE(maxu)

IWMMXT_OP_SIZE(subn)
IWMMXT_OP_SIZE(addn)
IWMMXT_OP_SIZE(subu)
IWMMXT_OP_SIZE(addu)
IWMMXT_OP_SIZE(subs)
IWMMXT_OP_SIZE(adds)

IWMMXT_OP(avgb0)
IWMMXT_OP(avgb1)
IWMMXT_OP(avgw0)
IWMMXT_OP(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP(packuw)
IWMMXT_OP(packul)
IWMMXT_OP(packuq)
IWMMXT_OP(packsw)
IWMMXT_OP(packsl)
IWMMXT_OP(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

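/* Compute the address for an iwMMXt load/store into 'dest', handling
   pre/post indexing and base register writeback. Returns 1 for an invalid
   addressing mode. */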
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

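/* Fetch the iwMMXt shift amount for 'insn' into 'dest': either a wCGRn
   control register or the low bits of wRn, masked with 'mask'. Returns 1
   for an invalid register. */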
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through. */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1963 gen_op_iwmmxt_set_mup();
1964 gen_op_iwmmxt_set_cup();
1965 break;
1966 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1967 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
1968 if (((insn >> 22) & 3) == 0)
1969 return 1;
18c9b560
AZ
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1973 tmp = tcg_temp_new_i32();
da6b5335 1974 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 1975 tcg_temp_free_i32(tmp);
18c9b560 1976 return 1;
da6b5335 1977 }
18c9b560 1978 switch ((insn >> 22) & 3) {
18c9b560 1979 case 1:
947a2fa2 1980 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1981 break;
1982 case 2:
947a2fa2 1983 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1984 break;
1985 case 3:
947a2fa2 1986 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
1987 break;
1988 }
7d1b0095 1989 tcg_temp_free_i32(tmp);
18c9b560
AZ
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1995 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
1996 if (((insn >> 22) & 3) == 0)
1997 return 1;
18c9b560
AZ
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2001 tmp = tcg_temp_new_i32();
da6b5335 2002 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2003 tcg_temp_free_i32(tmp);
18c9b560 2004 return 1;
da6b5335 2005 }
18c9b560 2006 switch ((insn >> 22) & 3) {
18c9b560 2007 case 1:
947a2fa2 2008 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2009 break;
2010 case 2:
947a2fa2 2011 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2012 break;
2013 case 3:
947a2fa2 2014 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2015 break;
2016 }
7d1b0095 2017 tcg_temp_free_i32(tmp);
18c9b560
AZ
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2023 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2024 if (((insn >> 22) & 3) == 0)
2025 return 1;
18c9b560
AZ
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2029 tmp = tcg_temp_new_i32();
da6b5335 2030 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2031 tcg_temp_free_i32(tmp);
18c9b560 2032 return 1;
da6b5335 2033 }
18c9b560 2034 switch ((insn >> 22) & 3) {
18c9b560 2035 case 1:
947a2fa2 2036 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2037 break;
2038 case 2:
947a2fa2 2039 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2040 break;
2041 case 3:
947a2fa2 2042 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2043 break;
2044 }
7d1b0095 2045 tcg_temp_free_i32(tmp);
18c9b560
AZ
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2049 break;
2050 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2051 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2052 if (((insn >> 22) & 3) == 0)
2053 return 1;
18c9b560
AZ
2054 wrd = (insn >> 12) & 0xf;
2055 rd0 = (insn >> 16) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2057 tmp = tcg_temp_new_i32();
18c9b560 2058 switch ((insn >> 22) & 3) {
18c9b560 2059 case 1:
da6b5335 2060 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2061 tcg_temp_free_i32(tmp);
18c9b560 2062 return 1;
da6b5335 2063 }
947a2fa2 2064 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2065 break;
2066 case 2:
da6b5335 2067 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2068 tcg_temp_free_i32(tmp);
18c9b560 2069 return 1;
da6b5335 2070 }
947a2fa2 2071 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2072 break;
2073 case 3:
da6b5335 2074 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2075 tcg_temp_free_i32(tmp);
18c9b560 2076 return 1;
da6b5335 2077 }
947a2fa2 2078 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
18c9b560
AZ
2079 break;
2080 }
7d1b0095 2081 tcg_temp_free_i32(tmp);
18c9b560
AZ
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2087 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_minub_M0_wRn(rd1);
2098 break;
2099 case 1:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2104 break;
2105 case 2:
2106 if (insn & (1 << 21))
2107 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_minul_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2118 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2129 break;
2130 case 1:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2135 break;
2136 case 2:
2137 if (insn & (1 << 21))
2138 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2141 break;
2142 case 3:
2143 return 1;
2144 }
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 break;
2148 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2149 case 0x402: case 0x502: case 0x602: case 0x702:
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2154 tmp = tcg_const_i32((insn >> 20) & 3);
2155 iwmmxt_load_reg(cpu_V1, rd1);
2156 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2157 tcg_temp_free(tmp);
18c9b560
AZ
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2199 }
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
947a2fa2 2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
da6b5335 2213 tcg_temp_free(tmp);
18c9b560
AZ
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2256 }
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
18c9b560
AZ
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2271 switch ((insn >> 22) & 3) {
18c9b560
AZ
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
da6b5335 2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2310 break;
2311 case 0x8: /* TMIAPH */
da6b5335 2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2315 if (insn & (1 << 16))
da6b5335 2316 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2317 if (insn & (1 << 17))
da6b5335
FN
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2320 break;
2321 default:
7d1b0095
PM
2322 tcg_temp_free_i32(tmp2);
2323 tcg_temp_free_i32(tmp);
18c9b560
AZ
2324 return 1;
2325 }
7d1b0095
PM
2326 tcg_temp_free_i32(tmp2);
2327 tcg_temp_free_i32(tmp);
18c9b560
AZ
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2333 }
2334
2335 return 0;
2336}
2337
2338/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2339 (i.e. an undefined instruction). */
2340static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2341{
2342 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2343 TCGv tmp, tmp2;
18c9b560
AZ
2344
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2350
2351 if (acc != 0)
2352 return 1;
2353
3a554c0f
FN
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
3a554c0f 2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2359 break;
2360 case 0x8: /* MIAPH */
3a554c0f 2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
18c9b560 2367 if (insn & (1 << 16))
3a554c0f 2368 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2369 if (insn & (1 << 17))
3a554c0f
FN
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2372 break;
2373 default:
2374 return 1;
2375 }
7d1b0095
PM
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
18c9b560
AZ
2378
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2381 }
2382
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2388
2389 if (acc != 0)
2390 return 1;
2391
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2398 } else { /* MAR */
3a554c0f
FN
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2401 }
2402 return 0;
2403 }
2404
2405 return 1;
2406}
2407
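/* Editorial sketch, not part of the translator: the XScale DSP extension
 * keeps a single 40-bit accumulator (acc0), so the MRA path above returns
 * the low word in RdLo and only bits [39:32] in RdHi, which is what the
 * mask (1 << (40 - 32)) - 1 == 0xff expresses:
 *
 *   uint64_t acc  = ...;                  // 40-bit accumulator value
 *   uint32_t rdlo = (uint32_t)acc;        // bits [31:0]
 *   uint32_t rdhi = (acc >> 32) & 0xff;   // bits [39:32]
 */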
c1713132
AZ
2408/* Disassemble a system coprocessor instruction. Return nonzero if the
2409 instruction is not defined. */
2410static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2411{
b75263d6 2412 TCGv tmp, tmp2;
c1713132
AZ
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2417 }
2418
18c9b560 2419 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2420 if (!env->cp[cp].cp_read)
2421 return 1;
8984bd2e 2422 gen_set_pc_im(s->pc);
7d1b0095 2423 tmp = tcg_temp_new_i32();
b75263d6
JR
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
8984bd2e 2427 store_reg(s, rd, tmp);
c1713132
AZ
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
8984bd2e
PB
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
b75263d6
JR
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
7d1b0095 2436 tcg_temp_free_i32(tmp);
c1713132
AZ
2437 }
2438 return 0;
2439}
2440
9ee6e8bb
PB
2441static int cp15_user_ok(uint32_t insn)
2442{
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2446
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2451 }
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2457 }
2458 return 0;
2459}
2460
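/* Illustrative note: 'op' above packs opc2 in bits [2:0] and opc1 in bits
 * [5:3] of the MRC/MCR encoding, so for CRn == 13, CRm == 0 the accepted
 * values should correspond to the v6K thread-ID registers:
 *
 *   op == 2  ->  TPIDRURW  (user read/write, both MRC and MCR allowed)
 *   op == 3  ->  TPIDRURO  (user read-only, hence the ARM_CP_RW_BIT check)
 */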
3f26c122
RV
2461static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2462{
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2467
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2470
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
2473
2474 if (insn & ARM_CP_RW_BIT) {
3f26c122
RV
2475 switch (op) {
2476 case 2:
c5883be2 2477 tmp = load_cpu_field(cp15.c13_tls1);
3f26c122
RV
2478 break;
2479 case 3:
c5883be2 2480 tmp = load_cpu_field(cp15.c13_tls2);
3f26c122
RV
2481 break;
2482 case 4:
c5883be2 2483 tmp = load_cpu_field(cp15.c13_tls3);
3f26c122
RV
2484 break;
2485 default:
3f26c122
RV
2486 return 0;
2487 }
2488 store_reg(s, rd, tmp);
2489
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
c5883be2 2494 store_cpu_field(tmp, cp15.c13_tls1);
3f26c122
RV
2495 break;
2496 case 3:
c5883be2 2497 store_cpu_field(tmp, cp15.c13_tls2);
3f26c122
RV
2498 break;
2499 case 4:
c5883be2 2500 store_cpu_field(tmp, cp15.c13_tls3);
3f26c122
RV
2501 break;
2502 default:
7d1b0095 2503 tcg_temp_free_i32(tmp);
3f26c122
RV
2504 return 0;
2505 }
3f26c122
RV
2506 }
2507 return 1;
2508}
2509
b5ff1b31
FB
2510/* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2511 instruction is not defined. */
a90b7318 2512static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2513{
2514 uint32_t rd;
b75263d6 2515 TCGv tmp, tmp2;
b5ff1b31 2516
9ee6e8bb
PB
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2520
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2525 }
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2528 }
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2532 }
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2534 return 1;
2535 }
cc688901
PM
2536
2537 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2538 * instructions rather than a separate instruction.
2539 */
2540 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2541 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2542 * In v7, this must NOP.
2543 */
2544 if (!arm_feature(env, ARM_FEATURE_V7)) {
2545 /* Wait for interrupt. */
2546 gen_set_pc_im(s->pc);
2547 s->is_jmp = DISAS_WFI;
2548 }
9332f9da
FB
2549 return 0;
2550 }
cc688901
PM
2551
2552 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2553 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2554 * so this is slightly over-broad.
2555 */
2556 if (!arm_feature(env, ARM_FEATURE_V6)) {
2557 /* Wait for interrupt. */
2558 gen_set_pc_im(s->pc);
2559 s->is_jmp = DISAS_WFI;
2560 return 0;
2561 }
2562 /* Otherwise fall through to handle via helper function.
2563 * In particular, on v7 and some v6 cores this is one of
2564 * the VA-PA registers.
2565 */
2566 }
2567
b5ff1b31 2568 rd = (insn >> 12) & 0xf;
3f26c122
RV
2569
2570 if (cp15_tls_load_store(env, s, insn, rd))
2571 return 0;
2572
b75263d6 2573 tmp2 = tcg_const_i32(insn);
18c9b560 2574 if (insn & ARM_CP_RW_BIT) {
7d1b0095 2575 tmp = tcg_temp_new_i32();
b75263d6 2576 gen_helper_get_cp15(tmp, cpu_env, tmp2);
b5ff1b31
FB
2577 /* If the destination register is r15 then sets condition codes. */
2578 if (rd != 15)
8984bd2e
PB
2579 store_reg(s, rd, tmp);
2580 else
7d1b0095 2581 tcg_temp_free_i32(tmp);
b5ff1b31 2582 } else {
8984bd2e 2583 tmp = load_reg(s, rd);
b75263d6 2584 gen_helper_set_cp15(cpu_env, tmp2, tmp);
7d1b0095 2585 tcg_temp_free_i32(tmp);
a90b7318
AZ
2586 /* Normally we would always end the TB here, but Linux
2587 * arch/arm/mach-pxa/sleep.S expects two instructions following
2588 * an MMU enable to execute from cache. Imitate this behaviour. */
2589 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2590 (insn & 0x0fff0fff) != 0x0e010f10)
2591 gen_lookup_tb(s);
b5ff1b31 2592 }
b75263d6 2593 tcg_temp_free_i32(tmp2);
b5ff1b31
FB
2594 return 0;
2595}
2596
9ee6e8bb
PB
2597#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2598#define VFP_SREG(insn, bigbit, smallbit) \
2599 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2600#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2601 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2602 reg = (((insn) >> (bigbit)) & 0x0f) \
2603 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2604 } else { \
2605 if (insn & (1 << (smallbit))) \
2606 return 1; \
2607 reg = ((insn) >> (bigbit)) & 0x0f; \
2608 }} while (0)
2609
2610#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2611#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2612#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2613#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2614#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2615#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2616
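/* Worked example (editor's note, not used by the code): a single-precision
 * register number Sd is encoded as Vd:D, i.e. instruction bits [15:12]
 * shifted left by one with bit 22 as the low bit, so
 *
 *   VFP_SREG_D(insn) == ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
 *
 * while on VFP3 a double-precision destination uses D as the high bit:
 *
 *   VFP_DREG_D(rd, insn)  =>  rd = ((insn >> 12) & 0x0f) | ((insn >> 18) & 0x10);
 */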
4373f3ce
PB
2617/* Move between integer and VFP cores. */
2618static TCGv gen_vfp_mrs(void)
2619{
7d1b0095 2620 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2621 tcg_gen_mov_i32(tmp, cpu_F0s);
2622 return tmp;
2623}
2624
2625static void gen_vfp_msr(TCGv tmp)
2626{
2627 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2628 tcg_temp_free_i32(tmp);
4373f3ce
PB
2629}
2630
ad69471c
PB
2631static void gen_neon_dup_u8(TCGv var, int shift)
2632{
7d1b0095 2633 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2634 if (shift)
2635 tcg_gen_shri_i32(var, var, shift);
86831435 2636 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2637 tcg_gen_shli_i32(tmp, var, 8);
2638 tcg_gen_or_i32(var, var, tmp);
2639 tcg_gen_shli_i32(tmp, var, 16);
2640 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2641 tcg_temp_free_i32(tmp);
ad69471c
PB
2642}
2643
2644static void gen_neon_dup_low16(TCGv var)
2645{
7d1b0095 2646 TCGv tmp = tcg_temp_new_i32();
86831435 2647 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2648 tcg_gen_shli_i32(tmp, var, 16);
2649 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2650 tcg_temp_free_i32(tmp);
ad69471c
PB
2651}
2652
2653static void gen_neon_dup_high16(TCGv var)
2654{
7d1b0095 2655 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2656 tcg_gen_andi_i32(var, var, 0xffff0000);
2657 tcg_gen_shri_i32(tmp, var, 16);
2658 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2659 tcg_temp_free_i32(tmp);
ad69471c
PB
2660}
2661
8e18cde3
PM
2662static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2663{
2664 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2665 TCGv tmp;
2666 switch (size) {
2667 case 0:
2668 tmp = gen_ld8u(addr, IS_USER(s));
2669 gen_neon_dup_u8(tmp, 0);
2670 break;
2671 case 1:
2672 tmp = gen_ld16u(addr, IS_USER(s));
2673 gen_neon_dup_low16(tmp);
2674 break;
2675 case 2:
2676 tmp = gen_ld32(addr, IS_USER(s));
2677 break;
2678 default: /* Avoid compiler warnings. */
2679 abort();
2680 }
2681 return tmp;
2682}
2683
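/* Usage sketch (editor's note): this helper is the building block for the
 * "single element to all lanes" loads; for size == 0 it reads one byte and
 * gen_neon_dup_u8() smears it across the word, e.g. a loaded 0x000000ab
 * becomes 0xabababab before being stored to the destination registers. */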
b7bcbe95
FB
2684/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2685 (i.e. an undefined instruction). */
2686static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2687{
2688 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2689 int dp, veclen;
312eea9f 2690 TCGv addr;
4373f3ce 2691 TCGv tmp;
ad69471c 2692 TCGv tmp2;
b7bcbe95 2693
40f137e1
PB
2694 if (!arm_feature(env, ARM_FEATURE_VFP))
2695 return 1;
2696
5df8bac1 2697 if (!s->vfp_enabled) {
9ee6e8bb 2698 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2699 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2700 return 1;
2701 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2702 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2703 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2704 return 1;
2705 }
b7bcbe95
FB
2706 dp = ((insn & 0xf00) == 0xb00);
2707 switch ((insn >> 24) & 0xf) {
2708 case 0xe:
2709 if (insn & (1 << 4)) {
2710 /* single register transfer */
b7bcbe95
FB
2711 rd = (insn >> 12) & 0xf;
2712 if (dp) {
9ee6e8bb
PB
2713 int size;
2714 int pass;
2715
2716 VFP_DREG_N(rn, insn);
2717 if (insn & 0xf)
b7bcbe95 2718 return 1;
9ee6e8bb
PB
2719 if (insn & 0x00c00060
2720 && !arm_feature(env, ARM_FEATURE_NEON))
2721 return 1;
2722
2723 pass = (insn >> 21) & 1;
2724 if (insn & (1 << 22)) {
2725 size = 0;
2726 offset = ((insn >> 5) & 3) * 8;
2727 } else if (insn & (1 << 5)) {
2728 size = 1;
2729 offset = (insn & (1 << 6)) ? 16 : 0;
2730 } else {
2731 size = 2;
2732 offset = 0;
2733 }
18c9b560 2734 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2735 /* vfp->arm */
ad69471c 2736 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2737 switch (size) {
2738 case 0:
9ee6e8bb 2739 if (offset)
ad69471c 2740 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2741 if (insn & (1 << 23))
ad69471c 2742 gen_uxtb(tmp);
9ee6e8bb 2743 else
ad69471c 2744 gen_sxtb(tmp);
9ee6e8bb
PB
2745 break;
2746 case 1:
9ee6e8bb
PB
2747 if (insn & (1 << 23)) {
2748 if (offset) {
ad69471c 2749 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2750 } else {
ad69471c 2751 gen_uxth(tmp);
9ee6e8bb
PB
2752 }
2753 } else {
2754 if (offset) {
ad69471c 2755 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2756 } else {
ad69471c 2757 gen_sxth(tmp);
9ee6e8bb
PB
2758 }
2759 }
2760 break;
2761 case 2:
9ee6e8bb
PB
2762 break;
2763 }
ad69471c 2764 store_reg(s, rd, tmp);
b7bcbe95
FB
2765 } else {
2766 /* arm->vfp */
ad69471c 2767 tmp = load_reg(s, rd);
9ee6e8bb
PB
2768 if (insn & (1 << 23)) {
2769 /* VDUP */
2770 if (size == 0) {
ad69471c 2771 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2772 } else if (size == 1) {
ad69471c 2773 gen_neon_dup_low16(tmp);
9ee6e8bb 2774 }
cbbccffc 2775 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2776 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2777 tcg_gen_mov_i32(tmp2, tmp);
2778 neon_store_reg(rn, n, tmp2);
2779 }
2780 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2781 } else {
2782 /* VMOV */
2783 switch (size) {
2784 case 0:
ad69471c
PB
2785 tmp2 = neon_load_reg(rn, pass);
2786 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2787 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2788 break;
2789 case 1:
ad69471c
PB
2790 tmp2 = neon_load_reg(rn, pass);
2791 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2792 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2793 break;
2794 case 2:
9ee6e8bb
PB
2795 break;
2796 }
ad69471c 2797 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2798 }
b7bcbe95 2799 }
9ee6e8bb
PB
2800 } else { /* !dp */
2801 if ((insn & 0x6f) != 0x00)
2802 return 1;
2803 rn = VFP_SREG_N(insn);
18c9b560 2804 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2805 /* vfp->arm */
2806 if (insn & (1 << 21)) {
2807 /* system register */
40f137e1 2808 rn >>= 1;
9ee6e8bb 2809
b7bcbe95 2810 switch (rn) {
40f137e1 2811 case ARM_VFP_FPSID:
4373f3ce 2812 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2813 VFP3 restricts all id registers to privileged
2814 accesses. */
2815 if (IS_USER(s)
2816 && arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
4373f3ce 2818 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2819 break;
40f137e1 2820 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2821 if (IS_USER(s))
2822 return 1;
4373f3ce 2823 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2824 break;
40f137e1
PB
2825 case ARM_VFP_FPINST:
2826 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2827 /* Not present in VFP3. */
2828 if (IS_USER(s)
2829 || arm_feature(env, ARM_FEATURE_VFP3))
2830 return 1;
4373f3ce 2831 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2832 break;
40f137e1 2833 case ARM_VFP_FPSCR:
601d70b9 2834 if (rd == 15) {
4373f3ce
PB
2835 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2836 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2837 } else {
7d1b0095 2838 tmp = tcg_temp_new_i32();
4373f3ce
PB
2839 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2840 }
b7bcbe95 2841 break;
9ee6e8bb
PB
2842 case ARM_VFP_MVFR0:
2843 case ARM_VFP_MVFR1:
2844 if (IS_USER(s)
2845 || !arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
4373f3ce 2847 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2848 break;
b7bcbe95
FB
2849 default:
2850 return 1;
2851 }
2852 } else {
2853 gen_mov_F0_vreg(0, rn);
4373f3ce 2854 tmp = gen_vfp_mrs();
b7bcbe95
FB
2855 }
2856 if (rd == 15) {
b5ff1b31 2857 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2858 gen_set_nzcv(tmp);
7d1b0095 2859 tcg_temp_free_i32(tmp);
4373f3ce
PB
2860 } else {
2861 store_reg(s, rd, tmp);
2862 }
b7bcbe95
FB
2863 } else {
2864 /* arm->vfp */
4373f3ce 2865 tmp = load_reg(s, rd);
b7bcbe95 2866 if (insn & (1 << 21)) {
40f137e1 2867 rn >>= 1;
b7bcbe95
FB
2868 /* system register */
2869 switch (rn) {
40f137e1 2870 case ARM_VFP_FPSID:
9ee6e8bb
PB
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
b7bcbe95
FB
2873 /* Writes are ignored. */
2874 break;
40f137e1 2875 case ARM_VFP_FPSCR:
4373f3ce 2876 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2877 tcg_temp_free_i32(tmp);
b5ff1b31 2878 gen_lookup_tb(s);
b7bcbe95 2879 break;
40f137e1 2880 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2881 if (IS_USER(s))
2882 return 1;
71b3c3de
JR
2883 /* TODO: VFP subarchitecture support.
2884 * For now, keep the EN bit only */
2885 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2886 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2887 gen_lookup_tb(s);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
4373f3ce 2891 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2892 break;
b7bcbe95
FB
2893 default:
2894 return 1;
2895 }
2896 } else {
4373f3ce 2897 gen_vfp_msr(tmp);
b7bcbe95
FB
2898 gen_mov_vreg_F0(0, rn);
2899 }
2900 }
2901 }
2902 } else {
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2906 if (dp) {
2907 if (op == 15) {
2908 /* rn is opcode */
2909 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2910 } else {
2911 /* rn is register number */
9ee6e8bb 2912 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2913 }
2914
04595bf6 2915 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2916 /* Integer or single precision destination. */
9ee6e8bb 2917 rd = VFP_SREG_D(insn);
b7bcbe95 2918 } else {
9ee6e8bb 2919 VFP_DREG_D(rd, insn);
b7bcbe95 2920 }
04595bf6
PM
2921 if (op == 15 &&
2922 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2923 /* VCVT from int is always from S reg regardless of dp bit.
2924 * VCVT with immediate frac_bits has same format as SREG_M
2925 */
2926 rm = VFP_SREG_M(insn);
b7bcbe95 2927 } else {
9ee6e8bb 2928 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2929 }
2930 } else {
9ee6e8bb 2931 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2932 if (op == 15 && rn == 15) {
2933 /* Double precision destination. */
9ee6e8bb
PB
2934 VFP_DREG_D(rd, insn);
2935 } else {
2936 rd = VFP_SREG_D(insn);
2937 }
04595bf6
PM
2938 /* NB that we implicitly rely on the encoding for the frac_bits
2939 * in VCVT of fixed to float being the same as that of an SREG_M
2940 */
9ee6e8bb 2941 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2942 }
2943
69d1fc22 2944 veclen = s->vec_len;
b7bcbe95
FB
2945 if (op == 15 && rn > 3)
2946 veclen = 0;
2947
2948 /* Shut up compiler warnings. */
2949 delta_m = 0;
2950 delta_d = 0;
2951 bank_mask = 0;
3b46e624 2952
b7bcbe95
FB
2953 if (veclen > 0) {
2954 if (dp)
2955 bank_mask = 0xc;
2956 else
2957 bank_mask = 0x18;
2958
2959 /* Figure out what type of vector operation this is. */
2960 if ((rd & bank_mask) == 0) {
2961 /* scalar */
2962 veclen = 0;
2963 } else {
2964 if (dp)
69d1fc22 2965 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2966 else
69d1fc22 2967 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2968
2969 if ((rm & bank_mask) == 0) {
2970 /* mixed scalar/vector */
2971 delta_m = 0;
2972 } else {
2973 /* vector */
2974 delta_m = delta_d;
2975 }
2976 }
2977 }
2978
2979 /* Load the initial operands. */
2980 if (op == 15) {
2981 switch (rn) {
2982 case 16:
2983 case 17:
2984 /* Integer source */
2985 gen_mov_F0_vreg(0, rm);
2986 break;
2987 case 8:
2988 case 9:
2989 /* Compare */
2990 gen_mov_F0_vreg(dp, rd);
2991 gen_mov_F1_vreg(dp, rm);
2992 break;
2993 case 10:
2994 case 11:
2995 /* Compare with zero */
2996 gen_mov_F0_vreg(dp, rd);
2997 gen_vfp_F1_ld0(dp);
2998 break;
9ee6e8bb
PB
2999 case 20:
3000 case 21:
3001 case 22:
3002 case 23:
644ad806
PB
3003 case 28:
3004 case 29:
3005 case 30:
3006 case 31:
9ee6e8bb
PB
3007 /* Source and destination the same. */
3008 gen_mov_F0_vreg(dp, rd);
3009 break;
b7bcbe95
FB
3010 default:
3011 /* One source operand. */
3012 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3013 break;
b7bcbe95
FB
3014 }
3015 } else {
3016 /* Two source operands. */
3017 gen_mov_F0_vreg(dp, rn);
3018 gen_mov_F1_vreg(dp, rm);
3019 }
3020
3021 for (;;) {
3022 /* Perform the calculation. */
3023 switch (op) {
3024 case 0: /* mac: fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_add(dp);
3028 break;
3029 case 1: /* nmac: fd - (fn * fm) */
3030 gen_vfp_mul(dp);
3031 gen_vfp_neg(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_add(dp);
3034 break;
3035 case 2: /* msc: -fd + (fn * fm) */
3036 gen_vfp_mul(dp);
3037 gen_mov_F1_vreg(dp, rd);
3038 gen_vfp_sub(dp);
3039 break;
3040 case 3: /* nmsc: -fd - (fn * fm) */
3041 gen_vfp_mul(dp);
b7bcbe95 3042 gen_vfp_neg(dp);
c9fb531a
PB
3043 gen_mov_F1_vreg(dp, rd);
3044 gen_vfp_sub(dp);
b7bcbe95
FB
3045 break;
3046 case 4: /* mul: fn * fm */
3047 gen_vfp_mul(dp);
3048 break;
3049 case 5: /* nmul: -(fn * fm) */
3050 gen_vfp_mul(dp);
3051 gen_vfp_neg(dp);
3052 break;
3053 case 6: /* add: fn + fm */
3054 gen_vfp_add(dp);
3055 break;
3056 case 7: /* sub: fn - fm */
3057 gen_vfp_sub(dp);
3058 break;
3059 case 8: /* div: fn / fm */
3060 gen_vfp_div(dp);
3061 break;
9ee6e8bb
PB
3062 case 14: /* fconst */
3063 if (!arm_feature(env, ARM_FEATURE_VFP3))
3064 return 1;
3065
3066 n = (insn << 12) & 0x80000000;
3067 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3068 if (dp) {
3069 if (i & 0x40)
3070 i |= 0x3f80;
3071 else
3072 i |= 0x4000;
3073 n |= i << 16;
4373f3ce 3074 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3075 } else {
3076 if (i & 0x40)
3077 i |= 0x780;
3078 else
3079 i |= 0x800;
3080 n |= i << 19;
5b340b51 3081 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3082 }
9ee6e8bb 3083 break;
b7bcbe95
FB
3084 case 15: /* extension space */
3085 switch (rn) {
3086 case 0: /* cpy */
3087 /* no-op */
3088 break;
3089 case 1: /* abs */
3090 gen_vfp_abs(dp);
3091 break;
3092 case 2: /* neg */
3093 gen_vfp_neg(dp);
3094 break;
3095 case 3: /* sqrt */
3096 gen_vfp_sqrt(dp);
3097 break;
60011498
PB
3098 case 4: /* vcvtb.f32.f16 */
3099 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3100 return 1;
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3104 tcg_temp_free_i32(tmp);
60011498
PB
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
3107 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3108 return 1;
3109 tmp = gen_vfp_mrs();
3110 tcg_gen_shri_i32(tmp, tmp, 16);
3111 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3112 tcg_temp_free_i32(tmp);
60011498
PB
3113 break;
3114 case 6: /* vcvtb.f16.f32 */
3115 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3116 return 1;
7d1b0095 3117 tmp = tcg_temp_new_i32();
60011498
PB
3118 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3119 gen_mov_F0_vreg(0, rd);
3120 tmp2 = gen_vfp_mrs();
3121 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3122 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3123 tcg_temp_free_i32(tmp2);
60011498
PB
3124 gen_vfp_msr(tmp);
3125 break;
3126 case 7: /* vcvtt.f16.f32 */
3127 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3128 return 1;
7d1b0095 3129 tmp = tcg_temp_new_i32();
60011498
PB
3130 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3131 tcg_gen_shli_i32(tmp, tmp, 16);
3132 gen_mov_F0_vreg(0, rd);
3133 tmp2 = gen_vfp_mrs();
3134 tcg_gen_ext16u_i32(tmp2, tmp2);
3135 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3136 tcg_temp_free_i32(tmp2);
60011498
PB
3137 gen_vfp_msr(tmp);
3138 break;
b7bcbe95
FB
3139 case 8: /* cmp */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 9: /* cmpe */
3143 gen_vfp_cmpe(dp);
3144 break;
3145 case 10: /* cmpz */
3146 gen_vfp_cmp(dp);
3147 break;
3148 case 11: /* cmpez */
3149 gen_vfp_F1_ld0(dp);
3150 gen_vfp_cmpe(dp);
3151 break;
3152 case 15: /* single<->double conversion */
3153 if (dp)
4373f3ce 3154 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3155 else
4373f3ce 3156 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3157 break;
3158 case 16: /* fuito */
3159 gen_vfp_uito(dp);
3160 break;
3161 case 17: /* fsito */
3162 gen_vfp_sito(dp);
3163 break;
9ee6e8bb
PB
3164 case 20: /* fshto */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
644ad806 3167 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3168 break;
3169 case 21: /* fslto */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
644ad806 3172 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3173 break;
3174 case 22: /* fuhto */
3175 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 return 1;
644ad806 3177 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3178 break;
3179 case 23: /* fulto */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
644ad806 3182 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3183 break;
b7bcbe95
FB
3184 case 24: /* ftoui */
3185 gen_vfp_toui(dp);
3186 break;
3187 case 25: /* ftouiz */
3188 gen_vfp_touiz(dp);
3189 break;
3190 case 26: /* ftosi */
3191 gen_vfp_tosi(dp);
3192 break;
3193 case 27: /* ftosiz */
3194 gen_vfp_tosiz(dp);
3195 break;
9ee6e8bb
PB
3196 case 28: /* ftosh */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
644ad806 3199 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3200 break;
3201 case 29: /* ftosl */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
644ad806 3204 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3205 break;
3206 case 30: /* ftouh */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
644ad806 3209 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3210 break;
3211 case 31: /* ftoul */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
644ad806 3214 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3215 break;
b7bcbe95
FB
3216 default: /* undefined */
3217 printf ("rn:%d\n", rn);
3218 return 1;
3219 }
3220 break;
3221 default: /* undefined */
3222 printf ("op:%d\n", op);
3223 return 1;
3224 }
3225
3226 /* Write back the result. */
3227 if (op == 15 && (rn >= 8 && rn <= 11))
3228 ; /* Comparison, do nothing. */
04595bf6
PM
3229 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3230 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3231 gen_mov_vreg_F0(0, rd);
3232 else if (op == 15 && rn == 15)
3233 /* conversion */
3234 gen_mov_vreg_F0(!dp, rd);
3235 else
3236 gen_mov_vreg_F0(dp, rd);
3237
3238 /* break out of the loop if we have finished */
3239 if (veclen == 0)
3240 break;
3241
3242 if (op == 15 && delta_m == 0) {
3243 /* single source one-many */
3244 while (veclen--) {
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247 gen_mov_vreg_F0(dp, rd);
3248 }
3249 break;
3250 }
3251 /* Setup the next operands. */
3252 veclen--;
3253 rd = ((rd + delta_d) & (bank_mask - 1))
3254 | (rd & bank_mask);
3255
3256 if (op == 15) {
3257 /* One source operand. */
3258 rm = ((rm + delta_m) & (bank_mask - 1))
3259 | (rm & bank_mask);
3260 gen_mov_F0_vreg(dp, rm);
3261 } else {
3262 /* Two source operands. */
3263 rn = ((rn + delta_d) & (bank_mask - 1))
3264 | (rn & bank_mask);
3265 gen_mov_F0_vreg(dp, rn);
3266 if (delta_m) {
3267 rm = ((rm + delta_m) & (bank_mask - 1))
3268 | (rm & bank_mask);
3269 gen_mov_F1_vreg(dp, rm);
3270 }
3271 }
3272 }
3273 }
3274 break;
3275 case 0xc:
3276 case 0xd:
8387da81 3277 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3278 /* two-register transfer */
3279 rn = (insn >> 16) & 0xf;
3280 rd = (insn >> 12) & 0xf;
3281 if (dp) {
9ee6e8bb
PB
3282 VFP_DREG_M(rm, insn);
3283 } else {
3284 rm = VFP_SREG_M(insn);
3285 }
b7bcbe95 3286
18c9b560 3287 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3288 /* vfp->arm */
3289 if (dp) {
4373f3ce
PB
3290 gen_mov_F0_vreg(0, rm * 2);
3291 tmp = gen_vfp_mrs();
3292 store_reg(s, rd, tmp);
3293 gen_mov_F0_vreg(0, rm * 2 + 1);
3294 tmp = gen_vfp_mrs();
3295 store_reg(s, rn, tmp);
b7bcbe95
FB
3296 } else {
3297 gen_mov_F0_vreg(0, rm);
4373f3ce 3298 tmp = gen_vfp_mrs();
8387da81 3299 store_reg(s, rd, tmp);
b7bcbe95 3300 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3301 tmp = gen_vfp_mrs();
8387da81 3302 store_reg(s, rn, tmp);
b7bcbe95
FB
3303 }
3304 } else {
3305 /* arm->vfp */
3306 if (dp) {
4373f3ce
PB
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rm * 2);
3310 tmp = load_reg(s, rn);
3311 gen_vfp_msr(tmp);
3312 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3313 } else {
8387da81 3314 tmp = load_reg(s, rd);
4373f3ce 3315 gen_vfp_msr(tmp);
b7bcbe95 3316 gen_mov_vreg_F0(0, rm);
8387da81 3317 tmp = load_reg(s, rn);
4373f3ce 3318 gen_vfp_msr(tmp);
b7bcbe95
FB
3319 gen_mov_vreg_F0(0, rm + 1);
3320 }
3321 }
3322 } else {
3323 /* Load/store */
3324 rn = (insn >> 16) & 0xf;
3325 if (dp)
9ee6e8bb 3326 VFP_DREG_D(rd, insn);
b7bcbe95 3327 else
9ee6e8bb
PB
3328 rd = VFP_SREG_D(insn);
3329 if (s->thumb && rn == 15) {
7d1b0095 3330 addr = tcg_temp_new_i32();
312eea9f 3331 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3332 } else {
312eea9f 3333 addr = load_reg(s, rn);
9ee6e8bb 3334 }
b7bcbe95
FB
3335 if ((insn & 0x01200000) == 0x01000000) {
3336 /* Single load/store */
3337 offset = (insn & 0xff) << 2;
3338 if ((insn & (1 << 23)) == 0)
3339 offset = -offset;
312eea9f 3340 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3341 if (insn & (1 << 20)) {
312eea9f 3342 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3343 gen_mov_vreg_F0(dp, rd);
3344 } else {
3345 gen_mov_F0_vreg(dp, rd);
312eea9f 3346 gen_vfp_st(s, dp, addr);
b7bcbe95 3347 }
7d1b0095 3348 tcg_temp_free_i32(addr);
b7bcbe95
FB
3349 } else {
3350 /* load/store multiple */
3351 if (dp)
3352 n = (insn >> 1) & 0x7f;
3353 else
3354 n = insn & 0xff;
3355
3356 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3357 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3358
3359 if (dp)
3360 offset = 8;
3361 else
3362 offset = 4;
3363 for (i = 0; i < n; i++) {
18c9b560 3364 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3365 /* load */
312eea9f 3366 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3367 gen_mov_vreg_F0(dp, rd + i);
3368 } else {
3369 /* store */
3370 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3371 gen_vfp_st(s, dp, addr);
b7bcbe95 3372 }
312eea9f 3373 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3374 }
3375 if (insn & (1 << 21)) {
3376 /* writeback */
3377 if (insn & (1 << 24))
3378 offset = -offset * n;
3379 else if (dp && (insn & 1))
3380 offset = 4;
3381 else
3382 offset = 0;
3383
3384 if (offset != 0)
312eea9f
FN
3385 tcg_gen_addi_i32(addr, addr, offset);
3386 store_reg(s, rn, addr);
3387 } else {
7d1b0095 3388 tcg_temp_free_i32(addr);
b7bcbe95
FB
3389 }
3390 }
3391 }
3392 break;
3393 default:
3394 /* Should never happen. */
3395 return 1;
3396 }
3397 return 0;
3398}
3399
6e256c93 3400static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3401{
6e256c93
FB
3402 TranslationBlock *tb;
3403
3404 tb = s->tb;
3405 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3406 tcg_gen_goto_tb(n);
8984bd2e 3407 gen_set_pc_im(dest);
4b4a72e5 3408 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3409 } else {
8984bd2e 3410 gen_set_pc_im(dest);
57fec1fe 3411 tcg_gen_exit_tb(0);
6e256c93 3412 }
c53be334
FB
3413}
3414
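/* Editorial note: gen_goto_tb() only emits a directly chained exit
 * (goto_tb + exit_tb(tb + n)) when the destination stays within the same
 * guest page as the current TB; direct chaining is only safe while that
 * page mapping is unchanged, so cross-page jumps fall back to setting PC
 * and returning to the main loop via exit_tb(0). */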
8aaca4c0
FB
3415static inline void gen_jmp (DisasContext *s, uint32_t dest)
3416{
551bd27f 3417 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3418 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3419 if (s->thumb)
d9ba4830
PB
3420 dest |= 1;
3421 gen_bx_im(s, dest);
8aaca4c0 3422 } else {
6e256c93 3423 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3424 s->is_jmp = DISAS_TB_JUMP;
3425 }
3426}
3427
d9ba4830 3428static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3429{
ee097184 3430 if (x)
d9ba4830 3431 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3432 else
d9ba4830 3433 gen_sxth(t0);
ee097184 3434 if (y)
d9ba4830 3435 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3436 else
d9ba4830
PB
3437 gen_sxth(t1);
3438 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3439}
3440
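/* Worked example (editor's note): gen_mulxy() provides the halfword
 * selection used by the signed 16x16 multiplies (SMULxy and friends); x
 * and y pick the top (arithmetic shift by 16) or bottom (sign-extend)
 * half of each operand.  For SMULTB, x = 1 and y = 0, so the generated
 * code behaves like
 *
 *   t0 = (int32_t)t0 >> 16;   // top half of the first operand
 *   t1 = (int16_t)t1;         // bottom half of the second, sign-extended
 *   t0 = t0 * t1;
 */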
3441/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3442static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3443 uint32_t mask;
3444
3445 mask = 0;
3446 if (flags & (1 << 0))
3447 mask |= 0xff;
3448 if (flags & (1 << 1))
3449 mask |= 0xff00;
3450 if (flags & (1 << 2))
3451 mask |= 0xff0000;
3452 if (flags & (1 << 3))
3453 mask |= 0xff000000;
9ee6e8bb 3454
2ae23e75 3455 /* Mask out undefined bits. */
9ee6e8bb 3456 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3457 if (!arm_feature(env, ARM_FEATURE_V4T))
3458 mask &= ~CPSR_T;
3459 if (!arm_feature(env, ARM_FEATURE_V5))
3460 mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3461 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3462 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3463 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3464 mask &= ~CPSR_IT;
9ee6e8bb 3465 /* Mask out execution state bits. */
2ae23e75 3466 if (!spsr)
e160c51c 3467 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3468 /* Mask out privileged bits. */
3469 if (IS_USER(s))
9ee6e8bb 3470 mask &= CPSR_USER;
b5ff1b31
FB
3471 return mask;
3472}
3473
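/* Sketch of the decoding above (editor's note): each of the four flag
 * bits selects one byte of the PSR, so an MSR with flags == 0x9 starts
 * from mask 0xff0000ff (control byte plus flags byte) before the
 * feature, execution-state and privilege masking is applied. */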
2fbac54b
FN
3474/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3475static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3476{
d9ba4830 3477 TCGv tmp;
b5ff1b31
FB
3478 if (spsr) {
3479 /* ??? This is also undefined in system mode. */
3480 if (IS_USER(s))
3481 return 1;
d9ba4830
PB
3482
3483 tmp = load_cpu_field(spsr);
3484 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3485 tcg_gen_andi_i32(t0, t0, mask);
3486 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3487 store_cpu_field(tmp, spsr);
b5ff1b31 3488 } else {
2fbac54b 3489 gen_set_cpsr(t0, mask);
b5ff1b31 3490 }
7d1b0095 3491 tcg_temp_free_i32(t0);
b5ff1b31
FB
3492 gen_lookup_tb(s);
3493 return 0;
3494}
3495
2fbac54b
FN
3496/* Returns nonzero if access to the PSR is not permitted. */
3497static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3498{
3499 TCGv tmp;
7d1b0095 3500 tmp = tcg_temp_new_i32();
2fbac54b
FN
3501 tcg_gen_movi_i32(tmp, val);
3502 return gen_set_psr(s, mask, spsr, tmp);
3503}
3504
e9bb4aa9
JR
3505/* Generate an old-style exception return. Marks pc as dead. */
3506static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3507{
d9ba4830 3508 TCGv tmp;
e9bb4aa9 3509 store_reg(s, 15, pc);
d9ba4830
PB
3510 tmp = load_cpu_field(spsr);
3511 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3512 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3513 s->is_jmp = DISAS_UPDATE;
3514}
3515
b0109805
PB
3516/* Generate a v6 exception return. Marks both values as dead. */
3517static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3518{
b0109805 3519 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3520 tcg_temp_free_i32(cpsr);
b0109805 3521 store_reg(s, 15, pc);
9ee6e8bb
PB
3522 s->is_jmp = DISAS_UPDATE;
3523}
3b46e624 3524
9ee6e8bb
PB
3525static inline void
3526gen_set_condexec (DisasContext *s)
3527{
3528 if (s->condexec_mask) {
8f01245e 3529 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3530 TCGv tmp = tcg_temp_new_i32();
8f01245e 3531 tcg_gen_movi_i32(tmp, val);
d9ba4830 3532 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3533 }
3534}
3b46e624 3535
bc4a0de0
PM
3536static void gen_exception_insn(DisasContext *s, int offset, int excp)
3537{
3538 gen_set_condexec(s);
3539 gen_set_pc_im(s->pc - offset);
3540 gen_exception(excp);
3541 s->is_jmp = DISAS_JUMP;
3542}
3543
9ee6e8bb
PB
3544static void gen_nop_hint(DisasContext *s, int val)
3545{
3546 switch (val) {
3547 case 3: /* wfi */
8984bd2e 3548 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3549 s->is_jmp = DISAS_WFI;
3550 break;
3551 case 2: /* wfe */
3552 case 4: /* sev */
3553 /* TODO: Implement SEV and WFE. May help SMP performance. */
3554 default: /* nop */
3555 break;
3556 }
3557}
99c475ab 3558
ad69471c 3559#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3560
62698be3 3561static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3562{
3563 switch (size) {
dd8fbd78
FN
3564 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3565 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3566 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3567 default: abort();
9ee6e8bb 3568 }
9ee6e8bb
PB
3569}
3570
dd8fbd78 3571static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3572{
3573 switch (size) {
dd8fbd78
FN
3574 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3575 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3576 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3577 default: return;
3578 }
3579}
3580
3581/* 32-bit pairwise ops end up the same as the elementwise versions. */
3582#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3583#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3584#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3585#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3586
ad69471c
PB
3587#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3588 switch ((size << 1) | u) { \
3589 case 0: \
dd8fbd78 3590 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3591 break; \
3592 case 1: \
dd8fbd78 3593 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3594 break; \
3595 case 2: \
dd8fbd78 3596 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3597 break; \
3598 case 3: \
dd8fbd78 3599 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3600 break; \
3601 case 4: \
dd8fbd78 3602 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3603 break; \
3604 case 5: \
dd8fbd78 3605 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3606 break; \
3607 default: return 1; \
3608 }} while (0)
9ee6e8bb
PB
3609
3610#define GEN_NEON_INTEGER_OP(name) do { \
3611 switch ((size << 1) | u) { \
ad69471c 3612 case 0: \
dd8fbd78 3613 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3614 break; \
3615 case 1: \
dd8fbd78 3616 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3617 break; \
3618 case 2: \
dd8fbd78 3619 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3620 break; \
3621 case 3: \
dd8fbd78 3622 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3623 break; \
3624 case 4: \
dd8fbd78 3625 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3626 break; \
3627 case 5: \
dd8fbd78 3628 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3629 break; \
9ee6e8bb
PB
3630 default: return 1; \
3631 }} while (0)
3632
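/* Expansion example (editor's note): the switch index is (size << 1) | u,
 * i.e. the element size (8/16/32 bits) with u selecting the unsigned
 * helper.  GEN_NEON_INTEGER_OP(max) with size == 1 and u == 0 therefore
 * expands to
 *
 *   gen_helper_neon_max_s16(tmp, tmp, tmp2);
 */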
dd8fbd78 3633static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3634{
7d1b0095 3635 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3636 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3637 return tmp;
9ee6e8bb
PB
3638}
3639
dd8fbd78 3640static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3641{
dd8fbd78 3642 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3643 tcg_temp_free_i32(var);
9ee6e8bb
PB
3644}
3645
dd8fbd78 3646static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3647{
dd8fbd78 3648 TCGv tmp;
9ee6e8bb 3649 if (size == 1) {
0fad6efc
PM
3650 tmp = neon_load_reg(reg & 7, reg >> 4);
3651 if (reg & 8) {
dd8fbd78 3652 gen_neon_dup_high16(tmp);
0fad6efc
PM
3653 } else {
3654 gen_neon_dup_low16(tmp);
dd8fbd78 3655 }
0fad6efc
PM
3656 } else {
3657 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3658 }
dd8fbd78 3659 return tmp;
9ee6e8bb
PB
3660}
3661
02acedf9 3662static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3663{
02acedf9
PM
3664 TCGv tmp, tmp2;
3665 if (size == 3 || (!q && size == 2)) {
3666 return 1;
3667 }
3668 tmp = tcg_const_i32(rd);
3669 tmp2 = tcg_const_i32(rm);
3670 if (q) {
3671 switch (size) {
3672 case 0:
2a3f75b4 3673 gen_helper_neon_qunzip8(tmp, tmp2);
02acedf9
PM
3674 break;
3675 case 1:
2a3f75b4 3676 gen_helper_neon_qunzip16(tmp, tmp2);
02acedf9
PM
3677 break;
3678 case 2:
2a3f75b4 3679 gen_helper_neon_qunzip32(tmp, tmp2);
02acedf9
PM
3680 break;
3681 default:
3682 abort();
3683 }
3684 } else {
3685 switch (size) {
3686 case 0:
2a3f75b4 3687 gen_helper_neon_unzip8(tmp, tmp2);
02acedf9
PM
3688 break;
3689 case 1:
2a3f75b4 3690 gen_helper_neon_unzip16(tmp, tmp2);
02acedf9
PM
3691 break;
3692 default:
3693 abort();
3694 }
3695 }
3696 tcg_temp_free_i32(tmp);
3697 tcg_temp_free_i32(tmp2);
3698 return 0;
19457615
FN
3699}
3700
d68a6f3a 3701static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3702{
3703 TCGv tmp, tmp2;
d68a6f3a
PM
3704 if (size == 3 || (!q && size == 2)) {
3705 return 1;
3706 }
3707 tmp = tcg_const_i32(rd);
3708 tmp2 = tcg_const_i32(rm);
3709 if (q) {
3710 switch (size) {
3711 case 0:
2a3f75b4 3712 gen_helper_neon_qzip8(tmp, tmp2);
d68a6f3a
PM
3713 break;
3714 case 1:
2a3f75b4 3715 gen_helper_neon_qzip16(tmp, tmp2);
d68a6f3a
PM
3716 break;
3717 case 2:
2a3f75b4 3718 gen_helper_neon_qzip32(tmp, tmp2);
d68a6f3a
PM
3719 break;
3720 default:
3721 abort();
3722 }
3723 } else {
3724 switch (size) {
3725 case 0:
2a3f75b4 3726 gen_helper_neon_zip8(tmp, tmp2);
d68a6f3a
PM
3727 break;
3728 case 1:
2a3f75b4 3729 gen_helper_neon_zip16(tmp, tmp2);
d68a6f3a
PM
3730 break;
3731 default:
3732 abort();
3733 }
3734 }
3735 tcg_temp_free_i32(tmp);
3736 tcg_temp_free_i32(tmp2);
3737 return 0;
19457615
FN
3738}
3739
19457615
FN
3740static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3741{
3742 TCGv rd, tmp;
3743
7d1b0095
PM
3744 rd = tcg_temp_new_i32();
3745 tmp = tcg_temp_new_i32();
19457615
FN
3746
3747 tcg_gen_shli_i32(rd, t0, 8);
3748 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3749 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3750 tcg_gen_or_i32(rd, rd, tmp);
3751
3752 tcg_gen_shri_i32(t1, t1, 8);
3753 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3754 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3755 tcg_gen_or_i32(t1, t1, tmp);
3756 tcg_gen_mov_i32(t0, rd);
3757
7d1b0095
PM
3758 tcg_temp_free_i32(tmp);
3759 tcg_temp_free_i32(rd);
19457615
FN
3760}
3761
3762static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3763{
3764 TCGv rd, tmp;
3765
7d1b0095
PM
3766 rd = tcg_temp_new_i32();
3767 tmp = tcg_temp_new_i32();
19457615
FN
3768
3769 tcg_gen_shli_i32(rd, t0, 16);
3770 tcg_gen_andi_i32(tmp, t1, 0xffff);
3771 tcg_gen_or_i32(rd, rd, tmp);
3772 tcg_gen_shri_i32(t1, t1, 16);
3773 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3774 tcg_gen_or_i32(t1, t1, tmp);
3775 tcg_gen_mov_i32(t0, rd);
3776
7d1b0095
PM
3777 tcg_temp_free_i32(tmp);
3778 tcg_temp_free_i32(rd);
19457615
FN
3779}
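/* Illustrative note (editorial, not from the upstream source): each helper
 * above transposes one 32-bit chunk from each operand in place. For
 * gen_neon_trn_u8, with t0 bytes {p0,p1,p2,p3} and t1 bytes {q0,q1,q2,q3}
 * (least significant byte first), the result is t0 = {q0,p0,q2,p2} and
 * t1 = {q1,p1,q3,p3}; gen_neon_trn_u16 does the same with halfwords. */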
3780
3781
9ee6e8bb
PB
3782static struct {
3783 int nregs;
3784 int interleave;
3785 int spacing;
3786} neon_ls_element_type[11] = {
3787 {4, 4, 1},
3788 {4, 4, 2},
3789 {4, 1, 1},
3790 {4, 2, 1},
3791 {3, 3, 1},
3792 {3, 3, 2},
3793 {3, 1, 1},
3794 {1, 1, 1},
3795 {2, 2, 1},
3796 {2, 2, 2},
3797 {2, 1, 1}
3798};
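/* Illustrative note (editorial, not from the upstream source): the table is
 * indexed by the 'type' field in bits [11:8] of the instruction. For example,
 * index 7 ({1, 1, 1}) is VLD1/VST1 with one register, index 2 ({4, 1, 1}) is
 * VLD1/VST1 with four registers, and indices 0/1 ({4, 4, x}) are VLD4/VST4
 * with a register spacing of one or two. */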
3799
3800/* Translate a NEON load/store element instruction. Return nonzero if the
3801 instruction is invalid. */
3802static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3803{
3804 int rd, rn, rm;
3805 int op;
3806 int nregs;
3807 int interleave;
84496233 3808 int spacing;
9ee6e8bb
PB
3809 int stride;
3810 int size;
3811 int reg;
3812 int pass;
3813 int load;
3814 int shift;
9ee6e8bb 3815 int n;
1b2b1e54 3816 TCGv addr;
b0109805 3817 TCGv tmp;
8f8e3aa4 3818 TCGv tmp2;
84496233 3819 TCGv_i64 tmp64;
9ee6e8bb 3820
5df8bac1 3821 if (!s->vfp_enabled)
9ee6e8bb
PB
3822 return 1;
3823 VFP_DREG_D(rd, insn);
3824 rn = (insn >> 16) & 0xf;
3825 rm = insn & 0xf;
3826 load = (insn & (1 << 21)) != 0;
3827 if ((insn & (1 << 23)) == 0) {
3828 /* Load store all elements. */
3829 op = (insn >> 8) & 0xf;
3830 size = (insn >> 6) & 3;
84496233 3831 if (op > 10)
9ee6e8bb
PB
3832 return 1;
3833 nregs = neon_ls_element_type[op].nregs;
3834 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3835 spacing = neon_ls_element_type[op].spacing;
3836 if (size == 3 && (interleave | spacing) != 1)
3837 return 1;
e318a60b 3838 addr = tcg_temp_new_i32();
dcc65026 3839 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3840 stride = (1 << size) * interleave;
3841 for (reg = 0; reg < nregs; reg++) {
3842 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3843 load_reg_var(s, addr, rn);
3844 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3845 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3846 load_reg_var(s, addr, rn);
3847 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3848 }
84496233
JR
3849 if (size == 3) {
3850 if (load) {
3851 tmp64 = gen_ld64(addr, IS_USER(s));
3852 neon_store_reg64(tmp64, rd);
3853 tcg_temp_free_i64(tmp64);
3854 } else {
3855 tmp64 = tcg_temp_new_i64();
3856 neon_load_reg64(tmp64, rd);
3857 gen_st64(tmp64, addr, IS_USER(s));
3858 }
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 } else {
3861 for (pass = 0; pass < 2; pass++) {
3862 if (size == 2) {
3863 if (load) {
3864 tmp = gen_ld32(addr, IS_USER(s));
3865 neon_store_reg(rd, pass, tmp);
3866 } else {
3867 tmp = neon_load_reg(rd, pass);
3868 gen_st32(tmp, addr, IS_USER(s));
3869 }
1b2b1e54 3870 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3871 } else if (size == 1) {
3872 if (load) {
3873 tmp = gen_ld16u(addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 tmp2 = gen_ld16u(addr, IS_USER(s));
3876 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3877 tcg_gen_shli_i32(tmp2, tmp2, 16);
3878 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3879 tcg_temp_free_i32(tmp2);
84496233
JR
3880 neon_store_reg(rd, pass, tmp);
3881 } else {
3882 tmp = neon_load_reg(rd, pass);
7d1b0095 3883 tmp2 = tcg_temp_new_i32();
84496233
JR
3884 tcg_gen_shri_i32(tmp2, tmp, 16);
3885 gen_st16(tmp, addr, IS_USER(s));
3886 tcg_gen_addi_i32(addr, addr, stride);
3887 gen_st16(tmp2, addr, IS_USER(s));
1b2b1e54 3888 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3889 }
84496233
JR
3890 } else /* size == 0 */ {
3891 if (load) {
3892 TCGV_UNUSED(tmp2);
3893 for (n = 0; n < 4; n++) {
3894 tmp = gen_ld8u(addr, IS_USER(s));
3895 tcg_gen_addi_i32(addr, addr, stride);
3896 if (n == 0) {
3897 tmp2 = tmp;
3898 } else {
41ba8341
PB
3899 tcg_gen_shli_i32(tmp, tmp, n * 8);
3900 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3901 tcg_temp_free_i32(tmp);
84496233 3902 }
9ee6e8bb 3903 }
84496233
JR
3904 neon_store_reg(rd, pass, tmp2);
3905 } else {
3906 tmp2 = neon_load_reg(rd, pass);
3907 for (n = 0; n < 4; n++) {
7d1b0095 3908 tmp = tcg_temp_new_i32();
84496233
JR
3909 if (n == 0) {
3910 tcg_gen_mov_i32(tmp, tmp2);
3911 } else {
3912 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3913 }
3914 gen_st8(tmp, addr, IS_USER(s));
3915 tcg_gen_addi_i32(addr, addr, stride);
3916 }
7d1b0095 3917 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3918 }
3919 }
3920 }
3921 }
84496233 3922 rd += spacing;
9ee6e8bb 3923 }
e318a60b 3924 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3925 stride = nregs * 8;
3926 } else {
3927 size = (insn >> 10) & 3;
3928 if (size == 3) {
3929 /* Load single element to all lanes. */
8e18cde3
PM
3930 int a = (insn >> 4) & 1;
3931 if (!load) {
9ee6e8bb 3932 return 1;
8e18cde3 3933 }
9ee6e8bb
PB
3934 size = (insn >> 6) & 3;
3935 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3936
3937 if (size == 3) {
3938 if (nregs != 4 || a == 0) {
9ee6e8bb 3939 return 1;
99c475ab 3940 }
8e18cde3
PM
3941 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3941 /* For VLD4, size == 3 with a == 1 means 32 bits at 16-byte alignment */
3942 size = 2;
3943 }
3944 if (nregs == 1 && a == 1 && size == 0) {
3945 return 1;
3946 }
3947 if (nregs == 3 && a == 1) {
3948 return 1;
3949 }
e318a60b 3950 addr = tcg_temp_new_i32();
8e18cde3
PM
3951 load_reg_var(s, addr, rn);
3952 if (nregs == 1) {
3953 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3954 tmp = gen_load_and_replicate(s, addr, size);
3955 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3956 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3957 if (insn & (1 << 5)) {
3958 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3959 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3960 }
3961 tcg_temp_free_i32(tmp);
3962 } else {
3963 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3964 stride = (insn & (1 << 5)) ? 2 : 1;
3965 for (reg = 0; reg < nregs; reg++) {
3966 tmp = gen_load_and_replicate(s, addr, size);
3967 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3968 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3969 tcg_temp_free_i32(tmp);
3970 tcg_gen_addi_i32(addr, addr, 1 << size);
3971 rd += stride;
3972 }
9ee6e8bb 3973 }
e318a60b 3974 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3975 stride = (1 << size) * nregs;
3976 } else {
3977 /* Single element. */
3978 pass = (insn >> 7) & 1;
3979 switch (size) {
3980 case 0:
3981 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3982 stride = 1;
3983 break;
3984 case 1:
3985 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3986 stride = (insn & (1 << 5)) ? 2 : 1;
3987 break;
3988 case 2:
3989 shift = 0;
9ee6e8bb
PB
3990 stride = (insn & (1 << 6)) ? 2 : 1;
3991 break;
3992 default:
3993 abort();
3994 }
3995 nregs = ((insn >> 8) & 3) + 1;
e318a60b 3996 addr = tcg_temp_new_i32();
dcc65026 3997 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3998 for (reg = 0; reg < nregs; reg++) {
3999 if (load) {
9ee6e8bb
PB
4000 switch (size) {
4001 case 0:
1b2b1e54 4002 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb
PB
4003 break;
4004 case 1:
1b2b1e54 4005 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
4006 break;
4007 case 2:
1b2b1e54 4008 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 4009 break;
a50f5b91
PB
4010 default: /* Avoid compiler warnings. */
4011 abort();
9ee6e8bb
PB
4012 }
4013 if (size != 2) {
8f8e3aa4
PB
4014 tmp2 = neon_load_reg(rd, pass);
4015 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
7d1b0095 4016 tcg_temp_free_i32(tmp2);
9ee6e8bb 4017 }
8f8e3aa4 4018 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4019 } else { /* Store */
8f8e3aa4
PB
4020 tmp = neon_load_reg(rd, pass);
4021 if (shift)
4022 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4023 switch (size) {
4024 case 0:
1b2b1e54 4025 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4026 break;
4027 case 1:
1b2b1e54 4028 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4029 break;
4030 case 2:
1b2b1e54 4031 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4032 break;
99c475ab 4033 }
99c475ab 4034 }
9ee6e8bb 4035 rd += stride;
1b2b1e54 4036 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4037 }
e318a60b 4038 tcg_temp_free_i32(addr);
9ee6e8bb 4039 stride = nregs * (1 << size);
99c475ab 4040 }
9ee6e8bb
PB
4041 }
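 /* Illustrative note (editorial, not from the upstream source): post-indexed
  * writeback follows the NEON element load/store convention: Rm == 15 means
  * no writeback, Rm == 13 adds the transfer size ('stride' computed above),
  * and any other Rm adds that register. E.g. "vld1.8 {d0-d3}, [r1]!" encodes
  * Rm == 13 and advances r1 by 32 bytes here. */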
4042 if (rm != 15) {
b26eefb6
PB
4043 TCGv base;
4044
4045 base = load_reg(s, rn);
9ee6e8bb 4046 if (rm == 13) {
b26eefb6 4047 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4048 } else {
b26eefb6
PB
4049 TCGv index;
4050 index = load_reg(s, rm);
4051 tcg_gen_add_i32(base, base, index);
7d1b0095 4052 tcg_temp_free_i32(index);
9ee6e8bb 4053 }
b26eefb6 4054 store_reg(s, rn, base);
9ee6e8bb
PB
4055 }
4056 return 0;
4057}
3b46e624 4058
8f8e3aa4
PB
4059/* Bitwise select. dest = c ? t : f. Clobbers t and f. */
4060static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4061{
4062 tcg_gen_and_i32(t, t, c);
f669df27 4063 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4064 tcg_gen_or_i32(dest, t, f);
4065}
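/* Illustrative note (editorial, not from the upstream source): this computes
 * dest = (t & c) | (f & ~c), i.e. each result bit comes from t where the
 * corresponding bit of c is set and from f where it is clear. VBSL, VBIT and
 * VBIF are all handled by permuting which operand plays which role. */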
4066
a7812ae4 4067static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4068{
4069 switch (size) {
4070 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4071 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4072 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4073 default: abort();
4074 }
4075}
4076
a7812ae4 4077static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4078{
4079 switch (size) {
2a3f75b4
PM
4080 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4081 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4082 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
ad69471c
PB
4083 default: abort();
4084 }
4085}
4086
a7812ae4 4087static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4088{
4089 switch (size) {
2a3f75b4
PM
4090 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4091 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4092 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
ad69471c
PB
4093 default: abort();
4094 }
4095}
4096
af1bbf30
JR
4097static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4098{
4099 switch (size) {
2a3f75b4
PM
4100 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4101 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4102 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
af1bbf30
JR
4103 default: abort();
4104 }
4105}
4106
ad69471c
PB
4107static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4108 int q, int u)
4109{
4110 if (q) {
4111 if (u) {
4112 switch (size) {
4113 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4114 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4115 default: abort();
4116 }
4117 } else {
4118 switch (size) {
4119 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4120 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4121 default: abort();
4122 }
4123 }
4124 } else {
4125 if (u) {
4126 switch (size) {
b408a9b0
CL
4127 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4128 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4129 default: abort();
4130 }
4131 } else {
4132 switch (size) {
4133 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4134 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4135 default: abort();
4136 }
4137 }
4138 }
4139}
4140
a7812ae4 4141static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4142{
4143 if (u) {
4144 switch (size) {
4145 case 0: gen_helper_neon_widen_u8(dest, src); break;
4146 case 1: gen_helper_neon_widen_u16(dest, src); break;
4147 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4148 default: abort();
4149 }
4150 } else {
4151 switch (size) {
4152 case 0: gen_helper_neon_widen_s8(dest, src); break;
4153 case 1: gen_helper_neon_widen_s16(dest, src); break;
4154 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4155 default: abort();
4156 }
4157 }
7d1b0095 4158 tcg_temp_free_i32(src);
ad69471c
PB
4159}
4160
4161static inline void gen_neon_addl(int size)
4162{
4163 switch (size) {
4164 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4165 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4166 case 2: tcg_gen_add_i64(CPU_V001); break;
4167 default: abort();
4168 }
4169}
4170
4171static inline void gen_neon_subl(int size)
4172{
4173 switch (size) {
4174 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4175 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4176 case 2: tcg_gen_sub_i64(CPU_V001); break;
4177 default: abort();
4178 }
4179}
4180
a7812ae4 4181static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4182{
4183 switch (size) {
4184 case 0: gen_helper_neon_negl_u16(var, var); break;
4185 case 1: gen_helper_neon_negl_u32(var, var); break;
4186 case 2: gen_helper_neon_negl_u64(var, var); break;
4187 default: abort();
4188 }
4189}
4190
a7812ae4 4191static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4192{
4193 switch (size) {
2a3f75b4
PM
4194 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4195 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
ad69471c
PB
4196 default: abort();
4197 }
4198}
4199
a7812ae4 4200static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4201{
a7812ae4 4202 TCGv_i64 tmp;
ad69471c
PB
4203
4204 switch ((size << 1) | u) {
4205 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4206 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4207 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4208 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4209 case 4:
4210 tmp = gen_muls_i64_i32(a, b);
4211 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4212 tcg_temp_free_i64(tmp);
ad69471c
PB
4213 break;
4214 case 5:
4215 tmp = gen_mulu_i64_i32(a, b);
4216 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4217 tcg_temp_free_i64(tmp);
ad69471c
PB
4218 break;
4219 default: abort();
4220 }
c6067f04
CL
4221
4222 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4223 Don't forget to free them here. */
4224 if (size < 2) {
7d1b0095
PM
4225 tcg_temp_free_i32(a);
4226 tcg_temp_free_i32(b);
c6067f04 4227 }
ad69471c
PB
4228}
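/* Illustrative note (editorial, not from the upstream source): this produces
 * one 64-bit chunk of a widening multiply: four 8-bit or two 16-bit lanes of
 * 'a' and 'b' are multiplied via helpers into double-width lanes, while the
 * single 32-bit case uses a 64-bit multiply directly. */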
4229
c33171c7
PM
4230static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4231{
4232 if (op) {
4233 if (u) {
4234 gen_neon_unarrow_sats(size, dest, src);
4235 } else {
4236 gen_neon_narrow(size, dest, src);
4237 }
4238 } else {
4239 if (u) {
4240 gen_neon_narrow_satu(size, dest, src);
4241 } else {
4242 gen_neon_narrow_sats(size, dest, src);
4243 }
4244 }
4245}
4246
62698be3
PM
4247/* Symbolic constants for op fields for Neon 3-register same-length.
4248 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4249 * table A7-9.
4250 */
4251#define NEON_3R_VHADD 0
4252#define NEON_3R_VQADD 1
4253#define NEON_3R_VRHADD 2
4254#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4255#define NEON_3R_VHSUB 4
4256#define NEON_3R_VQSUB 5
4257#define NEON_3R_VCGT 6
4258#define NEON_3R_VCGE 7
4259#define NEON_3R_VSHL 8
4260#define NEON_3R_VQSHL 9
4261#define NEON_3R_VRSHL 10
4262#define NEON_3R_VQRSHL 11
4263#define NEON_3R_VMAX 12
4264#define NEON_3R_VMIN 13
4265#define NEON_3R_VABD 14
4266#define NEON_3R_VABA 15
4267#define NEON_3R_VADD_VSUB 16
4268#define NEON_3R_VTST_VCEQ 17
4269#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4270#define NEON_3R_VMUL 19
4271#define NEON_3R_VPMAX 20
4272#define NEON_3R_VPMIN 21
4273#define NEON_3R_VQDMULH_VQRDMULH 22
4274#define NEON_3R_VPADD 23
4275#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4276#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4277#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4278#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4279#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4280#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4281
4282static const uint8_t neon_3r_sizes[] = {
4283 [NEON_3R_VHADD] = 0x7,
4284 [NEON_3R_VQADD] = 0xf,
4285 [NEON_3R_VRHADD] = 0x7,
4286 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4287 [NEON_3R_VHSUB] = 0x7,
4288 [NEON_3R_VQSUB] = 0xf,
4289 [NEON_3R_VCGT] = 0x7,
4290 [NEON_3R_VCGE] = 0x7,
4291 [NEON_3R_VSHL] = 0xf,
4292 [NEON_3R_VQSHL] = 0xf,
4293 [NEON_3R_VRSHL] = 0xf,
4294 [NEON_3R_VQRSHL] = 0xf,
4295 [NEON_3R_VMAX] = 0x7,
4296 [NEON_3R_VMIN] = 0x7,
4297 [NEON_3R_VABD] = 0x7,
4298 [NEON_3R_VABA] = 0x7,
4299 [NEON_3R_VADD_VSUB] = 0xf,
4300 [NEON_3R_VTST_VCEQ] = 0x7,
4301 [NEON_3R_VML] = 0x7,
4302 [NEON_3R_VMUL] = 0x7,
4303 [NEON_3R_VPMAX] = 0x7,
4304 [NEON_3R_VPMIN] = 0x7,
4305 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4306 [NEON_3R_VPADD] = 0x7,
4307 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4308 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4309 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4310 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4311 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4312 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4313};
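/* Illustrative note (editorial, not from the upstream source): each entry is
 * a bitmap of the legal 'size' values for that op: bit s set means size == s
 * is allowed. E.g. 0x7 allows sizes 0-2 but not 64-bit elements, 0xf allows
 * all four, and 0x5 marks the float ops where size bit 0 must be zero because
 * size bit 1 encodes the operation instead. */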
4314
9ee6e8bb
PB
4315/* Translate a NEON data processing instruction. Return nonzero if the
4316 instruction is invalid.
ad69471c
PB
4317 We process data in a mixture of 32-bit and 64-bit chunks.
4318 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4319
9ee6e8bb
PB
4320static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4321{
4322 int op;
4323 int q;
4324 int rd, rn, rm;
4325 int size;
4326 int shift;
4327 int pass;
4328 int count;
4329 int pairwise;
4330 int u;
ca9a32e4 4331 uint32_t imm, mask;
b75263d6 4332 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4333 TCGv_i64 tmp64;
9ee6e8bb 4334
5df8bac1 4335 if (!s->vfp_enabled)
9ee6e8bb
PB
4336 return 1;
4337 q = (insn & (1 << 6)) != 0;
4338 u = (insn >> 24) & 1;
4339 VFP_DREG_D(rd, insn);
4340 VFP_DREG_N(rn, insn);
4341 VFP_DREG_M(rm, insn);
4342 size = (insn >> 20) & 3;
4343 if ((insn & (1 << 23)) == 0) {
4344 /* Three register same length. */
4345 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4346 /* Catch invalid op and bad size combinations: UNDEF */
4347 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4348 return 1;
4349 }
25f84f79
PM
4350 /* All insns of this form UNDEF for either this condition or the
4351 * superset of cases "Q==1"; we catch the latter later.
4352 */
4353 if (q && ((rd | rn | rm) & 1)) {
4354 return 1;
4355 }
62698be3
PM
4356 if (size == 3 && op != NEON_3R_LOGIC) {
4357 /* 64-bit element instructions. */
9ee6e8bb 4358 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4359 neon_load_reg64(cpu_V0, rn + pass);
4360 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4361 switch (op) {
62698be3 4362 case NEON_3R_VQADD:
9ee6e8bb 4363 if (u) {
2a3f75b4 4364 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4365 } else {
2a3f75b4 4366 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4367 }
9ee6e8bb 4368 break;
62698be3 4369 case NEON_3R_VQSUB:
9ee6e8bb 4370 if (u) {
2a3f75b4 4371 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4372 } else {
2a3f75b4 4373 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4374 }
4375 break;
62698be3 4376 case NEON_3R_VSHL:
ad69471c
PB
4377 if (u) {
4378 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4379 } else {
4380 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4381 }
4382 break;
62698be3 4383 case NEON_3R_VQSHL:
ad69471c 4384 if (u) {
2a3f75b4 4385 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4386 } else {
2a3f75b4 4387 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
ad69471c
PB
4388 }
4389 break;
62698be3 4390 case NEON_3R_VRSHL:
ad69471c
PB
4391 if (u) {
4392 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4393 } else {
ad69471c
PB
4394 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4395 }
4396 break;
62698be3 4397 case NEON_3R_VQRSHL:
ad69471c 4398 if (u) {
2a3f75b4 4399 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4400 } else {
2a3f75b4 4401 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4402 }
9ee6e8bb 4403 break;
62698be3 4404 case NEON_3R_VADD_VSUB:
9ee6e8bb 4405 if (u) {
ad69471c 4406 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4407 } else {
ad69471c 4408 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4409 }
4410 break;
4411 default:
4412 abort();
2c0262af 4413 }
ad69471c 4414 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4415 }
9ee6e8bb 4416 return 0;
2c0262af 4417 }
25f84f79 4418 pairwise = 0;
9ee6e8bb 4419 switch (op) {
62698be3
PM
4420 case NEON_3R_VSHL:
4421 case NEON_3R_VQSHL:
4422 case NEON_3R_VRSHL:
4423 case NEON_3R_VQRSHL:
9ee6e8bb 4424 {
ad69471c
PB
4425 int rtmp;
4426 /* Shift instruction operands are reversed. */
4427 rtmp = rn;
9ee6e8bb 4428 rn = rm;
ad69471c 4429 rm = rtmp;
9ee6e8bb 4430 }
2c0262af 4431 break;
25f84f79
PM
4432 case NEON_3R_VPADD:
4433 if (u) {
4434 return 1;
4435 }
4436 /* Fall through */
62698be3
PM
4437 case NEON_3R_VPMAX:
4438 case NEON_3R_VPMIN:
9ee6e8bb 4439 pairwise = 1;
2c0262af 4440 break;
25f84f79
PM
4441 case NEON_3R_FLOAT_ARITH:
4442 pairwise = (u && size < 2); /* if VPADD (float) */
4443 break;
4444 case NEON_3R_FLOAT_MINMAX:
4445 pairwise = u; /* if VPMIN/VPMAX (float) */
4446 break;
4447 case NEON_3R_FLOAT_CMP:
4448 if (!u && size) {
4449 /* no encoding for U=0 C=1x */
4450 return 1;
4451 }
4452 break;
4453 case NEON_3R_FLOAT_ACMP:
4454 if (!u) {
4455 return 1;
4456 }
4457 break;
4458 case NEON_3R_VRECPS_VRSQRTS:
4459 if (u) {
4460 return 1;
4461 }
2c0262af 4462 break;
25f84f79
PM
4463 case NEON_3R_VMUL:
4464 if (u && (size != 0)) {
4465 /* UNDEF on invalid size for polynomial subcase */
4466 return 1;
4467 }
2c0262af 4468 break;
9ee6e8bb 4469 default:
2c0262af 4470 break;
9ee6e8bb 4471 }
dd8fbd78 4472
25f84f79
PM
4473 if (pairwise && q) {
4474 /* All the pairwise insns UNDEF if Q is set */
4475 return 1;
4476 }
4477
9ee6e8bb
PB
4478 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4479
4480 if (pairwise) {
4481 /* Pairwise. */
a5a14945
JR
4482 if (pass < 1) {
4483 tmp = neon_load_reg(rn, 0);
4484 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4485 } else {
a5a14945
JR
4486 tmp = neon_load_reg(rm, 0);
4487 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4488 }
4489 } else {
4490 /* Elementwise. */
dd8fbd78
FN
4491 tmp = neon_load_reg(rn, pass);
4492 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4493 }
4494 switch (op) {
62698be3 4495 case NEON_3R_VHADD:
9ee6e8bb
PB
4496 GEN_NEON_INTEGER_OP(hadd);
4497 break;
62698be3 4498 case NEON_3R_VQADD:
2a3f75b4 4499 GEN_NEON_INTEGER_OP(qadd);
2c0262af 4500 break;
62698be3 4501 case NEON_3R_VRHADD:
9ee6e8bb 4502 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4503 break;
62698be3 4504 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4505 switch ((u << 2) | size) {
4506 case 0: /* VAND */
dd8fbd78 4507 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4508 break;
4509 case 1: /* BIC */
f669df27 4510 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4511 break;
4512 case 2: /* VORR */
dd8fbd78 4513 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4514 break;
4515 case 3: /* VORN */
f669df27 4516 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4517 break;
4518 case 4: /* VEOR */
dd8fbd78 4519 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4520 break;
4521 case 5: /* VBSL */
dd8fbd78
FN
4522 tmp3 = neon_load_reg(rd, pass);
4523 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4524 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4525 break;
4526 case 6: /* VBIT */
dd8fbd78
FN
4527 tmp3 = neon_load_reg(rd, pass);
4528 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4529 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4530 break;
4531 case 7: /* VBIF */
dd8fbd78
FN
4532 tmp3 = neon_load_reg(rd, pass);
4533 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4534 tcg_temp_free_i32(tmp3);
9ee6e8bb 4535 break;
2c0262af
FB
4536 }
4537 break;
62698be3 4538 case NEON_3R_VHSUB:
9ee6e8bb
PB
4539 GEN_NEON_INTEGER_OP(hsub);
4540 break;
62698be3 4541 case NEON_3R_VQSUB:
2a3f75b4 4542 GEN_NEON_INTEGER_OP(qsub);
2c0262af 4543 break;
62698be3 4544 case NEON_3R_VCGT:
9ee6e8bb
PB
4545 GEN_NEON_INTEGER_OP(cgt);
4546 break;
62698be3 4547 case NEON_3R_VCGE:
9ee6e8bb
PB
4548 GEN_NEON_INTEGER_OP(cge);
4549 break;
62698be3 4550 case NEON_3R_VSHL:
ad69471c 4551 GEN_NEON_INTEGER_OP(shl);
2c0262af 4552 break;
62698be3 4553 case NEON_3R_VQSHL:
2a3f75b4 4554 GEN_NEON_INTEGER_OP(qshl);
2c0262af 4555 break;
62698be3 4556 case NEON_3R_VRSHL:
ad69471c 4557 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4558 break;
62698be3 4559 case NEON_3R_VQRSHL:
2a3f75b4 4560 GEN_NEON_INTEGER_OP(qrshl);
9ee6e8bb 4561 break;
62698be3 4562 case NEON_3R_VMAX:
9ee6e8bb
PB
4563 GEN_NEON_INTEGER_OP(max);
4564 break;
62698be3 4565 case NEON_3R_VMIN:
9ee6e8bb
PB
4566 GEN_NEON_INTEGER_OP(min);
4567 break;
62698be3 4568 case NEON_3R_VABD:
9ee6e8bb
PB
4569 GEN_NEON_INTEGER_OP(abd);
4570 break;
62698be3 4571 case NEON_3R_VABA:
9ee6e8bb 4572 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4573 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4574 tmp2 = neon_load_reg(rd, pass);
4575 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4576 break;
62698be3 4577 case NEON_3R_VADD_VSUB:
9ee6e8bb 4578 if (!u) { /* VADD */
62698be3 4579 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4580 } else { /* VSUB */
4581 switch (size) {
dd8fbd78
FN
4582 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4583 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4584 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4585 default: abort();
9ee6e8bb
PB
4586 }
4587 }
4588 break;
62698be3 4589 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4590 if (!u) { /* VTST */
4591 switch (size) {
dd8fbd78
FN
4592 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4593 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4594 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4595 default: abort();
9ee6e8bb
PB
4596 }
4597 } else { /* VCEQ */
4598 switch (size) {
dd8fbd78
FN
4599 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4600 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4601 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4602 default: abort();
9ee6e8bb
PB
4603 }
4604 }
4605 break;
62698be3 4606 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4607 switch (size) {
dd8fbd78
FN
4608 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4609 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4610 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4611 default: abort();
9ee6e8bb 4612 }
7d1b0095 4613 tcg_temp_free_i32(tmp2);
dd8fbd78 4614 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4615 if (u) { /* VMLS */
dd8fbd78 4616 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4617 } else { /* VMLA */
dd8fbd78 4618 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4619 }
4620 break;
62698be3 4621 case NEON_3R_VMUL:
9ee6e8bb 4622 if (u) { /* polynomial */
dd8fbd78 4623 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4624 } else { /* Integer */
4625 switch (size) {
dd8fbd78
FN
4626 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4627 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4628 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4629 default: abort();
9ee6e8bb
PB
4630 }
4631 }
4632 break;
62698be3 4633 case NEON_3R_VPMAX:
9ee6e8bb
PB
4634 GEN_NEON_INTEGER_OP(pmax);
4635 break;
62698be3 4636 case NEON_3R_VPMIN:
9ee6e8bb
PB
4637 GEN_NEON_INTEGER_OP(pmin);
4638 break;
62698be3 4639 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4640 if (!u) { /* VQDMULH */
4641 switch (size) {
2a3f75b4
PM
4642 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4643 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4644 default: abort();
9ee6e8bb 4645 }
62698be3 4646 } else { /* VQRDMULH */
9ee6e8bb 4647 switch (size) {
2a3f75b4
PM
4648 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4649 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4650 default: abort();
9ee6e8bb
PB
4651 }
4652 }
4653 break;
62698be3 4654 case NEON_3R_VPADD:
9ee6e8bb 4655 switch (size) {
dd8fbd78
FN
4656 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4657 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4658 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4659 default: abort();
9ee6e8bb
PB
4660 }
4661 break;
62698be3 4662 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
9ee6e8bb
PB
4663 switch ((u << 2) | size) {
4664 case 0: /* VADD */
dd8fbd78 4665 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4666 break;
4667 case 2: /* VSUB */
dd8fbd78 4668 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4669 break;
4670 case 4: /* VPADD */
dd8fbd78 4671 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4672 break;
4673 case 6: /* VABD */
dd8fbd78 4674 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4675 break;
4676 default:
62698be3 4677 abort();
9ee6e8bb
PB
4678 }
4679 break;
62698be3 4680 case NEON_3R_FLOAT_MULTIPLY:
dd8fbd78 4681 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4682 if (!u) {
7d1b0095 4683 tcg_temp_free_i32(tmp2);
dd8fbd78 4684 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4685 if (size == 0) {
dd8fbd78 4686 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4687 } else {
dd8fbd78 4688 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4689 }
4690 }
4691 break;
62698be3 4692 case NEON_3R_FLOAT_CMP:
9ee6e8bb 4693 if (!u) {
dd8fbd78 4694 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4695 } else {
9ee6e8bb 4696 if (size == 0)
dd8fbd78 4697 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4698 else
dd8fbd78 4699 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4700 }
2c0262af 4701 break;
62698be3 4702 case NEON_3R_FLOAT_ACMP:
9ee6e8bb 4703 if (size == 0)
dd8fbd78 4704 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4705 else
dd8fbd78 4706 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4707 break;
62698be3 4708 case NEON_3R_FLOAT_MINMAX:
9ee6e8bb 4709 if (size == 0)
dd8fbd78 4710 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4711 else
dd8fbd78 4712 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb 4713 break;
62698be3 4714 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4715 if (size == 0)
dd8fbd78 4716 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4717 else
dd8fbd78 4718 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4719 break;
9ee6e8bb
PB
4720 default:
4721 abort();
2c0262af 4722 }
7d1b0095 4723 tcg_temp_free_i32(tmp2);
dd8fbd78 4724
9ee6e8bb
PB
4725 /* Save the result. For elementwise operations we can put it
4726 straight into the destination register. For pairwise operations
4727 we have to be careful to avoid clobbering the source operands. */
4728 if (pairwise && rd == rm) {
dd8fbd78 4729 neon_store_scratch(pass, tmp);
9ee6e8bb 4730 } else {
dd8fbd78 4731 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4732 }
4733
4734 } /* for pass */
4735 if (pairwise && rd == rm) {
4736 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4737 tmp = neon_load_scratch(pass);
4738 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4739 }
4740 }
ad69471c 4741 /* End of 3 register same size operations. */
9ee6e8bb
PB
4742 } else if (insn & (1 << 4)) {
4743 if ((insn & 0x00380080) != 0) {
4744 /* Two registers and shift. */
4745 op = (insn >> 8) & 0xf;
4746 if (insn & (1 << 7)) {
cc13115b
PM
4747 /* 64-bit shift. */
4748 if (op > 7) {
4749 return 1;
4750 }
9ee6e8bb
PB
4751 size = 3;
4752 } else {
4753 size = 2;
4754 while ((insn & (1 << (size + 19))) == 0)
4755 size--;
4756 }
4757 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4758 /* To avoid excessive duplication of ops we implement shift
4759 by immediate using the variable shift operations. */
4760 if (op < 8) {
4761 /* Shift by immediate:
4762 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4763 if (q && ((rd | rm) & 1)) {
4764 return 1;
4765 }
4766 if (!u && (op == 4 || op == 6)) {
4767 return 1;
4768 }
9ee6e8bb
PB
4769 /* Right shifts are encoded as N - shift, where N is the
4770 element size in bits. */
4771 if (op <= 4)
4772 shift = shift - (1 << (size + 3));
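 /* Illustrative note (editorial, not from the upstream source): after this
  * adjustment right shifts are held as negative counts, which the variable
  * shift helpers treat as shifts to the right. E.g. for 8-bit elements
  * (size == 0) an encoded immediate of 1 becomes 1 - 8 = -7, i.e. a right
  * shift by 7. */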
9ee6e8bb
PB
4773 if (size == 3) {
4774 count = q + 1;
4775 } else {
4776 count = q ? 4: 2;
4777 }
4778 switch (size) {
4779 case 0:
4780 imm = (uint8_t) shift;
4781 imm |= imm << 8;
4782 imm |= imm << 16;
4783 break;
4784 case 1:
4785 imm = (uint16_t) shift;
4786 imm |= imm << 16;
4787 break;
4788 case 2:
4789 case 3:
4790 imm = shift;
4791 break;
4792 default:
4793 abort();
4794 }
4795
4796 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4797 if (size == 3) {
4798 neon_load_reg64(cpu_V0, rm + pass);
4799 tcg_gen_movi_i64(cpu_V1, imm);
4800 switch (op) {
4801 case 0: /* VSHR */
4802 case 1: /* VSRA */
4803 if (u)
4804 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4805 else
ad69471c 4806 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4807 break;
ad69471c
PB
4808 case 2: /* VRSHR */
4809 case 3: /* VRSRA */
4810 if (u)
4811 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4812 else
ad69471c 4813 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4814 break;
ad69471c 4815 case 4: /* VSRI */
ad69471c
PB
4816 case 5: /* VSHL, VSLI */
4817 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4818 break;
0322b26e 4819 case 6: /* VQSHLU */
cc13115b 4820 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4821 break;
0322b26e
PM
4822 case 7: /* VQSHL */
4823 if (u) {
2a3f75b4 4824 gen_helper_neon_qshl_u64(cpu_V0,
0322b26e
PM
4825 cpu_V0, cpu_V1);
4826 } else {
2a3f75b4 4827 gen_helper_neon_qshl_s64(cpu_V0,
0322b26e
PM
4828 cpu_V0, cpu_V1);
4829 }
9ee6e8bb 4830 break;
9ee6e8bb 4831 }
ad69471c
PB
4832 if (op == 1 || op == 3) {
4833 /* Accumulate. */
5371cb81 4834 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
4835 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4836 } else if (op == 4 || (op == 5 && u)) {
4837 /* Insert */
923e6509
CL
4838 neon_load_reg64(cpu_V1, rd + pass);
4839 uint64_t mask;
4840 if (shift < -63 || shift > 63) {
4841 mask = 0;
4842 } else {
4843 if (op == 4) {
4844 mask = 0xffffffffffffffffull >> -shift;
4845 } else {
4846 mask = 0xffffffffffffffffull << shift;
4847 }
4848 }
4849 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4850 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4851 }
4852 neon_store_reg64(cpu_V0, rd + pass);
4853 } else { /* size < 3 */
4854 /* Operands in T0 and T1. */
dd8fbd78 4855 tmp = neon_load_reg(rm, pass);
7d1b0095 4856 tmp2 = tcg_temp_new_i32();
dd8fbd78 4857 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
4858 switch (op) {
4859 case 0: /* VSHR */
4860 case 1: /* VSRA */
4861 GEN_NEON_INTEGER_OP(shl);
4862 break;
4863 case 2: /* VRSHR */
4864 case 3: /* VRSRA */
4865 GEN_NEON_INTEGER_OP(rshl);
4866 break;
4867 case 4: /* VSRI */
ad69471c
PB
4868 case 5: /* VSHL, VSLI */
4869 switch (size) {
dd8fbd78
FN
4870 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4871 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4872 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 4873 default: abort();
ad69471c
PB
4874 }
4875 break;
0322b26e 4876 case 6: /* VQSHLU */
ad69471c 4877 switch (size) {
0322b26e 4878 case 0:
2a3f75b4 4879 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
0322b26e
PM
4880 break;
4881 case 1:
2a3f75b4 4882 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
0322b26e
PM
4883 break;
4884 case 2:
2a3f75b4 4885 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
0322b26e
PM
4886 break;
4887 default:
cc13115b 4888 abort();
ad69471c
PB
4889 }
4890 break;
0322b26e 4891 case 7: /* VQSHL */
2a3f75b4 4892 GEN_NEON_INTEGER_OP(qshl);
0322b26e 4893 break;
ad69471c 4894 }
7d1b0095 4895 tcg_temp_free_i32(tmp2);
ad69471c
PB
4896
4897 if (op == 1 || op == 3) {
4898 /* Accumulate. */
dd8fbd78 4899 tmp2 = neon_load_reg(rd, pass);
5371cb81 4900 gen_neon_add(size, tmp, tmp2);
7d1b0095 4901 tcg_temp_free_i32(tmp2);
ad69471c
PB
4902 } else if (op == 4 || (op == 5 && u)) {
4903 /* Insert */
4904 switch (size) {
4905 case 0:
4906 if (op == 4)
ca9a32e4 4907 mask = 0xff >> -shift;
ad69471c 4908 else
ca9a32e4
JR
4909 mask = (uint8_t)(0xff << shift);
4910 mask |= mask << 8;
4911 mask |= mask << 16;
ad69471c
PB
4912 break;
4913 case 1:
4914 if (op == 4)
ca9a32e4 4915 mask = 0xffff >> -shift;
ad69471c 4916 else
ca9a32e4
JR
4917 mask = (uint16_t)(0xffff << shift);
4918 mask |= mask << 16;
ad69471c
PB
4919 break;
4920 case 2:
ca9a32e4
JR
4921 if (shift < -31 || shift > 31) {
4922 mask = 0;
4923 } else {
4924 if (op == 4)
4925 mask = 0xffffffffu >> -shift;
4926 else
4927 mask = 0xffffffffu << shift;
4928 }
ad69471c
PB
4929 break;
4930 default:
4931 abort();
4932 }
dd8fbd78 4933 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
4934 tcg_gen_andi_i32(tmp, tmp, mask);
4935 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 4936 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4937 tcg_temp_free_i32(tmp2);
ad69471c 4938 }
dd8fbd78 4939 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4940 }
4941 } /* for pass */
4942 } else if (op < 10) {
ad69471c 4943 /* Shift by immediate and narrow:
9ee6e8bb 4944 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 4945 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
4946 if (rm & 1) {
4947 return 1;
4948 }
9ee6e8bb
PB
4949 shift = shift - (1 << (size + 3));
4950 size++;
92cdfaeb 4951 if (size == 3) {
a7812ae4 4952 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
4953 neon_load_reg64(cpu_V0, rm);
4954 neon_load_reg64(cpu_V1, rm + 1);
4955 for (pass = 0; pass < 2; pass++) {
4956 TCGv_i64 in;
4957 if (pass == 0) {
4958 in = cpu_V0;
4959 } else {
4960 in = cpu_V1;
4961 }
ad69471c 4962 if (q) {
0b36f4cd 4963 if (input_unsigned) {
92cdfaeb 4964 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 4965 } else {
92cdfaeb 4966 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 4967 }
ad69471c 4968 } else {
0b36f4cd 4969 if (input_unsigned) {
92cdfaeb 4970 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 4971 } else {
92cdfaeb 4972 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 4973 }
ad69471c 4974 }
7d1b0095 4975 tmp = tcg_temp_new_i32();
92cdfaeb
PM
4976 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4977 neon_store_reg(rd, pass, tmp);
4978 } /* for pass */
4979 tcg_temp_free_i64(tmp64);
4980 } else {
4981 if (size == 1) {
4982 imm = (uint16_t)shift;
4983 imm |= imm << 16;
2c0262af 4984 } else {
92cdfaeb
PM
4985 /* size == 2 */
4986 imm = (uint32_t)shift;
4987 }
4988 tmp2 = tcg_const_i32(imm);
4989 tmp4 = neon_load_reg(rm + 1, 0);
4990 tmp5 = neon_load_reg(rm + 1, 1);
4991 for (pass = 0; pass < 2; pass++) {
4992 if (pass == 0) {
4993 tmp = neon_load_reg(rm, 0);
4994 } else {
4995 tmp = tmp4;
4996 }
0b36f4cd
CL
4997 gen_neon_shift_narrow(size, tmp, tmp2, q,
4998 input_unsigned);
92cdfaeb
PM
4999 if (pass == 0) {
5000 tmp3 = neon_load_reg(rm, 1);
5001 } else {
5002 tmp3 = tmp5;
5003 }
0b36f4cd
CL
5004 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5005 input_unsigned);
36aa55dc 5006 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5007 tcg_temp_free_i32(tmp);
5008 tcg_temp_free_i32(tmp3);
5009 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5010 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5011 neon_store_reg(rd, pass, tmp);
5012 } /* for pass */
c6067f04 5013 tcg_temp_free_i32(tmp2);
b75263d6 5014 }
9ee6e8bb 5015 } else if (op == 10) {
cc13115b
PM
5016 /* VSHLL, VMOVL */
5017 if (q || (rd & 1)) {
9ee6e8bb 5018 return 1;
cc13115b 5019 }
ad69471c
PB
5020 tmp = neon_load_reg(rm, 0);
5021 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5022 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5023 if (pass == 1)
5024 tmp = tmp2;
5025
5026 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5027
9ee6e8bb
PB
5028 if (shift != 0) {
5029 /* The shift is less than the width of the source
ad69471c
PB
5030 type, so we can just shift the whole register. */
5031 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5032 /* Widen the result of shift: we need to clear
5033 * the potential overflow bits resulting from
5034 * left bits of the narrow input appearing as
5035 * right bits of the left-neighbouring narrow
5036 * input. */
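 /* Illustrative note (editorial, not from the upstream source): e.g. for
  * size == 0 and shift == 3, imm = 0xff >> 5 = 0x07 replicated gives
  * imm64 = 0x0007000700070007, and the AND with ~imm64 below clears the
  * low three bits of every 16-bit result lane, where bits of the
  * neighbouring narrow element would otherwise appear. */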
ad69471c
PB
5037 if (size < 2 || !u) {
5038 uint64_t imm64;
5039 if (size == 0) {
5040 imm = (0xffu >> (8 - shift));
5041 imm |= imm << 16;
acdf01ef 5042 } else if (size == 1) {
ad69471c 5043 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5044 } else {
5045 /* size == 2 */
5046 imm = 0xffffffff >> (32 - shift);
5047 }
5048 if (size < 2) {
5049 imm64 = imm | (((uint64_t)imm) << 32);
5050 } else {
5051 imm64 = imm;
9ee6e8bb 5052 }
acdf01ef 5053 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5054 }
5055 }
ad69471c 5056 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5057 }
f73534a5 5058 } else if (op >= 14) {
9ee6e8bb 5059 /* VCVT fixed-point. */
cc13115b
PM
5060 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5061 return 1;
5062 }
f73534a5
PM
5063 /* We have already masked out the must-be-1 top bit of imm6,
5064 * hence this 32-shift where the ARM ARM has 64-imm6.
5065 */
5066 shift = 32 - shift;
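 /* Illustrative note (editorial, not from the upstream source): e.g. an
  * original imm6 of 0b100001 (33) means 64 - 33 = 31 fraction bits; with
  * the top bit already stripped the field holds 1, and 32 - 1 gives the
  * same 31. */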
9ee6e8bb 5067 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5068 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5069 if (!(op & 1)) {
9ee6e8bb 5070 if (u)
4373f3ce 5071 gen_vfp_ulto(0, shift);
9ee6e8bb 5072 else
4373f3ce 5073 gen_vfp_slto(0, shift);
9ee6e8bb
PB
5074 } else {
5075 if (u)
4373f3ce 5076 gen_vfp_toul(0, shift);
9ee6e8bb 5077 else
4373f3ce 5078 gen_vfp_tosl(0, shift);
2c0262af 5079 }
4373f3ce 5080 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5081 }
5082 } else {
9ee6e8bb
PB
5083 return 1;
5084 }
5085 } else { /* (insn & 0x00380080) == 0 */
5086 int invert;
7d80fee5
PM
5087 if (q && (rd & 1)) {
5088 return 1;
5089 }
9ee6e8bb
PB
5090
5091 op = (insn >> 8) & 0xf;
5092 /* One register and immediate. */
5093 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5094 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5095 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5096 * We choose to not special-case this and will behave as if a
5097 * valid constant encoding of 0 had been given.
5098 */
9ee6e8bb
PB
5099 switch (op) {
5100 case 0: case 1:
5101 /* no-op */
5102 break;
5103 case 2: case 3:
5104 imm <<= 8;
5105 break;
5106 case 4: case 5:
5107 imm <<= 16;
5108 break;
5109 case 6: case 7:
5110 imm <<= 24;
5111 break;
5112 case 8: case 9:
5113 imm |= imm << 16;
5114 break;
5115 case 10: case 11:
5116 imm = (imm << 8) | (imm << 24);
5117 break;
5118 case 12:
8e31209e 5119 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5120 break;
5121 case 13:
5122 imm = (imm << 16) | 0xffff;
5123 break;
5124 case 14:
5125 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5126 if (invert)
5127 imm = ~imm;
5128 break;
5129 case 15:
7d80fee5
PM
5130 if (invert) {
5131 return 1;
5132 }
9ee6e8bb
PB
5133 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5134 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5135 break;
5136 }
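 /* Illustrative note (editorial, not from the upstream source): case 15 is
  * the VFP/Neon 8-bit float immediate expansion; e.g. imm = 0x70 expands to
  * (0x30 << 19) | (0x1f << 25) = 0x3f800000, i.e. 1.0f. */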
5137 if (invert)
5138 imm = ~imm;
5139
9ee6e8bb
PB
5140 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5141 if (op & 1 && op < 12) {
ad69471c 5142 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5143 if (invert) {
5144 /* The immediate value has already been inverted, so
5145 BIC becomes AND. */
ad69471c 5146 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5147 } else {
ad69471c 5148 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5149 }
9ee6e8bb 5150 } else {
ad69471c 5151 /* VMOV, VMVN. */
7d1b0095 5152 tmp = tcg_temp_new_i32();
9ee6e8bb 5153 if (op == 14 && invert) {
a5a14945 5154 int n;
ad69471c
PB
5155 uint32_t val;
5156 val = 0;
9ee6e8bb
PB
5157 for (n = 0; n < 4; n++) {
5158 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5159 val |= 0xff << (n * 8);
9ee6e8bb 5160 }
ad69471c
PB
5161 tcg_gen_movi_i32(tmp, val);
5162 } else {
5163 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5164 }
9ee6e8bb 5165 }
ad69471c 5166 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5167 }
5168 }
e4b3861d 5169 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5170 if (size != 3) {
5171 op = (insn >> 8) & 0xf;
5172 if ((insn & (1 << 6)) == 0) {
5173 /* Three registers of different lengths. */
5174 int src1_wide;
5175 int src2_wide;
5176 int prewiden;
695272dc
PM
5177 /* undefreq: bit 0 : UNDEF if size != 0
5178 * bit 1 : UNDEF if size == 0
5179 * bit 2 : UNDEF if U == 1
5180 * Note that [1:0] set implies 'always UNDEF'
5181 */
5182 int undefreq;
5183 /* prewiden, src1_wide, src2_wide, undefreq */
5184 static const int neon_3reg_wide[16][4] = {
5185 {1, 0, 0, 0}, /* VADDL */
5186 {1, 1, 0, 0}, /* VADDW */
5187 {1, 0, 0, 0}, /* VSUBL */
5188 {1, 1, 0, 0}, /* VSUBW */
5189 {0, 1, 1, 0}, /* VADDHN */
5190 {0, 0, 0, 0}, /* VABAL */
5191 {0, 1, 1, 0}, /* VSUBHN */
5192 {0, 0, 0, 0}, /* VABDL */
5193 {0, 0, 0, 0}, /* VMLAL */
5194 {0, 0, 0, 6}, /* VQDMLAL */
5195 {0, 0, 0, 0}, /* VMLSL */
5196 {0, 0, 0, 6}, /* VQDMLSL */
5197 {0, 0, 0, 0}, /* Integer VMULL */
5198 {0, 0, 0, 2}, /* VQDMULL */
5199 {0, 0, 0, 5}, /* Polynomial VMULL */
5200 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5201 };
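 /* Illustrative note (editorial, not from the upstream source): e.g. the
  * VQDMULL row {0, 0, 0, 2} UNDEFs when size == 0 (there is no 8-bit
  * variant), and the polynomial VMULL row {0, 0, 0, 5} UNDEFs unless
  * size == 0 and U == 0. */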
5202
5203 prewiden = neon_3reg_wide[op][0];
5204 src1_wide = neon_3reg_wide[op][1];
5205 src2_wide = neon_3reg_wide[op][2];
695272dc 5206 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5207
695272dc
PM
5208 if (((undefreq & 1) && (size != 0)) ||
5209 ((undefreq & 2) && (size == 0)) ||
5210 ((undefreq & 4) && u)) {
5211 return 1;
5212 }
5213 if ((src1_wide && (rn & 1)) ||
5214 (src2_wide && (rm & 1)) ||
5215 (!src2_wide && (rd & 1))) {
ad69471c 5216 return 1;
695272dc 5217 }
ad69471c 5218
9ee6e8bb
PB
5219 /* Avoid overlapping operands. Wide source operands are
5220 always aligned so will never overlap with wide
5221 destinations in problematic ways. */
8f8e3aa4 5222 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5223 tmp = neon_load_reg(rm, 1);
5224 neon_store_scratch(2, tmp);
8f8e3aa4 5225 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5226 tmp = neon_load_reg(rn, 1);
5227 neon_store_scratch(2, tmp);
9ee6e8bb 5228 }
a50f5b91 5229 TCGV_UNUSED(tmp3);
9ee6e8bb 5230 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5231 if (src1_wide) {
5232 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5233 TCGV_UNUSED(tmp);
9ee6e8bb 5234 } else {
ad69471c 5235 if (pass == 1 && rd == rn) {
dd8fbd78 5236 tmp = neon_load_scratch(2);
9ee6e8bb 5237 } else {
ad69471c
PB
5238 tmp = neon_load_reg(rn, pass);
5239 }
5240 if (prewiden) {
5241 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5242 }
5243 }
ad69471c
PB
5244 if (src2_wide) {
5245 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5246 TCGV_UNUSED(tmp2);
9ee6e8bb 5247 } else {
ad69471c 5248 if (pass == 1 && rd == rm) {
dd8fbd78 5249 tmp2 = neon_load_scratch(2);
9ee6e8bb 5250 } else {
ad69471c
PB
5251 tmp2 = neon_load_reg(rm, pass);
5252 }
5253 if (prewiden) {
5254 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5255 }
9ee6e8bb
PB
5256 }
5257 switch (op) {
5258 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5259 gen_neon_addl(size);
9ee6e8bb 5260 break;
79b0e534 5261 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5262 gen_neon_subl(size);
9ee6e8bb
PB
5263 break;
5264 case 5: case 7: /* VABAL, VABDL */
5265 switch ((size << 1) | u) {
ad69471c
PB
5266 case 0:
5267 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5268 break;
5269 case 1:
5270 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5271 break;
5272 case 2:
5273 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5274 break;
5275 case 3:
5276 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5277 break;
5278 case 4:
5279 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5280 break;
5281 case 5:
5282 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5283 break;
9ee6e8bb
PB
5284 default: abort();
5285 }
7d1b0095
PM
5286 tcg_temp_free_i32(tmp2);
5287 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5288 break;
5289 case 8: case 9: case 10: case 11: case 12: case 13:
5290 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5291 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5292 break;
5293 case 14: /* Polynomial VMULL */
e5ca24cb 5294 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5295 tcg_temp_free_i32(tmp2);
5296 tcg_temp_free_i32(tmp);
e5ca24cb 5297 break;
695272dc
PM
5298 default: /* 15 is RESERVED: caught earlier */
5299 abort();
9ee6e8bb 5300 }
ebcd88ce
PM
5301 if (op == 13) {
5302 /* VQDMULL */
5303 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5304 neon_store_reg64(cpu_V0, rd + pass);
5305 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5306 /* Accumulate. */
ebcd88ce 5307 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5308 switch (op) {
4dc064e6
PM
5309 case 10: /* VMLSL */
5310 gen_neon_negl(cpu_V0, size);
5311 /* Fall through */
5312 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5313 gen_neon_addl(size);
9ee6e8bb
PB
5314 break;
5315 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5316 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5317 if (op == 11) {
5318 gen_neon_negl(cpu_V0, size);
5319 }
ad69471c
PB
5320 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5321 break;
9ee6e8bb
PB
5322 default:
5323 abort();
5324 }
ad69471c 5325 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5326 } else if (op == 4 || op == 6) {
5327 /* Narrowing operation. */
7d1b0095 5328 tmp = tcg_temp_new_i32();
79b0e534 5329 if (!u) {
9ee6e8bb 5330 switch (size) {
ad69471c
PB
5331 case 0:
5332 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5333 break;
5334 case 1:
5335 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5336 break;
5337 case 2:
5338 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5339 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5340 break;
9ee6e8bb
PB
5341 default: abort();
5342 }
5343 } else {
5344 switch (size) {
ad69471c
PB
5345 case 0:
5346 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5347 break;
5348 case 1:
5349 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5350 break;
5351 case 2:
5352 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5353 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5354 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5355 break;
9ee6e8bb
PB
5356 default: abort();
5357 }
5358 }
ad69471c
PB
5359 if (pass == 0) {
5360 tmp3 = tmp;
5361 } else {
5362 neon_store_reg(rd, 0, tmp3);
5363 neon_store_reg(rd, 1, tmp);
5364 }
9ee6e8bb
PB
5365 } else {
5366 /* Write back the result. */
ad69471c 5367 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5368 }
5369 }
5370 } else {
3e3326df
PM
5371 /* Two registers and a scalar. NB that for ops of this form
5372 * the ARM ARM labels bit 24 as Q, but it is in our variable
5373 * 'u', not 'q'.
5374 */
5375 if (size == 0) {
5376 return 1;
5377 }
9ee6e8bb 5378 switch (op) {
9ee6e8bb 5379 case 1: /* Float VMLA scalar */
9ee6e8bb 5380 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5381 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5382 if (size == 1) {
5383 return 1;
5384 }
5385 /* fall through */
5386 case 0: /* Integer VMLA scalar */
5387 case 4: /* Integer VMLS scalar */
5388 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5389 case 12: /* VQDMULH scalar */
5390 case 13: /* VQRDMULH scalar */
3e3326df
PM
5391 if (u && ((rd | rn) & 1)) {
5392 return 1;
5393 }
dd8fbd78
FN
5394 tmp = neon_get_scalar(size, rm);
5395 neon_store_scratch(0, tmp);
9ee6e8bb 5396 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5397 tmp = neon_load_scratch(0);
5398 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5399 if (op == 12) {
5400 if (size == 1) {
2a3f75b4 5401 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5402 } else {
2a3f75b4 5403 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5404 }
5405 } else if (op == 13) {
5406 if (size == 1) {
2a3f75b4 5407 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5408 } else {
2a3f75b4 5409 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5410 }
5411 } else if (op & 1) {
dd8fbd78 5412 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5413 } else {
5414 switch (size) {
dd8fbd78
FN
5415 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5416 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5417 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5418 default: abort();
9ee6e8bb
PB
5419 }
5420 }
7d1b0095 5421 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5422 if (op < 8) {
5423 /* Accumulate. */
dd8fbd78 5424 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5425 switch (op) {
5426 case 0:
dd8fbd78 5427 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5428 break;
5429 case 1:
dd8fbd78 5430 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5431 break;
5432 case 4:
dd8fbd78 5433 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5434 break;
5435 case 5:
dd8fbd78 5436 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5437 break;
5438 default:
5439 abort();
5440 }
7d1b0095 5441 tcg_temp_free_i32(tmp2);
9ee6e8bb 5442 }
dd8fbd78 5443 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5444 }
5445 break;
9ee6e8bb 5446 case 3: /* VQDMLAL scalar */
9ee6e8bb 5447 case 7: /* VQDMLSL scalar */
9ee6e8bb 5448 case 11: /* VQDMULL scalar */
3e3326df 5449 if (u == 1) {
ad69471c 5450 return 1;
3e3326df
PM
5451 }
5452 /* fall through */
5453 case 2: /* VMLAL scalar */
5454 case 6: /* VMLSL scalar */
5455 case 10: /* VMULL scalar */
5456 if (rd & 1) {
5457 return 1;
5458 }
dd8fbd78 5459 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5460 /* We need a copy of tmp2 because gen_neon_mull
5461 * frees it during pass 0. */
7d1b0095 5462 tmp4 = tcg_temp_new_i32();
c6067f04 5463 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5464 tmp3 = neon_load_reg(rn, 1);
ad69471c 5465
9ee6e8bb 5466 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5467 if (pass == 0) {
5468 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5469 } else {
dd8fbd78 5470 tmp = tmp3;
c6067f04 5471 tmp2 = tmp4;
9ee6e8bb 5472 }
ad69471c 5473 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5474 if (op != 11) {
5475 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5476 }
9ee6e8bb 5477 switch (op) {
4dc064e6
PM
5478 case 6:
5479 gen_neon_negl(cpu_V0, size);
5480 /* Fall through */
5481 case 2:
ad69471c 5482 gen_neon_addl(size);
9ee6e8bb
PB
5483 break;
5484 case 3: case 7:
ad69471c 5485 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5486 if (op == 7) {
5487 gen_neon_negl(cpu_V0, size);
5488 }
ad69471c 5489 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5490 break;
5491 case 10:
5492 /* no-op */
5493 break;
5494 case 11:
ad69471c 5495 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5496 break;
5497 default:
5498 abort();
5499 }
ad69471c 5500 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5501 }
dd8fbd78 5502
dd8fbd78 5503
9ee6e8bb
PB
5504 break;
5505 default: /* 14 and 15 are RESERVED */
5506 return 1;
5507 }
5508 }
5509 } else { /* size == 3 */
5510 if (!u) {
5511 /* Extract. */
9ee6e8bb 5512 imm = (insn >> 8) & 0xf;
ad69471c
PB
5513
5514 if (imm > 7 && !q)
5515 return 1;
5516
52579ea1
PM
5517 if (q && ((rd | rn | rm) & 1)) {
5518 return 1;
5519 }
5520
ad69471c
PB
5521 if (imm == 0) {
5522 neon_load_reg64(cpu_V0, rn);
5523 if (q) {
5524 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5525 }
ad69471c
PB
5526 } else if (imm == 8) {
5527 neon_load_reg64(cpu_V0, rn + 1);
5528 if (q) {
5529 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5530 }
ad69471c 5531 } else if (q) {
a7812ae4 5532 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5533 if (imm < 8) {
5534 neon_load_reg64(cpu_V0, rn);
a7812ae4 5535 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5536 } else {
5537 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5538 neon_load_reg64(tmp64, rm);
ad69471c
PB
5539 }
5540 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5541 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5542 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5543 if (imm < 8) {
5544 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5545 } else {
ad69471c
PB
5546 neon_load_reg64(cpu_V1, rm + 1);
5547 imm -= 8;
9ee6e8bb 5548 }
ad69471c 5549 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5550 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5551 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5552 tcg_temp_free_i64(tmp64);
ad69471c 5553 } else {
a7812ae4 5554 /* BUGFIX */
ad69471c 5555 neon_load_reg64(cpu_V0, rn);
a7812ae4 5556 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5557 neon_load_reg64(cpu_V1, rm);
a7812ae4 5558 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5559 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5560 }
5561 neon_store_reg64(cpu_V0, rd);
5562 if (q) {
5563 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5564 }
5565 } else if ((insn & (1 << 11)) == 0) {
5566 /* Two register misc. */
5567 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5568 size = (insn >> 18) & 3;
5569 switch (op) {
5570 case 0: /* VREV64 */
5571 if (size == 3)
5572 return 1;
5573 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5574 tmp = neon_load_reg(rm, pass * 2);
5575 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5576 switch (size) {
dd8fbd78
FN
5577 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5578 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5579 case 2: /* no-op */ break;
5580 default: abort();
5581 }
dd8fbd78 5582 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5583 if (size == 2) {
dd8fbd78 5584 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5585 } else {
9ee6e8bb 5586 switch (size) {
dd8fbd78
FN
5587 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5588 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5589 default: abort();
5590 }
dd8fbd78 5591 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5592 }
5593 }
5594 break;
5595 case 4: case 5: /* VPADDL */
5596 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5597 if (size == 3)
5598 return 1;
ad69471c
PB
5599 for (pass = 0; pass < q + 1; pass++) {
5600 tmp = neon_load_reg(rm, pass * 2);
5601 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5602 tmp = neon_load_reg(rm, pass * 2 + 1);
5603 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5604 switch (size) {
5605 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5606 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5607 case 2: tcg_gen_add_i64(CPU_V001); break;
5608 default: abort();
5609 }
9ee6e8bb
PB
5610 if (op >= 12) {
5611 /* Accumulate. */
ad69471c
PB
5612 neon_load_reg64(cpu_V1, rd + pass);
5613 gen_neon_addl(size);
9ee6e8bb 5614 }
ad69471c 5615 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5616 }
5617 break;
5618 case 33: /* VTRN */
5619 if (size == 2) {
a5a14945 5620 int n;
9ee6e8bb 5621 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5622 tmp = neon_load_reg(rm, n);
5623 tmp2 = neon_load_reg(rd, n + 1);
5624 neon_store_reg(rm, n, tmp2);
5625 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5626 }
5627 } else {
5628 goto elementwise;
5629 }
5630 break;
5631 case 34: /* VUZP */
02acedf9 5632 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5633 return 1;
9ee6e8bb
PB
5634 }
5635 break;
5636 case 35: /* VZIP */
d68a6f3a 5637 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5638 return 1;
9ee6e8bb
PB
5639 }
5640 break;
5641 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5642 if (size == 3)
5643 return 1;
a50f5b91 5644 TCGV_UNUSED(tmp2);
9ee6e8bb 5645 for (pass = 0; pass < 2; pass++) {
ad69471c 5646 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5647 tmp = tcg_temp_new_i32();
c33171c7 5648 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
ad69471c
PB
5649 if (pass == 0) {
5650 tmp2 = tmp;
5651 } else {
5652 neon_store_reg(rd, 0, tmp2);
5653 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5654 }
9ee6e8bb
PB
5655 }
5656 break;
5657 case 38: /* VSHLL */
ad69471c 5658 if (q || size == 3)
9ee6e8bb 5659 return 1;
ad69471c
PB
5660 tmp = neon_load_reg(rm, 0);
5661 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5662 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5663 if (pass == 1)
5664 tmp = tmp2;
5665 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5666 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5667 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5668 }
5669 break;
60011498
PB
5670 case 44: /* VCVT.F16.F32 */
5671 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5672 return 1;
7d1b0095
PM
5673 tmp = tcg_temp_new_i32();
5674 tmp2 = tcg_temp_new_i32();
60011498 5675 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5676 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5677 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5678 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5679 tcg_gen_shli_i32(tmp2, tmp2, 16);
5680 tcg_gen_or_i32(tmp2, tmp2, tmp);
5681 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5682 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5683 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5684 neon_store_reg(rd, 0, tmp2);
7d1b0095 5685 tmp2 = tcg_temp_new_i32();
2d981da7 5686 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5687 tcg_gen_shli_i32(tmp2, tmp2, 16);
5688 tcg_gen_or_i32(tmp2, tmp2, tmp);
5689 neon_store_reg(rd, 1, tmp2);
7d1b0095 5690 tcg_temp_free_i32(tmp);
60011498
PB
5691 break;
5692 case 46: /* VCVT.F32.F16 */
5693 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5694 return 1;
7d1b0095 5695 tmp3 = tcg_temp_new_i32();
60011498
PB
5696 tmp = neon_load_reg(rm, 0);
5697 tmp2 = neon_load_reg(rm, 1);
5698 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5699 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5700 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5701 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5702 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5703 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5704 tcg_temp_free_i32(tmp);
60011498 5705 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5706 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5707 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5708 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5709 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5710 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5711 tcg_temp_free_i32(tmp2);
5712 tcg_temp_free_i32(tmp3);
60011498 5713 break;
9ee6e8bb
PB
5714 default:
5715 elementwise:
5716 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5717 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5718 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5719 neon_reg_offset(rm, pass));
dd8fbd78 5720 TCGV_UNUSED(tmp);
9ee6e8bb 5721 } else {
dd8fbd78 5722 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5723 }
5724 switch (op) {
5725 case 1: /* VREV32 */
5726 switch (size) {
dd8fbd78
FN
5727 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5728 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5729 default: return 1;
5730 }
5731 break;
5732 case 2: /* VREV16 */
5733 if (size != 0)
5734 return 1;
dd8fbd78 5735 gen_rev16(tmp);
9ee6e8bb 5736 break;
9ee6e8bb
PB
5737 case 8: /* CLS */
5738 switch (size) {
dd8fbd78
FN
5739 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5740 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5741 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
9ee6e8bb
PB
5742 default: return 1;
5743 }
5744 break;
5745 case 9: /* CLZ */
5746 switch (size) {
dd8fbd78
FN
5747 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5748 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5749 case 2: gen_helper_clz(tmp, tmp); break;
9ee6e8bb
PB
5750 default: return 1;
5751 }
5752 break;
5753 case 10: /* CNT */
5754 if (size != 0)
5755 return 1;
dd8fbd78 5756 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb
PB
5757 break;
5758 case 11: /* VNOT */
5759 if (size != 0)
5760 return 1;
dd8fbd78 5761 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5762 break;
5763 case 14: /* VQABS */
5764 switch (size) {
2a3f75b4
PM
5765 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5766 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5767 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
9ee6e8bb
PB
5768 default: return 1;
5769 }
5770 break;
5771 case 15: /* VQNEG */
5772 switch (size) {
2a3f75b4
PM
5773 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5774 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5775 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
9ee6e8bb
PB
5776 default: return 1;
5777 }
5778 break;
5779 case 16: case 19: /* VCGT #0, VCLE #0 */
dd8fbd78 5780 tmp2 = tcg_const_i32(0);
9ee6e8bb 5781 switch(size) {
dd8fbd78
FN
5782 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5783 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5784 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5785 default: return 1;
5786 }
dd8fbd78 5787 tcg_temp_free(tmp2);
9ee6e8bb 5788 if (op == 19)
dd8fbd78 5789 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5790 break;
5791 case 17: case 20: /* VCGE #0, VCLT #0 */
dd8fbd78 5792 tmp2 = tcg_const_i32(0);
9ee6e8bb 5793 switch(size) {
dd8fbd78
FN
5794 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5795 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5796 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5797 default: return 1;
5798 }
dd8fbd78 5799 tcg_temp_free(tmp2);
9ee6e8bb 5800 if (op == 20)
dd8fbd78 5801 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb
PB
5802 break;
5803 case 18: /* VCEQ #0 */
dd8fbd78 5804 tmp2 = tcg_const_i32(0);
9ee6e8bb 5805 switch(size) {
dd8fbd78
FN
5806 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5807 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5808 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
9ee6e8bb
PB
5809 default: return 1;
5810 }
dd8fbd78 5811 tcg_temp_free(tmp2);
9ee6e8bb
PB
5812 break;
5813 case 22: /* VABS */
5814 switch(size) {
dd8fbd78
FN
5815 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5816 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5817 case 2: tcg_gen_abs_i32(tmp, tmp); break;
9ee6e8bb
PB
5818 default: return 1;
5819 }
5820 break;
5821 case 23: /* VNEG */
ad69471c
PB
5822 if (size == 3)
5823 return 1;
dd8fbd78
FN
5824 tmp2 = tcg_const_i32(0);
5825 gen_neon_rsb(size, tmp, tmp2);
5826 tcg_temp_free(tmp2);
9ee6e8bb 5827 break;
0e326109 5828 case 24: /* Float VCGT #0 */
dd8fbd78
FN
5829 tmp2 = tcg_const_i32(0);
5830 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5831 tcg_temp_free(tmp2);
9ee6e8bb 5832 break;
0e326109 5833 case 25: /* Float VCGE #0 */
dd8fbd78
FN
5834 tmp2 = tcg_const_i32(0);
5835 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5836 tcg_temp_free(tmp2);
9ee6e8bb
PB
5837 break;
5838 case 26: /* Float VCEQ #0 */
dd8fbd78
FN
5839 tmp2 = tcg_const_i32(0);
5840 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5841 tcg_temp_free(tmp2);
9ee6e8bb 5842 break;
0e326109
PM
5843 case 27: /* Float VCLE #0 */
5844 tmp2 = tcg_const_i32(0);
5845 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
5846 tcg_temp_free(tmp2);
5847 break;
5848 case 28: /* Float VCLT #0 */
5849 tmp2 = tcg_const_i32(0);
5850 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
5851 tcg_temp_free(tmp2);
5852 break;
9ee6e8bb 5853 case 30: /* Float VABS */
4373f3ce 5854 gen_vfp_abs(0);
9ee6e8bb
PB
5855 break;
5856 case 31: /* Float VNEG */
4373f3ce 5857 gen_vfp_neg(0);
9ee6e8bb
PB
5858 break;
5859 case 32: /* VSWP */
dd8fbd78
FN
5860 tmp2 = neon_load_reg(rd, pass);
5861 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5862 break;
5863 case 33: /* VTRN */
dd8fbd78 5864 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5865 switch (size) {
dd8fbd78
FN
5866 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5867 case 1: gen_neon_trn_u16(tmp, tmp2); break;
9ee6e8bb
PB
5868 case 2: abort();
5869 default: return 1;
5870 }
dd8fbd78 5871 neon_store_reg(rm, pass, tmp2);
9ee6e8bb
PB
5872 break;
5873 case 56: /* Integer VRECPE */
dd8fbd78 5874 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5875 break;
5876 case 57: /* Integer VRSQRTE */
dd8fbd78 5877 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb
PB
5878 break;
5879 case 58: /* Float VRECPE */
4373f3ce 5880 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5881 break;
5882 case 59: /* Float VRSQRTE */
4373f3ce 5883 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5884 break;
5885 case 60: /* VCVT.F32.S32 */
d3587ef8 5886 gen_vfp_sito(0);
9ee6e8bb
PB
5887 break;
5888 case 61: /* VCVT.F32.U32 */
d3587ef8 5889 gen_vfp_uito(0);
9ee6e8bb
PB
5890 break;
5891 case 62: /* VCVT.S32.F32 */
d3587ef8 5892 gen_vfp_tosiz(0);
9ee6e8bb
PB
5893 break;
5894 case 63: /* VCVT.U32.F32 */
d3587ef8 5895 gen_vfp_touiz(0);
9ee6e8bb
PB
5896 break;
5897 default:
5898 /* Reserved: 21, 29, 39-56 */
5899 return 1;
5900 }
5901 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5902 tcg_gen_st_f32(cpu_F0s, cpu_env,
5903 neon_reg_offset(rd, pass));
9ee6e8bb 5904 } else {
dd8fbd78 5905 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5906 }
5907 }
5908 break;
5909 }
5910 } else if ((insn & (1 << 10)) == 0) {
5911 /* VTBL, VTBX. */
a5a14945 5912 int n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5913 if (insn & (1 << 6)) {
8f8e3aa4 5914 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5915 } else {
7d1b0095 5916 tmp = tcg_temp_new_i32();
8f8e3aa4 5917 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5918 }
8f8e3aa4 5919 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
5920 tmp4 = tcg_const_i32(rn);
5921 tmp5 = tcg_const_i32(n);
5922 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 5923 tcg_temp_free_i32(tmp);
9ee6e8bb 5924 if (insn & (1 << 6)) {
8f8e3aa4 5925 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5926 } else {
7d1b0095 5927 tmp = tcg_temp_new_i32();
8f8e3aa4 5928 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5929 }
8f8e3aa4 5930 tmp3 = neon_load_reg(rm, 1);
b75263d6 5931 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
5932 tcg_temp_free_i32(tmp5);
5933 tcg_temp_free_i32(tmp4);
8f8e3aa4 5934 neon_store_reg(rd, 0, tmp2);
3018f259 5935 neon_store_reg(rd, 1, tmp3);
7d1b0095 5936 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5937 } else if ((insn & 0x380) == 0) {
5938 /* VDUP */
5939 if (insn & (1 << 19)) {
dd8fbd78 5940 tmp = neon_load_reg(rm, 1);
9ee6e8bb 5941 } else {
dd8fbd78 5942 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
5943 }
5944 if (insn & (1 << 16)) {
dd8fbd78 5945 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5946 } else if (insn & (1 << 17)) {
5947 if ((insn >> 18) & 1)
dd8fbd78 5948 gen_neon_dup_high16(tmp);
9ee6e8bb 5949 else
dd8fbd78 5950 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
5951 }
5952 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 5953 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
5954 tcg_gen_mov_i32(tmp2, tmp);
5955 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 5956 }
7d1b0095 5957 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5958 } else {
5959 return 1;
5960 }
5961 }
5962 }
5963 return 0;
5964}
5965
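/* The VEXT ("Extract") path above builds its result with 64-bit shifts
 * and ORs on cpu_V0/cpu_V1. As a rough behavioural model only (not QEMU
 * code; vext_d and src are illustrative names and a little-endian host
 * is assumed), the doubleword form amounts to taking an 8-byte window
 * at byte offset imm (0..7) from the concatenation {Vm:Vn}:
 */
#include <stdint.h>
#include <string.h>

static uint64_t vext_d(uint64_t vn, uint64_t vm, unsigned imm)
{
    uint8_t src[16];
    uint64_t rd;

    memcpy(src, &vn, 8);        /* bytes 0..7  <- Vn */
    memcpy(src + 8, &vm, 8);    /* bytes 8..15 <- Vm */
    memcpy(&rd, src + imm, 8);  /* result = src[imm .. imm+7] */
    return rd;
}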
fe1479c3
PB
5966static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5967{
5968 int crn = (insn >> 16) & 0xf;
5969 int crm = insn & 0xf;
5970 int op1 = (insn >> 21) & 7;
5971 int op2 = (insn >> 5) & 7;
5972 int rt = (insn >> 12) & 0xf;
5973 TCGv tmp;
5974
ca27c052
PM
5975 /* Minimal set of debug registers, since we don't support debug */
5976 if (op1 == 0 && crn == 0 && op2 == 0) {
5977 switch (crm) {
5978 case 0:
5979 /* DBGDIDR: just RAZ. In particular this means the
5980 * "debug architecture version" bits will read as
5981 * a reserved value, which should cause Linux to
5982 * not try to use the debug hardware.
5983 */
5984 tmp = tcg_const_i32(0);
5985 store_reg(s, rt, tmp);
5986 return 0;
5987 case 1:
5988 case 2:
5989 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5990 * don't implement memory mapped debug components
5991 */
5992 if (ENABLE_ARCH_7) {
5993 tmp = tcg_const_i32(0);
5994 store_reg(s, rt, tmp);
5995 return 0;
5996 }
5997 break;
5998 default:
5999 break;
6000 }
6001 }
6002
fe1479c3
PB
6003 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6004 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6005 /* TEECR */
6006 if (IS_USER(s))
6007 return 1;
6008 tmp = load_cpu_field(teecr);
6009 store_reg(s, rt, tmp);
6010 return 0;
6011 }
6012 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6013 /* TEEHBR */
6014 if (IS_USER(s) && (env->teecr & 1))
6015 return 1;
6016 tmp = load_cpu_field(teehbr);
6017 store_reg(s, rt, tmp);
6018 return 0;
6019 }
6020 }
6021 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6022 op1, crn, crm, op2);
6023 return 1;
6024}
6025
6026static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6027{
6028 int crn = (insn >> 16) & 0xf;
6029 int crm = insn & 0xf;
6030 int op1 = (insn >> 21) & 7;
6031 int op2 = (insn >> 5) & 7;
6032 int rt = (insn >> 12) & 0xf;
6033 TCGv tmp;
6034
6035 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6036 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6037 /* TEECR */
6038 if (IS_USER(s))
6039 return 1;
6040 tmp = load_reg(s, rt);
6041 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6042 tcg_temp_free_i32(tmp);
fe1479c3
PB
6043 return 0;
6044 }
6045 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6046 /* TEEHBR */
6047 if (IS_USER(s) && (env->teecr & 1))
6048 return 1;
6049 tmp = load_reg(s, rt);
6050 store_cpu_field(tmp, teehbr);
6051 return 0;
6052 }
6053 }
6054 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6055 op1, crn, crm, op2);
6056 return 1;
6057}
6058
9ee6e8bb
PB
6059static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6060{
6061 int cpnum;
6062
6063 cpnum = (insn >> 8) & 0xf;
6064 if (arm_feature(env, ARM_FEATURE_XSCALE)
6065 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6066 return 1;
6067
6068 switch (cpnum) {
6069 case 0:
6070 case 1:
6071 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6072 return disas_iwmmxt_insn(env, s, insn);
6073 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6074 return disas_dsp_insn(env, s, insn);
6075 }
6076 return 1;
6077 case 10:
6078 case 11:
6079 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
6080 case 14:
6081 /* Coprocessors 7-15 are architecturally reserved by ARM.
6082 Unfortunately Intel decided to ignore this. */
6083 if (arm_feature(env, ARM_FEATURE_XSCALE))
6084 goto board;
6085 if (insn & (1 << 20))
6086 return disas_cp14_read(env, s, insn);
6087 else
6088 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
6089 case 15:
6090 return disas_cp15_insn (env, s, insn);
6091 default:
fe1479c3 6092 board:
9ee6e8bb
PB
6093 /* Unknown coprocessor. See if the board has hooked it. */
6094 return disas_cp_insn (env, s, insn);
6095 }
6096}
6097
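/* A small sketch of the MRC/MCR field extraction that the coprocessor
 * dispatch above relies on (ARM encoding only; the struct and function
 * names here are illustrative, not part of QEMU):
 */
#include <stdint.h>

struct cp_fields {
    int cpnum, op1, op2, crn, crm, rt, is_read;
};

static struct cp_fields decode_cp_fields(uint32_t insn)
{
    struct cp_fields f;

    f.cpnum   = (insn >> 8) & 0xf;   /* selects the dispatch case above */
    f.op1     = (insn >> 21) & 7;
    f.op2     = (insn >> 5) & 7;
    f.crn     = (insn >> 16) & 0xf;
    f.crm     = insn & 0xf;
    f.rt      = (insn >> 12) & 0xf;
    f.is_read = (insn >> 20) & 1;    /* L bit: MRC when set, MCR when clear */
    return f;
}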
5e3f878a
PB
6098
6099/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6100static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6101{
6102 TCGv tmp;
7d1b0095 6103 tmp = tcg_temp_new_i32();
5e3f878a
PB
6104 tcg_gen_trunc_i64_i32(tmp, val);
6105 store_reg(s, rlow, tmp);
7d1b0095 6106 tmp = tcg_temp_new_i32();
5e3f878a
PB
6107 tcg_gen_shri_i64(val, val, 32);
6108 tcg_gen_trunc_i64_i32(tmp, val);
6109 store_reg(s, rhigh, tmp);
6110}
6111
6112/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6113static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6114{
a7812ae4 6115 TCGv_i64 tmp;
5e3f878a
PB
6116 TCGv tmp2;
6117
36aa55dc 6118 /* Load value and extend to 64 bits. */
a7812ae4 6119 tmp = tcg_temp_new_i64();
5e3f878a
PB
6120 tmp2 = load_reg(s, rlow);
6121 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6122 tcg_temp_free_i32(tmp2);
5e3f878a 6123 tcg_gen_add_i64(val, val, tmp);
b75263d6 6124 tcg_temp_free_i64(tmp);
5e3f878a
PB
6125}
6126
6127/* load and add a 64-bit value from a register pair. */
a7812ae4 6128static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6129{
a7812ae4 6130 TCGv_i64 tmp;
36aa55dc
PB
6131 TCGv tmpl;
6132 TCGv tmph;
5e3f878a
PB
6133
6134 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6135 tmpl = load_reg(s, rlow);
6136 tmph = load_reg(s, rhigh);
a7812ae4 6137 tmp = tcg_temp_new_i64();
36aa55dc 6138 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6139 tcg_temp_free_i32(tmpl);
6140 tcg_temp_free_i32(tmph);
5e3f878a 6141 tcg_gen_add_i64(val, val, tmp);
b75263d6 6142 tcg_temp_free_i64(tmp);
5e3f878a
PB
6143}
6144
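/* What the register-pair helpers above compute, restated on plain
 * integers instead of TCG temporaries (illustrative sketch only;
 * the names are not QEMU's):
 */
#include <stdint.h>

static void storeq_pair(uint64_t val, uint32_t *rlow, uint32_t *rhigh)
{
    *rlow  = (uint32_t)val;           /* low word  -> Rlow  */
    *rhigh = (uint32_t)(val >> 32);   /* high word -> Rhigh */
}

static uint64_t addq_pair(uint64_t val, uint32_t rlow, uint32_t rhigh)
{
    /* 64-bit accumulate of the pair Rhigh:Rlow, as UMLAL/SMLAL need. */
    return val + (((uint64_t)rhigh << 32) | rlow);
}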
6145/* Set N and Z flags from a 64-bit value. */
a7812ae4 6146static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6147{
7d1b0095 6148 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6149 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6150 gen_logic_CC(tmp);
7d1b0095 6151 tcg_temp_free_i32(tmp);
5e3f878a
PB
6152}
6153
426f5abc
PB
6154/* Load/Store exclusive instructions are implemented by remembering
6155 the value/address loaded, and seeing if these are the same
 6156 when the store is performed. This should be sufficient to implement
6157 the architecturally mandated semantics, and avoids having to monitor
6158 regular stores.
6159
6160 In system emulation mode only one CPU will be running at once, so
6161 this sequence is effectively atomic. In user emulation mode we
6162 throw an exception and handle the atomic operation elsewhere. */
6163static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6164 TCGv addr, int size)
6165{
6166 TCGv tmp;
6167
6168 switch (size) {
6169 case 0:
6170 tmp = gen_ld8u(addr, IS_USER(s));
6171 break;
6172 case 1:
6173 tmp = gen_ld16u(addr, IS_USER(s));
6174 break;
6175 case 2:
6176 case 3:
6177 tmp = gen_ld32(addr, IS_USER(s));
6178 break;
6179 default:
6180 abort();
6181 }
6182 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6183 store_reg(s, rt, tmp);
6184 if (size == 3) {
7d1b0095 6185 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6186 tcg_gen_addi_i32(tmp2, addr, 4);
6187 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6188 tcg_temp_free_i32(tmp2);
426f5abc
PB
6189 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6190 store_reg(s, rt2, tmp);
6191 }
6192 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6193}
6194
6195static void gen_clrex(DisasContext *s)
6196{
6197 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6198}
6199
6200#ifdef CONFIG_USER_ONLY
6201static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6202 TCGv addr, int size)
6203{
6204 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6205 tcg_gen_movi_i32(cpu_exclusive_info,
6206 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6207 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6208}
6209#else
6210static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6211 TCGv addr, int size)
6212{
6213 TCGv tmp;
6214 int done_label;
6215 int fail_label;
6216
6217 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6218 [addr] = {Rt};
6219 {Rd} = 0;
6220 } else {
6221 {Rd} = 1;
6222 } */
6223 fail_label = gen_new_label();
6224 done_label = gen_new_label();
6225 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6226 switch (size) {
6227 case 0:
6228 tmp = gen_ld8u(addr, IS_USER(s));
6229 break;
6230 case 1:
6231 tmp = gen_ld16u(addr, IS_USER(s));
6232 break;
6233 case 2:
6234 case 3:
6235 tmp = gen_ld32(addr, IS_USER(s));
6236 break;
6237 default:
6238 abort();
6239 }
6240 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6241 tcg_temp_free_i32(tmp);
426f5abc 6242 if (size == 3) {
7d1b0095 6243 TCGv tmp2 = tcg_temp_new_i32();
426f5abc 6244 tcg_gen_addi_i32(tmp2, addr, 4);
2c9adbda 6245 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6246 tcg_temp_free_i32(tmp2);
426f5abc 6247 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6248 tcg_temp_free_i32(tmp);
426f5abc
PB
6249 }
6250 tmp = load_reg(s, rt);
6251 switch (size) {
6252 case 0:
6253 gen_st8(tmp, addr, IS_USER(s));
6254 break;
6255 case 1:
6256 gen_st16(tmp, addr, IS_USER(s));
6257 break;
6258 case 2:
6259 case 3:
6260 gen_st32(tmp, addr, IS_USER(s));
6261 break;
6262 default:
6263 abort();
6264 }
6265 if (size == 3) {
6266 tcg_gen_addi_i32(addr, addr, 4);
6267 tmp = load_reg(s, rt2);
6268 gen_st32(tmp, addr, IS_USER(s));
6269 }
6270 tcg_gen_movi_i32(cpu_R[rd], 0);
6271 tcg_gen_br(done_label);
6272 gen_set_label(fail_label);
6273 tcg_gen_movi_i32(cpu_R[rd], 1);
6274 gen_set_label(done_label);
6275 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6276}
6277#endif
6278
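/* Behavioural sketch of the exclusive-access scheme described above
 * gen_load_exclusive(): LDREX records the address and value seen,
 * STREX stores only if both still match and reports 0/1 in Rd.
 * This is a simplified single-monitor model with made-up names,
 * not QEMU code (word-sized accesses only, addr assumed aligned):
 */
#include <stdint.h>

static uint32_t excl_addr = ~0u;
static uint32_t excl_val;

static uint32_t model_ldrex(const uint32_t *mem, uint32_t addr)
{
    excl_addr = addr;
    excl_val = mem[addr / 4];
    return excl_val;
}

static int model_strex(uint32_t *mem, uint32_t addr, uint32_t data)
{
    int fail = 1;

    if (addr == excl_addr && mem[addr / 4] == excl_val) {
        mem[addr / 4] = data;
        fail = 0;
    }
    excl_addr = ~0u;      /* monitor is cleared on either outcome */
    return fail;          /* 0 = store succeeded, as STREX writes to Rd */
}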
9ee6e8bb
PB
6279static void disas_arm_insn(CPUState * env, DisasContext *s)
6280{
6281 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6282 TCGv tmp;
3670669c 6283 TCGv tmp2;
6ddbc6e4 6284 TCGv tmp3;
b0109805 6285 TCGv addr;
a7812ae4 6286 TCGv_i64 tmp64;
9ee6e8bb
PB
6287
6288 insn = ldl_code(s->pc);
6289 s->pc += 4;
6290
6291 /* M variants do not implement ARM mode. */
6292 if (IS_M(env))
6293 goto illegal_op;
6294 cond = insn >> 28;
6295 if (cond == 0xf){
be5e7a76
DES
6296 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6297 * choose to UNDEF. In ARMv5 and above the space is used
6298 * for miscellaneous unconditional instructions.
6299 */
6300 ARCH(5);
6301
9ee6e8bb
PB
6302 /* Unconditional instructions. */
6303 if (((insn >> 25) & 7) == 1) {
6304 /* NEON Data processing. */
6305 if (!arm_feature(env, ARM_FEATURE_NEON))
6306 goto illegal_op;
6307
6308 if (disas_neon_data_insn(env, s, insn))
6309 goto illegal_op;
6310 return;
6311 }
6312 if ((insn & 0x0f100000) == 0x04000000) {
6313 /* NEON load/store. */
6314 if (!arm_feature(env, ARM_FEATURE_NEON))
6315 goto illegal_op;
6316
6317 if (disas_neon_ls_insn(env, s, insn))
6318 goto illegal_op;
6319 return;
6320 }
3d185e5d
PM
6321 if (((insn & 0x0f30f000) == 0x0510f000) ||
6322 ((insn & 0x0f30f010) == 0x0710f000)) {
6323 if ((insn & (1 << 22)) == 0) {
6324 /* PLDW; v7MP */
6325 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6326 goto illegal_op;
6327 }
6328 }
6329 /* Otherwise PLD; v5TE+ */
be5e7a76 6330 ARCH(5TE);
3d185e5d
PM
6331 return;
6332 }
6333 if (((insn & 0x0f70f000) == 0x0450f000) ||
6334 ((insn & 0x0f70f010) == 0x0650f000)) {
6335 ARCH(7);
6336 return; /* PLI; V7 */
6337 }
6338 if (((insn & 0x0f700000) == 0x04100000) ||
6339 ((insn & 0x0f700010) == 0x06100000)) {
6340 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6341 goto illegal_op;
6342 }
6343 return; /* v7MP: Unallocated memory hint: must NOP */
6344 }
6345
6346 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6347 ARCH(6);
6348 /* setend */
6349 if (insn & (1 << 9)) {
6350 /* BE8 mode not implemented. */
6351 goto illegal_op;
6352 }
6353 return;
6354 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6355 switch ((insn >> 4) & 0xf) {
6356 case 1: /* clrex */
6357 ARCH(6K);
426f5abc 6358 gen_clrex(s);
9ee6e8bb
PB
6359 return;
6360 case 4: /* dsb */
6361 case 5: /* dmb */
6362 case 6: /* isb */
6363 ARCH(7);
6364 /* We don't emulate caches so these are a no-op. */
6365 return;
6366 default:
6367 goto illegal_op;
6368 }
6369 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6370 /* srs */
c67b6b71 6371 int32_t offset;
9ee6e8bb
PB
6372 if (IS_USER(s))
6373 goto illegal_op;
6374 ARCH(6);
6375 op1 = (insn & 0x1f);
7d1b0095 6376 addr = tcg_temp_new_i32();
39ea3d4e
PM
6377 tmp = tcg_const_i32(op1);
6378 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6379 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6380 i = (insn >> 23) & 3;
6381 switch (i) {
6382 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6383 case 1: offset = 0; break; /* IA */
6384 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6385 case 3: offset = 4; break; /* IB */
6386 default: abort();
6387 }
6388 if (offset)
b0109805
PB
6389 tcg_gen_addi_i32(addr, addr, offset);
6390 tmp = load_reg(s, 14);
6391 gen_st32(tmp, addr, 0);
c67b6b71 6392 tmp = load_cpu_field(spsr);
b0109805
PB
6393 tcg_gen_addi_i32(addr, addr, 4);
6394 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6395 if (insn & (1 << 21)) {
6396 /* Base writeback. */
6397 switch (i) {
6398 case 0: offset = -8; break;
c67b6b71
FN
6399 case 1: offset = 4; break;
6400 case 2: offset = -4; break;
9ee6e8bb
PB
6401 case 3: offset = 0; break;
6402 default: abort();
6403 }
6404 if (offset)
c67b6b71 6405 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6406 tmp = tcg_const_i32(op1);
6407 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6408 tcg_temp_free_i32(tmp);
7d1b0095 6409 tcg_temp_free_i32(addr);
b0109805 6410 } else {
7d1b0095 6411 tcg_temp_free_i32(addr);
9ee6e8bb 6412 }
a990f58f 6413 return;
ea825eee 6414 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6415 /* rfe */
c67b6b71 6416 int32_t offset;
9ee6e8bb
PB
6417 if (IS_USER(s))
6418 goto illegal_op;
6419 ARCH(6);
6420 rn = (insn >> 16) & 0xf;
b0109805 6421 addr = load_reg(s, rn);
9ee6e8bb
PB
6422 i = (insn >> 23) & 3;
6423 switch (i) {
b0109805 6424 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6425 case 1: offset = 0; break; /* IA */
6426 case 2: offset = -8; break; /* DB */
b0109805 6427 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6428 default: abort();
6429 }
6430 if (offset)
b0109805
PB
6431 tcg_gen_addi_i32(addr, addr, offset);
6432 /* Load PC into tmp and CPSR into tmp2. */
6433 tmp = gen_ld32(addr, 0);
6434 tcg_gen_addi_i32(addr, addr, 4);
6435 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6436 if (insn & (1 << 21)) {
6437 /* Base writeback. */
6438 switch (i) {
b0109805 6439 case 0: offset = -8; break;
c67b6b71
FN
6440 case 1: offset = 4; break;
6441 case 2: offset = -4; break;
b0109805 6442 case 3: offset = 0; break;
9ee6e8bb
PB
6443 default: abort();
6444 }
6445 if (offset)
b0109805
PB
6446 tcg_gen_addi_i32(addr, addr, offset);
6447 store_reg(s, rn, addr);
6448 } else {
7d1b0095 6449 tcg_temp_free_i32(addr);
9ee6e8bb 6450 }
b0109805 6451 gen_rfe(s, tmp, tmp2);
c67b6b71 6452 return;
9ee6e8bb
PB
6453 } else if ((insn & 0x0e000000) == 0x0a000000) {
6454 /* branch link and change to thumb (blx <offset>) */
6455 int32_t offset;
6456
6457 val = (uint32_t)s->pc;
7d1b0095 6458 tmp = tcg_temp_new_i32();
d9ba4830
PB
6459 tcg_gen_movi_i32(tmp, val);
6460 store_reg(s, 14, tmp);
9ee6e8bb
PB
6461 /* Sign-extend the 24-bit offset */
6462 offset = (((int32_t)insn) << 8) >> 8;
6463 /* offset * 4 + bit24 * 2 + (thumb bit) */
6464 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6465 /* pipeline offset */
6466 val += 4;
be5e7a76 6467 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6468 gen_bx_im(s, val);
9ee6e8bb
PB
6469 return;
6470 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6471 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6472 /* iWMMXt register transfer. */
6473 if (env->cp15.c15_cpar & (1 << 1))
6474 if (!disas_iwmmxt_insn(env, s, insn))
6475 return;
6476 }
6477 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6478 /* Coprocessor double register transfer. */
be5e7a76 6479 ARCH(5TE);
9ee6e8bb
PB
6480 } else if ((insn & 0x0f000010) == 0x0e000010) {
6481 /* Additional coprocessor register transfer. */
7997d92f 6482 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6483 uint32_t mask;
6484 uint32_t val;
6485 /* cps (privileged) */
6486 if (IS_USER(s))
6487 return;
6488 mask = val = 0;
6489 if (insn & (1 << 19)) {
6490 if (insn & (1 << 8))
6491 mask |= CPSR_A;
6492 if (insn & (1 << 7))
6493 mask |= CPSR_I;
6494 if (insn & (1 << 6))
6495 mask |= CPSR_F;
6496 if (insn & (1 << 18))
6497 val |= mask;
6498 }
7997d92f 6499 if (insn & (1 << 17)) {
9ee6e8bb
PB
6500 mask |= CPSR_M;
6501 val |= (insn & 0x1f);
6502 }
6503 if (mask) {
2fbac54b 6504 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6505 }
6506 return;
6507 }
6508 goto illegal_op;
6509 }
6510 if (cond != 0xe) {
 6511 /* if the condition is not "always execute", generate a conditional
 6512 jump to the next instruction */
6513 s->condlabel = gen_new_label();
d9ba4830 6514 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6515 s->condjmp = 1;
6516 }
6517 if ((insn & 0x0f900000) == 0x03000000) {
6518 if ((insn & (1 << 21)) == 0) {
6519 ARCH(6T2);
6520 rd = (insn >> 12) & 0xf;
6521 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6522 if ((insn & (1 << 22)) == 0) {
6523 /* MOVW */
7d1b0095 6524 tmp = tcg_temp_new_i32();
5e3f878a 6525 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6526 } else {
6527 /* MOVT */
5e3f878a 6528 tmp = load_reg(s, rd);
86831435 6529 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6530 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6531 }
5e3f878a 6532 store_reg(s, rd, tmp);
9ee6e8bb
PB
6533 } else {
6534 if (((insn >> 12) & 0xf) != 0xf)
6535 goto illegal_op;
6536 if (((insn >> 16) & 0xf) == 0) {
6537 gen_nop_hint(s, insn & 0xff);
6538 } else {
6539 /* CPSR = immediate */
6540 val = insn & 0xff;
6541 shift = ((insn >> 8) & 0xf) * 2;
6542 if (shift)
6543 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6544 i = ((insn & (1 << 22)) != 0);
2fbac54b 6545 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6546 goto illegal_op;
6547 }
6548 }
6549 } else if ((insn & 0x0f900000) == 0x01000000
6550 && (insn & 0x00000090) != 0x00000090) {
6551 /* miscellaneous instructions */
6552 op1 = (insn >> 21) & 3;
6553 sh = (insn >> 4) & 0xf;
6554 rm = insn & 0xf;
6555 switch (sh) {
6556 case 0x0: /* move program status register */
6557 if (op1 & 1) {
6558 /* PSR = reg */
2fbac54b 6559 tmp = load_reg(s, rm);
9ee6e8bb 6560 i = ((op1 & 2) != 0);
2fbac54b 6561 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6562 goto illegal_op;
6563 } else {
6564 /* reg = PSR */
6565 rd = (insn >> 12) & 0xf;
6566 if (op1 & 2) {
6567 if (IS_USER(s))
6568 goto illegal_op;
d9ba4830 6569 tmp = load_cpu_field(spsr);
9ee6e8bb 6570 } else {
7d1b0095 6571 tmp = tcg_temp_new_i32();
d9ba4830 6572 gen_helper_cpsr_read(tmp);
9ee6e8bb 6573 }
d9ba4830 6574 store_reg(s, rd, tmp);
9ee6e8bb
PB
6575 }
6576 break;
6577 case 0x1:
6578 if (op1 == 1) {
6579 /* branch/exchange thumb (bx). */
be5e7a76 6580 ARCH(4T);
d9ba4830
PB
6581 tmp = load_reg(s, rm);
6582 gen_bx(s, tmp);
9ee6e8bb
PB
6583 } else if (op1 == 3) {
6584 /* clz */
be5e7a76 6585 ARCH(5);
9ee6e8bb 6586 rd = (insn >> 12) & 0xf;
1497c961
PB
6587 tmp = load_reg(s, rm);
6588 gen_helper_clz(tmp, tmp);
6589 store_reg(s, rd, tmp);
9ee6e8bb
PB
6590 } else {
6591 goto illegal_op;
6592 }
6593 break;
6594 case 0x2:
6595 if (op1 == 1) {
6596 ARCH(5J); /* bxj */
6597 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6598 tmp = load_reg(s, rm);
6599 gen_bx(s, tmp);
9ee6e8bb
PB
6600 } else {
6601 goto illegal_op;
6602 }
6603 break;
6604 case 0x3:
6605 if (op1 != 1)
6606 goto illegal_op;
6607
be5e7a76 6608 ARCH(5);
9ee6e8bb 6609 /* branch link/exchange thumb (blx) */
d9ba4830 6610 tmp = load_reg(s, rm);
7d1b0095 6611 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6612 tcg_gen_movi_i32(tmp2, s->pc);
6613 store_reg(s, 14, tmp2);
6614 gen_bx(s, tmp);
9ee6e8bb
PB
6615 break;
6616 case 0x5: /* saturating add/subtract */
be5e7a76 6617 ARCH(5TE);
9ee6e8bb
PB
6618 rd = (insn >> 12) & 0xf;
6619 rn = (insn >> 16) & 0xf;
b40d0353 6620 tmp = load_reg(s, rm);
5e3f878a 6621 tmp2 = load_reg(s, rn);
9ee6e8bb 6622 if (op1 & 2)
5e3f878a 6623 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6624 if (op1 & 1)
5e3f878a 6625 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6626 else
5e3f878a 6627 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6628 tcg_temp_free_i32(tmp2);
5e3f878a 6629 store_reg(s, rd, tmp);
9ee6e8bb 6630 break;
49e14940
AL
6631 case 7:
6632 /* SMC instruction (op1 == 3)
6633 and undefined instructions (op1 == 0 || op1 == 2)
6634 will trap */
6635 if (op1 != 1) {
6636 goto illegal_op;
6637 }
6638 /* bkpt */
be5e7a76 6639 ARCH(5);
bc4a0de0 6640 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6641 break;
6642 case 0x8: /* signed multiply */
6643 case 0xa:
6644 case 0xc:
6645 case 0xe:
be5e7a76 6646 ARCH(5TE);
9ee6e8bb
PB
6647 rs = (insn >> 8) & 0xf;
6648 rn = (insn >> 12) & 0xf;
6649 rd = (insn >> 16) & 0xf;
6650 if (op1 == 1) {
6651 /* (32 * 16) >> 16 */
5e3f878a
PB
6652 tmp = load_reg(s, rm);
6653 tmp2 = load_reg(s, rs);
9ee6e8bb 6654 if (sh & 4)
5e3f878a 6655 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6656 else
5e3f878a 6657 gen_sxth(tmp2);
a7812ae4
PB
6658 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6659 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6660 tmp = tcg_temp_new_i32();
a7812ae4 6661 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6662 tcg_temp_free_i64(tmp64);
9ee6e8bb 6663 if ((sh & 2) == 0) {
5e3f878a
PB
6664 tmp2 = load_reg(s, rn);
6665 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6666 tcg_temp_free_i32(tmp2);
9ee6e8bb 6667 }
5e3f878a 6668 store_reg(s, rd, tmp);
9ee6e8bb
PB
6669 } else {
6670 /* 16 * 16 */
5e3f878a
PB
6671 tmp = load_reg(s, rm);
6672 tmp2 = load_reg(s, rs);
6673 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6674 tcg_temp_free_i32(tmp2);
9ee6e8bb 6675 if (op1 == 2) {
a7812ae4
PB
6676 tmp64 = tcg_temp_new_i64();
6677 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6678 tcg_temp_free_i32(tmp);
a7812ae4
PB
6679 gen_addq(s, tmp64, rn, rd);
6680 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6681 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6682 } else {
6683 if (op1 == 0) {
5e3f878a
PB
6684 tmp2 = load_reg(s, rn);
6685 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6686 tcg_temp_free_i32(tmp2);
9ee6e8bb 6687 }
5e3f878a 6688 store_reg(s, rd, tmp);
9ee6e8bb
PB
6689 }
6690 }
6691 break;
6692 default:
6693 goto illegal_op;
6694 }
6695 } else if (((insn & 0x0e000000) == 0 &&
6696 (insn & 0x00000090) != 0x90) ||
6697 ((insn & 0x0e000000) == (1 << 25))) {
6698 int set_cc, logic_cc, shiftop;
6699
6700 op1 = (insn >> 21) & 0xf;
6701 set_cc = (insn >> 20) & 1;
6702 logic_cc = table_logic_cc[op1] & set_cc;
6703
6704 /* data processing instruction */
6705 if (insn & (1 << 25)) {
6706 /* immediate operand */
6707 val = insn & 0xff;
6708 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6709 if (shift) {
9ee6e8bb 6710 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6711 }
7d1b0095 6712 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6713 tcg_gen_movi_i32(tmp2, val);
6714 if (logic_cc && shift) {
6715 gen_set_CF_bit31(tmp2);
6716 }
9ee6e8bb
PB
6717 } else {
6718 /* register */
6719 rm = (insn) & 0xf;
e9bb4aa9 6720 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6721 shiftop = (insn >> 5) & 3;
6722 if (!(insn & (1 << 4))) {
6723 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6724 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6725 } else {
6726 rs = (insn >> 8) & 0xf;
8984bd2e 6727 tmp = load_reg(s, rs);
e9bb4aa9 6728 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6729 }
6730 }
6731 if (op1 != 0x0f && op1 != 0x0d) {
6732 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6733 tmp = load_reg(s, rn);
6734 } else {
6735 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6736 }
6737 rd = (insn >> 12) & 0xf;
6738 switch(op1) {
6739 case 0x00:
e9bb4aa9
JR
6740 tcg_gen_and_i32(tmp, tmp, tmp2);
6741 if (logic_cc) {
6742 gen_logic_CC(tmp);
6743 }
21aeb343 6744 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6745 break;
6746 case 0x01:
e9bb4aa9
JR
6747 tcg_gen_xor_i32(tmp, tmp, tmp2);
6748 if (logic_cc) {
6749 gen_logic_CC(tmp);
6750 }
21aeb343 6751 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6752 break;
6753 case 0x02:
6754 if (set_cc && rd == 15) {
6755 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6756 if (IS_USER(s)) {
9ee6e8bb 6757 goto illegal_op;
e9bb4aa9
JR
6758 }
6759 gen_helper_sub_cc(tmp, tmp, tmp2);
6760 gen_exception_return(s, tmp);
9ee6e8bb 6761 } else {
e9bb4aa9
JR
6762 if (set_cc) {
6763 gen_helper_sub_cc(tmp, tmp, tmp2);
6764 } else {
6765 tcg_gen_sub_i32(tmp, tmp, tmp2);
6766 }
21aeb343 6767 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6768 }
6769 break;
6770 case 0x03:
e9bb4aa9
JR
6771 if (set_cc) {
6772 gen_helper_sub_cc(tmp, tmp2, tmp);
6773 } else {
6774 tcg_gen_sub_i32(tmp, tmp2, tmp);
6775 }
21aeb343 6776 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6777 break;
6778 case 0x04:
e9bb4aa9
JR
6779 if (set_cc) {
6780 gen_helper_add_cc(tmp, tmp, tmp2);
6781 } else {
6782 tcg_gen_add_i32(tmp, tmp, tmp2);
6783 }
21aeb343 6784 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6785 break;
6786 case 0x05:
e9bb4aa9
JR
6787 if (set_cc) {
6788 gen_helper_adc_cc(tmp, tmp, tmp2);
6789 } else {
6790 gen_add_carry(tmp, tmp, tmp2);
6791 }
21aeb343 6792 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6793 break;
6794 case 0x06:
e9bb4aa9
JR
6795 if (set_cc) {
6796 gen_helper_sbc_cc(tmp, tmp, tmp2);
6797 } else {
6798 gen_sub_carry(tmp, tmp, tmp2);
6799 }
21aeb343 6800 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6801 break;
6802 case 0x07:
e9bb4aa9
JR
6803 if (set_cc) {
6804 gen_helper_sbc_cc(tmp, tmp2, tmp);
6805 } else {
6806 gen_sub_carry(tmp, tmp2, tmp);
6807 }
21aeb343 6808 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6809 break;
6810 case 0x08:
6811 if (set_cc) {
e9bb4aa9
JR
6812 tcg_gen_and_i32(tmp, tmp, tmp2);
6813 gen_logic_CC(tmp);
9ee6e8bb 6814 }
7d1b0095 6815 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6816 break;
6817 case 0x09:
6818 if (set_cc) {
e9bb4aa9
JR
6819 tcg_gen_xor_i32(tmp, tmp, tmp2);
6820 gen_logic_CC(tmp);
9ee6e8bb 6821 }
7d1b0095 6822 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6823 break;
6824 case 0x0a:
6825 if (set_cc) {
e9bb4aa9 6826 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6827 }
7d1b0095 6828 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6829 break;
6830 case 0x0b:
6831 if (set_cc) {
e9bb4aa9 6832 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6833 }
7d1b0095 6834 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6835 break;
6836 case 0x0c:
e9bb4aa9
JR
6837 tcg_gen_or_i32(tmp, tmp, tmp2);
6838 if (logic_cc) {
6839 gen_logic_CC(tmp);
6840 }
21aeb343 6841 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6842 break;
6843 case 0x0d:
6844 if (logic_cc && rd == 15) {
6845 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6846 if (IS_USER(s)) {
9ee6e8bb 6847 goto illegal_op;
e9bb4aa9
JR
6848 }
6849 gen_exception_return(s, tmp2);
9ee6e8bb 6850 } else {
e9bb4aa9
JR
6851 if (logic_cc) {
6852 gen_logic_CC(tmp2);
6853 }
21aeb343 6854 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6855 }
6856 break;
6857 case 0x0e:
f669df27 6858 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
6859 if (logic_cc) {
6860 gen_logic_CC(tmp);
6861 }
21aeb343 6862 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6863 break;
6864 default:
6865 case 0x0f:
e9bb4aa9
JR
6866 tcg_gen_not_i32(tmp2, tmp2);
6867 if (logic_cc) {
6868 gen_logic_CC(tmp2);
6869 }
21aeb343 6870 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6871 break;
6872 }
e9bb4aa9 6873 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 6874 tcg_temp_free_i32(tmp2);
e9bb4aa9 6875 }
9ee6e8bb
PB
6876 } else {
6877 /* other instructions */
6878 op1 = (insn >> 24) & 0xf;
6879 switch(op1) {
6880 case 0x0:
6881 case 0x1:
6882 /* multiplies, extra load/stores */
6883 sh = (insn >> 5) & 3;
6884 if (sh == 0) {
6885 if (op1 == 0x0) {
6886 rd = (insn >> 16) & 0xf;
6887 rn = (insn >> 12) & 0xf;
6888 rs = (insn >> 8) & 0xf;
6889 rm = (insn) & 0xf;
6890 op1 = (insn >> 20) & 0xf;
6891 switch (op1) {
6892 case 0: case 1: case 2: case 3: case 6:
6893 /* 32 bit mul */
5e3f878a
PB
6894 tmp = load_reg(s, rs);
6895 tmp2 = load_reg(s, rm);
6896 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 6897 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6898 if (insn & (1 << 22)) {
6899 /* Subtract (mls) */
6900 ARCH(6T2);
5e3f878a
PB
6901 tmp2 = load_reg(s, rn);
6902 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 6903 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6904 } else if (insn & (1 << 21)) {
6905 /* Add */
5e3f878a
PB
6906 tmp2 = load_reg(s, rn);
6907 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 6908 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6909 }
6910 if (insn & (1 << 20))
5e3f878a
PB
6911 gen_logic_CC(tmp);
6912 store_reg(s, rd, tmp);
9ee6e8bb 6913 break;
8aac08b1
AJ
6914 case 4:
6915 /* 64 bit mul double accumulate (UMAAL) */
6916 ARCH(6);
6917 tmp = load_reg(s, rs);
6918 tmp2 = load_reg(s, rm);
6919 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6920 gen_addq_lo(s, tmp64, rn);
6921 gen_addq_lo(s, tmp64, rd);
6922 gen_storeq_reg(s, rn, rd, tmp64);
6923 tcg_temp_free_i64(tmp64);
6924 break;
6925 case 8: case 9: case 10: case 11:
6926 case 12: case 13: case 14: case 15:
6927 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
6928 tmp = load_reg(s, rs);
6929 tmp2 = load_reg(s, rm);
8aac08b1 6930 if (insn & (1 << 22)) {
a7812ae4 6931 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 6932 } else {
a7812ae4 6933 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
6934 }
6935 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 6936 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 6937 }
8aac08b1 6938 if (insn & (1 << 20)) {
a7812ae4 6939 gen_logicq_cc(tmp64);
8aac08b1 6940 }
a7812ae4 6941 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6942 tcg_temp_free_i64(tmp64);
9ee6e8bb 6943 break;
8aac08b1
AJ
6944 default:
6945 goto illegal_op;
9ee6e8bb
PB
6946 }
6947 } else {
6948 rn = (insn >> 16) & 0xf;
6949 rd = (insn >> 12) & 0xf;
6950 if (insn & (1 << 23)) {
6951 /* load/store exclusive */
86753403
PB
6952 op1 = (insn >> 21) & 0x3;
6953 if (op1)
a47f43d2 6954 ARCH(6K);
86753403
PB
6955 else
6956 ARCH(6);
3174f8e9 6957 addr = tcg_temp_local_new_i32();
98a46317 6958 load_reg_var(s, addr, rn);
9ee6e8bb 6959 if (insn & (1 << 20)) {
86753403
PB
6960 switch (op1) {
6961 case 0: /* ldrex */
426f5abc 6962 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
6963 break;
6964 case 1: /* ldrexd */
426f5abc 6965 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
6966 break;
6967 case 2: /* ldrexb */
426f5abc 6968 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
6969 break;
6970 case 3: /* ldrexh */
426f5abc 6971 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
6972 break;
6973 default:
6974 abort();
6975 }
9ee6e8bb
PB
6976 } else {
6977 rm = insn & 0xf;
86753403
PB
6978 switch (op1) {
6979 case 0: /* strex */
426f5abc 6980 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
6981 break;
6982 case 1: /* strexd */
502e64fe 6983 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
6984 break;
6985 case 2: /* strexb */
426f5abc 6986 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
6987 break;
6988 case 3: /* strexh */
426f5abc 6989 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
6990 break;
6991 default:
6992 abort();
6993 }
9ee6e8bb 6994 }
3174f8e9 6995 tcg_temp_free(addr);
9ee6e8bb
PB
6996 } else {
6997 /* SWP instruction */
6998 rm = (insn) & 0xf;
6999
8984bd2e
PB
7000 /* ??? This is not really atomic. However we know
7001 we never have multiple CPUs running in parallel,
7002 so it is good enough. */
7003 addr = load_reg(s, rn);
7004 tmp = load_reg(s, rm);
9ee6e8bb 7005 if (insn & (1 << 22)) {
8984bd2e
PB
7006 tmp2 = gen_ld8u(addr, IS_USER(s));
7007 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7008 } else {
8984bd2e
PB
7009 tmp2 = gen_ld32(addr, IS_USER(s));
7010 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7011 }
7d1b0095 7012 tcg_temp_free_i32(addr);
8984bd2e 7013 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7014 }
7015 }
7016 } else {
7017 int address_offset;
7018 int load;
7019 /* Misc load/store */
7020 rn = (insn >> 16) & 0xf;
7021 rd = (insn >> 12) & 0xf;
b0109805 7022 addr = load_reg(s, rn);
9ee6e8bb 7023 if (insn & (1 << 24))
b0109805 7024 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7025 address_offset = 0;
7026 if (insn & (1 << 20)) {
7027 /* load */
7028 switch(sh) {
7029 case 1:
b0109805 7030 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7031 break;
7032 case 2:
b0109805 7033 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7034 break;
7035 default:
7036 case 3:
b0109805 7037 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7038 break;
7039 }
7040 load = 1;
7041 } else if (sh & 2) {
be5e7a76 7042 ARCH(5TE);
9ee6e8bb
PB
7043 /* doubleword */
7044 if (sh & 1) {
7045 /* store */
b0109805
PB
7046 tmp = load_reg(s, rd);
7047 gen_st32(tmp, addr, IS_USER(s));
7048 tcg_gen_addi_i32(addr, addr, 4);
7049 tmp = load_reg(s, rd + 1);
7050 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7051 load = 0;
7052 } else {
7053 /* load */
b0109805
PB
7054 tmp = gen_ld32(addr, IS_USER(s));
7055 store_reg(s, rd, tmp);
7056 tcg_gen_addi_i32(addr, addr, 4);
7057 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7058 rd++;
7059 load = 1;
7060 }
7061 address_offset = -4;
7062 } else {
7063 /* store */
b0109805
PB
7064 tmp = load_reg(s, rd);
7065 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7066 load = 0;
7067 }
7068 /* Perform base writeback before the loaded value to
7069 ensure correct behavior with overlapping index registers.
 7070 ldrd with base writeback is undefined if the
7071 destination and index registers overlap. */
7072 if (!(insn & (1 << 24))) {
b0109805
PB
7073 gen_add_datah_offset(s, insn, address_offset, addr);
7074 store_reg(s, rn, addr);
9ee6e8bb
PB
7075 } else if (insn & (1 << 21)) {
7076 if (address_offset)
b0109805
PB
7077 tcg_gen_addi_i32(addr, addr, address_offset);
7078 store_reg(s, rn, addr);
7079 } else {
7d1b0095 7080 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7081 }
7082 if (load) {
7083 /* Complete the load. */
b0109805 7084 store_reg(s, rd, tmp);
9ee6e8bb
PB
7085 }
7086 }
7087 break;
7088 case 0x4:
7089 case 0x5:
7090 goto do_ldst;
7091 case 0x6:
7092 case 0x7:
7093 if (insn & (1 << 4)) {
7094 ARCH(6);
7095 /* Armv6 Media instructions. */
7096 rm = insn & 0xf;
7097 rn = (insn >> 16) & 0xf;
2c0262af 7098 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7099 rs = (insn >> 8) & 0xf;
7100 switch ((insn >> 23) & 3) {
7101 case 0: /* Parallel add/subtract. */
7102 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7103 tmp = load_reg(s, rn);
7104 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7105 sh = (insn >> 5) & 7;
7106 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7107 goto illegal_op;
6ddbc6e4 7108 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7109 tcg_temp_free_i32(tmp2);
6ddbc6e4 7110 store_reg(s, rd, tmp);
9ee6e8bb
PB
7111 break;
7112 case 1:
7113 if ((insn & 0x00700020) == 0) {
6c95676b 7114 /* Halfword pack. */
3670669c
PB
7115 tmp = load_reg(s, rn);
7116 tmp2 = load_reg(s, rm);
9ee6e8bb 7117 shift = (insn >> 7) & 0x1f;
3670669c
PB
7118 if (insn & (1 << 6)) {
7119 /* pkhtb */
22478e79
AZ
7120 if (shift == 0)
7121 shift = 31;
7122 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7123 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7124 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7125 } else {
7126 /* pkhbt */
22478e79
AZ
7127 if (shift)
7128 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7129 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7130 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7131 }
7132 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7133 tcg_temp_free_i32(tmp2);
3670669c 7134 store_reg(s, rd, tmp);
9ee6e8bb
PB
7135 } else if ((insn & 0x00200020) == 0x00200000) {
7136 /* [us]sat */
6ddbc6e4 7137 tmp = load_reg(s, rm);
9ee6e8bb
PB
7138 shift = (insn >> 7) & 0x1f;
7139 if (insn & (1 << 6)) {
7140 if (shift == 0)
7141 shift = 31;
6ddbc6e4 7142 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7143 } else {
6ddbc6e4 7144 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7145 }
7146 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7147 tmp2 = tcg_const_i32(sh);
7148 if (insn & (1 << 22))
7149 gen_helper_usat(tmp, tmp, tmp2);
7150 else
7151 gen_helper_ssat(tmp, tmp, tmp2);
7152 tcg_temp_free_i32(tmp2);
6ddbc6e4 7153 store_reg(s, rd, tmp);
9ee6e8bb
PB
7154 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7155 /* [us]sat16 */
6ddbc6e4 7156 tmp = load_reg(s, rm);
9ee6e8bb 7157 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7158 tmp2 = tcg_const_i32(sh);
7159 if (insn & (1 << 22))
7160 gen_helper_usat16(tmp, tmp, tmp2);
7161 else
7162 gen_helper_ssat16(tmp, tmp, tmp2);
7163 tcg_temp_free_i32(tmp2);
6ddbc6e4 7164 store_reg(s, rd, tmp);
9ee6e8bb
PB
7165 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7166 /* Select bytes. */
6ddbc6e4
PB
7167 tmp = load_reg(s, rn);
7168 tmp2 = load_reg(s, rm);
7d1b0095 7169 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7170 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7171 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7172 tcg_temp_free_i32(tmp3);
7173 tcg_temp_free_i32(tmp2);
6ddbc6e4 7174 store_reg(s, rd, tmp);
9ee6e8bb 7175 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7176 tmp = load_reg(s, rm);
9ee6e8bb
PB
7177 shift = (insn >> 10) & 3;
 7178 /* ??? In many cases it's not necessary to do a
7179 rotate, a shift is sufficient. */
7180 if (shift != 0)
f669df27 7181 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7182 op1 = (insn >> 20) & 7;
7183 switch (op1) {
5e3f878a
PB
7184 case 0: gen_sxtb16(tmp); break;
7185 case 2: gen_sxtb(tmp); break;
7186 case 3: gen_sxth(tmp); break;
7187 case 4: gen_uxtb16(tmp); break;
7188 case 6: gen_uxtb(tmp); break;
7189 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7190 default: goto illegal_op;
7191 }
7192 if (rn != 15) {
5e3f878a 7193 tmp2 = load_reg(s, rn);
9ee6e8bb 7194 if ((op1 & 3) == 0) {
5e3f878a 7195 gen_add16(tmp, tmp2);
9ee6e8bb 7196 } else {
5e3f878a 7197 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7198 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7199 }
7200 }
6c95676b 7201 store_reg(s, rd, tmp);
9ee6e8bb
PB
7202 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7203 /* rev */
b0109805 7204 tmp = load_reg(s, rm);
9ee6e8bb
PB
7205 if (insn & (1 << 22)) {
7206 if (insn & (1 << 7)) {
b0109805 7207 gen_revsh(tmp);
9ee6e8bb
PB
7208 } else {
7209 ARCH(6T2);
b0109805 7210 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7211 }
7212 } else {
7213 if (insn & (1 << 7))
b0109805 7214 gen_rev16(tmp);
9ee6e8bb 7215 else
66896cb8 7216 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7217 }
b0109805 7218 store_reg(s, rd, tmp);
9ee6e8bb
PB
7219 } else {
7220 goto illegal_op;
7221 }
7222 break;
7223 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7224 tmp = load_reg(s, rm);
7225 tmp2 = load_reg(s, rs);
9ee6e8bb 7226 if (insn & (1 << 20)) {
838fa72d
AJ
7227 /* Signed multiply most significant [accumulate].
7228 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7229 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7230
955a7dd5 7231 if (rd != 15) {
838fa72d 7232 tmp = load_reg(s, rd);
9ee6e8bb 7233 if (insn & (1 << 6)) {
838fa72d 7234 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7235 } else {
838fa72d 7236 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7237 }
7238 }
838fa72d
AJ
7239 if (insn & (1 << 5)) {
7240 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7241 }
7242 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7243 tmp = tcg_temp_new_i32();
838fa72d
AJ
7244 tcg_gen_trunc_i64_i32(tmp, tmp64);
7245 tcg_temp_free_i64(tmp64);
955a7dd5 7246 store_reg(s, rn, tmp);
9ee6e8bb
PB
7247 } else {
7248 if (insn & (1 << 5))
5e3f878a
PB
7249 gen_swap_half(tmp2);
7250 gen_smul_dual(tmp, tmp2);
5e3f878a 7251 if (insn & (1 << 6)) {
e1d177b9 7252 /* This subtraction cannot overflow. */
5e3f878a
PB
7253 tcg_gen_sub_i32(tmp, tmp, tmp2);
7254 } else {
e1d177b9
PM
7255 /* This addition cannot overflow 32 bits;
 7256 * however it may overflow when considered as a signed
7257 * operation, in which case we must set the Q flag.
7258 */
7259 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7260 }
7d1b0095 7261 tcg_temp_free_i32(tmp2);
9ee6e8bb 7262 if (insn & (1 << 22)) {
5e3f878a 7263 /* smlald, smlsld */
a7812ae4
PB
7264 tmp64 = tcg_temp_new_i64();
7265 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7266 tcg_temp_free_i32(tmp);
a7812ae4
PB
7267 gen_addq(s, tmp64, rd, rn);
7268 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7269 tcg_temp_free_i64(tmp64);
9ee6e8bb 7270 } else {
5e3f878a 7271 /* smuad, smusd, smlad, smlsd */
22478e79 7272 if (rd != 15)
9ee6e8bb 7273 {
22478e79 7274 tmp2 = load_reg(s, rd);
5e3f878a 7275 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7276 tcg_temp_free_i32(tmp2);
9ee6e8bb 7277 }
22478e79 7278 store_reg(s, rn, tmp);
9ee6e8bb
PB
7279 }
7280 }
7281 break;
7282 case 3:
7283 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7284 switch (op1) {
7285 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7286 ARCH(6);
7287 tmp = load_reg(s, rm);
7288 tmp2 = load_reg(s, rs);
7289 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7290 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7291 if (rd != 15) {
7292 tmp2 = load_reg(s, rd);
6ddbc6e4 7293 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7294 tcg_temp_free_i32(tmp2);
9ee6e8bb 7295 }
ded9d295 7296 store_reg(s, rn, tmp);
9ee6e8bb
PB
7297 break;
7298 case 0x20: case 0x24: case 0x28: case 0x2c:
7299 /* Bitfield insert/clear. */
7300 ARCH(6T2);
7301 shift = (insn >> 7) & 0x1f;
7302 i = (insn >> 16) & 0x1f;
7303 i = i + 1 - shift;
7304 if (rm == 15) {
7d1b0095 7305 tmp = tcg_temp_new_i32();
5e3f878a 7306 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7307 } else {
5e3f878a 7308 tmp = load_reg(s, rm);
9ee6e8bb
PB
7309 }
7310 if (i != 32) {
5e3f878a 7311 tmp2 = load_reg(s, rd);
8f8e3aa4 7312 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7313 tcg_temp_free_i32(tmp2);
9ee6e8bb 7314 }
5e3f878a 7315 store_reg(s, rd, tmp);
9ee6e8bb
PB
7316 break;
7317 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7318 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7319 ARCH(6T2);
5e3f878a 7320 tmp = load_reg(s, rm);
9ee6e8bb
PB
7321 shift = (insn >> 7) & 0x1f;
7322 i = ((insn >> 16) & 0x1f) + 1;
7323 if (shift + i > 32)
7324 goto illegal_op;
7325 if (i < 32) {
7326 if (op1 & 0x20) {
5e3f878a 7327 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7328 } else {
5e3f878a 7329 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7330 }
7331 }
5e3f878a 7332 store_reg(s, rd, tmp);
9ee6e8bb
PB
7333 break;
7334 default:
7335 goto illegal_op;
7336 }
7337 break;
7338 }
7339 break;
7340 }
7341 do_ldst:
7342 /* Check for undefined extension instructions
 7343 * per the ARM Bible, i.e.:
7344 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7345 */
7346 sh = (0xf << 20) | (0xf << 4);
7347 if (op1 == 0x7 && ((insn & sh) == sh))
7348 {
7349 goto illegal_op;
7350 }
7351 /* load/store byte/word */
7352 rn = (insn >> 16) & 0xf;
7353 rd = (insn >> 12) & 0xf;
b0109805 7354 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7355 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7356 if (insn & (1 << 24))
b0109805 7357 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7358 if (insn & (1 << 20)) {
7359 /* load */
9ee6e8bb 7360 if (insn & (1 << 22)) {
b0109805 7361 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7362 } else {
b0109805 7363 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7364 }
9ee6e8bb
PB
7365 } else {
7366 /* store */
b0109805 7367 tmp = load_reg(s, rd);
9ee6e8bb 7368 if (insn & (1 << 22))
b0109805 7369 gen_st8(tmp, tmp2, i);
9ee6e8bb 7370 else
b0109805 7371 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7372 }
7373 if (!(insn & (1 << 24))) {
b0109805
PB
7374 gen_add_data_offset(s, insn, tmp2);
7375 store_reg(s, rn, tmp2);
7376 } else if (insn & (1 << 21)) {
7377 store_reg(s, rn, tmp2);
7378 } else {
7d1b0095 7379 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7380 }
7381 if (insn & (1 << 20)) {
7382 /* Complete the load. */
be5e7a76 7383 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7384 }
7385 break;
7386 case 0x08:
7387 case 0x09:
7388 {
7389 int j, n, user, loaded_base;
b0109805 7390 TCGv loaded_var;
9ee6e8bb
PB
7391 /* load/store multiple words */
7392 /* XXX: store correct base if write back */
7393 user = 0;
7394 if (insn & (1 << 22)) {
7395 if (IS_USER(s))
7396 goto illegal_op; /* only usable in supervisor mode */
7397
7398 if ((insn & (1 << 15)) == 0)
7399 user = 1;
7400 }
7401 rn = (insn >> 16) & 0xf;
b0109805 7402 addr = load_reg(s, rn);
9ee6e8bb
PB
7403
7404 /* compute total size */
7405 loaded_base = 0;
a50f5b91 7406 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7407 n = 0;
7408 for(i=0;i<16;i++) {
7409 if (insn & (1 << i))
7410 n++;
7411 }
7412 /* XXX: test invalid n == 0 case ? */
7413 if (insn & (1 << 23)) {
7414 if (insn & (1 << 24)) {
7415 /* pre increment */
b0109805 7416 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7417 } else {
7418 /* post increment */
7419 }
7420 } else {
7421 if (insn & (1 << 24)) {
7422 /* pre decrement */
b0109805 7423 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7424 } else {
7425 /* post decrement */
7426 if (n != 1)
b0109805 7427 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7428 }
7429 }
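            /* Worked example: for LDMDB r0!, {r1,r2,r3} (pre-decrement),
             * n == 3, so addr starts at r0 - 12 and the loop below loads
             * r1..r3 from ascending addresses; the writeback code after the
             * loop then brings addr back to r0 - 12 before storing it to r0.
             */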
7430 j = 0;
7431 for(i=0;i<16;i++) {
7432 if (insn & (1 << i)) {
7433 if (insn & (1 << 20)) {
7434 /* load */
b0109805 7435 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7436 if (user) {
b75263d6
JR
7437 tmp2 = tcg_const_i32(i);
7438 gen_helper_set_user_reg(tmp2, tmp);
7439 tcg_temp_free_i32(tmp2);
7d1b0095 7440 tcg_temp_free_i32(tmp);
9ee6e8bb 7441 } else if (i == rn) {
b0109805 7442 loaded_var = tmp;
9ee6e8bb
PB
7443 loaded_base = 1;
7444 } else {
be5e7a76 7445 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7446 }
7447 } else {
7448 /* store */
7449 if (i == 15) {
7450 /* special case: r15 = PC + 8 */
7451 val = (long)s->pc + 4;
7d1b0095 7452 tmp = tcg_temp_new_i32();
b0109805 7453 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7454 } else if (user) {
7d1b0095 7455 tmp = tcg_temp_new_i32();
b75263d6
JR
7456 tmp2 = tcg_const_i32(i);
7457 gen_helper_get_user_reg(tmp, tmp2);
7458 tcg_temp_free_i32(tmp2);
9ee6e8bb 7459 } else {
b0109805 7460 tmp = load_reg(s, i);
9ee6e8bb 7461 }
b0109805 7462 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7463 }
7464 j++;
7465 /* no need to add after the last transfer */
7466 if (j != n)
b0109805 7467 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7468 }
7469 }
7470 if (insn & (1 << 21)) {
7471 /* write back */
7472 if (insn & (1 << 23)) {
7473 if (insn & (1 << 24)) {
7474 /* pre increment */
7475 } else {
7476 /* post increment */
b0109805 7477 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7478 }
7479 } else {
7480 if (insn & (1 << 24)) {
7481 /* pre decrement */
7482 if (n != 1)
b0109805 7483 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7484 } else {
7485 /* post decrement */
b0109805 7486 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7487 }
7488 }
b0109805
PB
7489 store_reg(s, rn, addr);
7490 } else {
7d1b0095 7491 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7492 }
7493 if (loaded_base) {
b0109805 7494 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7495 }
7496 if ((insn & (1 << 22)) && !user) {
7497 /* Restore CPSR from SPSR. */
d9ba4830
PB
7498 tmp = load_cpu_field(spsr);
7499 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7500 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7501 s->is_jmp = DISAS_UPDATE;
7502 }
7503 }
7504 break;
7505 case 0xa:
7506 case 0xb:
7507 {
7508 int32_t offset;
7509
7510 /* branch (and link) */
7511 val = (int32_t)s->pc;
7512 if (insn & (1 << 24)) {
7d1b0095 7513 tmp = tcg_temp_new_i32();
5e3f878a
PB
7514 tcg_gen_movi_i32(tmp, val);
7515 store_reg(s, 14, tmp);
9ee6e8bb
PB
7516 }
7517 offset = (((int32_t)insn << 8) >> 8);
7518 val += (offset << 2) + 4;
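                /* val was loaded with s->pc, which already points 4 bytes
                 * past this instruction, so adding (offset << 2) + 4 gives
                 * the architectural target of PC + 8 + imm24 * 4; e.g. an
                 * encoded offset of -2 is a branch to this instruction
                 * itself.
                 */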
7519 gen_jmp(s, val);
7520 }
7521 break;
7522 case 0xc:
7523 case 0xd:
7524 case 0xe:
7525 /* Coprocessor. */
7526 if (disas_coproc_insn(env, s, insn))
7527 goto illegal_op;
7528 break;
7529 case 0xf:
7530 /* swi */
5e3f878a 7531 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7532 s->is_jmp = DISAS_SWI;
7533 break;
7534 default:
7535 illegal_op:
bc4a0de0 7536 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7537 break;
7538 }
7539 }
7540}
7541
7542/* Return true if this is a Thumb-2 logical op. */
7543static int
7544thumb2_logic_op(int op)
7545{
7546 return (op < 8);
7547}
7548
7549/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7550 then set condition code flags based on the result of the operation.
7551 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7552 to the high bit of T1.
7553 Returns zero if the opcode is valid. */
7554
7555static int
396e467c 7556gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7557{
7558 int logic_cc;
7559
7560 logic_cc = 0;
7561 switch (op) {
7562 case 0: /* and */
396e467c 7563 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7564 logic_cc = conds;
7565 break;
7566 case 1: /* bic */
f669df27 7567 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7568 logic_cc = conds;
7569 break;
7570 case 2: /* orr */
396e467c 7571 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7572 logic_cc = conds;
7573 break;
7574 case 3: /* orn */
29501f1b 7575 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7576 logic_cc = conds;
7577 break;
7578 case 4: /* eor */
396e467c 7579 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7580 logic_cc = conds;
7581 break;
7582 case 8: /* add */
7583 if (conds)
396e467c 7584 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7585 else
396e467c 7586 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7587 break;
7588 case 10: /* adc */
7589 if (conds)
396e467c 7590 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7591 else
396e467c 7592 gen_adc(t0, t1);
9ee6e8bb
PB
7593 break;
7594 case 11: /* sbc */
7595 if (conds)
396e467c 7596 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7597 else
396e467c 7598 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7599 break;
7600 case 13: /* sub */
7601 if (conds)
396e467c 7602 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7603 else
396e467c 7604 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7605 break;
7606 case 14: /* rsb */
7607 if (conds)
396e467c 7608 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7609 else
396e467c 7610 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7611 break;
7612 default: /* 5, 6, 7, 9, 12, 15. */
7613 return 1;
7614 }
7615 if (logic_cc) {
396e467c 7616 gen_logic_CC(t0);
9ee6e8bb 7617 if (shifter_out)
396e467c 7618 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7619 }
7620 return 0;
7621}
7622
7623/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7624 is not legal. */
7625static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7626{
b0109805 7627 uint32_t insn, imm, shift, offset;
9ee6e8bb 7628 uint32_t rd, rn, rm, rs;
b26eefb6 7629 TCGv tmp;
6ddbc6e4
PB
7630 TCGv tmp2;
7631 TCGv tmp3;
b0109805 7632 TCGv addr;
a7812ae4 7633 TCGv_i64 tmp64;
9ee6e8bb
PB
7634 int op;
7635 int shiftop;
7636 int conds;
7637 int logic_cc;
7638
7639 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7640 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7641 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7642 16-bit instructions to get correct prefetch abort behavior. */
7643 insn = insn_hw1;
7644 if ((insn & (1 << 12)) == 0) {
be5e7a76 7645 ARCH(5);
9ee6e8bb
PB
7646 /* Second half of blx. */
7647 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7648 tmp = load_reg(s, 14);
7649 tcg_gen_addi_i32(tmp, tmp, offset);
7650 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7651
7d1b0095 7652 tmp2 = tcg_temp_new_i32();
b0109805 7653 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7654 store_reg(s, 14, tmp2);
7655 gen_bx(s, tmp);
9ee6e8bb
PB
7656 return 0;
7657 }
7658 if (insn & (1 << 11)) {
7659 /* Second half of bl. */
7660 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7661 tmp = load_reg(s, 14);
6a0d8a1d 7662 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7663
7d1b0095 7664 tmp2 = tcg_temp_new_i32();
b0109805 7665 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7666 store_reg(s, 14, tmp2);
7667 gen_bx(s, tmp);
9ee6e8bb
PB
7668 return 0;
7669 }
7670 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7671 /* Instruction spans a page boundary. Implement it as two
7672 16-bit instructions in case the second half causes a
7673 prefetch abort. */
7674 offset = ((int32_t)insn << 21) >> 9;
396e467c 7675 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7676 return 0;
7677 }
7678 /* Fall through to 32-bit decode. */
7679 }
7680
7681 insn = lduw_code(s->pc);
7682 s->pc += 2;
7683 insn |= (uint32_t)insn_hw1 << 16;
7684
7685 if ((insn & 0xf800e800) != 0xf000e800) {
7686 ARCH(6T2);
7687 }
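    /* Of the 32-bit encodings, only the original bl/blx pair (hw1 in the
     * 11110 group, hw2 of the form 111x1...) predates Thumb-2, so anything
     * that fails the mask above requires 6T2.
     */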
7688
7689 rn = (insn >> 16) & 0xf;
7690 rs = (insn >> 12) & 0xf;
7691 rd = (insn >> 8) & 0xf;
7692 rm = insn & 0xf;
7693 switch ((insn >> 25) & 0xf) {
7694 case 0: case 1: case 2: case 3:
7695 /* 16-bit instructions. Should never happen. */
7696 abort();
7697 case 4:
7698 if (insn & (1 << 22)) {
7699 /* Other load/store, table branch. */
7700 if (insn & 0x01200000) {
7701 /* Load/store doubleword. */
7702 if (rn == 15) {
7d1b0095 7703 addr = tcg_temp_new_i32();
b0109805 7704 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7705 } else {
b0109805 7706 addr = load_reg(s, rn);
9ee6e8bb
PB
7707 }
7708 offset = (insn & 0xff) * 4;
7709 if ((insn & (1 << 23)) == 0)
7710 offset = -offset;
7711 if (insn & (1 << 24)) {
b0109805 7712 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7713 offset = 0;
7714 }
7715 if (insn & (1 << 20)) {
7716 /* ldrd */
b0109805
PB
7717 tmp = gen_ld32(addr, IS_USER(s));
7718 store_reg(s, rs, tmp);
7719 tcg_gen_addi_i32(addr, addr, 4);
7720 tmp = gen_ld32(addr, IS_USER(s));
7721 store_reg(s, rd, tmp);
9ee6e8bb
PB
7722 } else {
7723 /* strd */
b0109805
PB
7724 tmp = load_reg(s, rs);
7725 gen_st32(tmp, addr, IS_USER(s));
7726 tcg_gen_addi_i32(addr, addr, 4);
7727 tmp = load_reg(s, rd);
7728 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7729 }
7730 if (insn & (1 << 21)) {
7731 /* Base writeback. */
7732 if (rn == 15)
7733 goto illegal_op;
b0109805
PB
7734 tcg_gen_addi_i32(addr, addr, offset - 4);
7735 store_reg(s, rn, addr);
7736 } else {
7d1b0095 7737 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7738 }
7739 } else if ((insn & (1 << 23)) == 0) {
7740 /* Load/store exclusive word. */
3174f8e9 7741 addr = tcg_temp_local_new();
98a46317 7742 load_reg_var(s, addr, rn);
426f5abc 7743 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7744 if (insn & (1 << 20)) {
426f5abc 7745 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7746 } else {
426f5abc 7747 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7748 }
3174f8e9 7749 tcg_temp_free(addr);
9ee6e8bb
PB
7750 } else if ((insn & (1 << 6)) == 0) {
7751 /* Table Branch. */
7752 if (rn == 15) {
7d1b0095 7753 addr = tcg_temp_new_i32();
b0109805 7754 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7755 } else {
b0109805 7756 addr = load_reg(s, rn);
9ee6e8bb 7757 }
b26eefb6 7758 tmp = load_reg(s, rm);
b0109805 7759 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7760 if (insn & (1 << 4)) {
7761 /* tbh */
b0109805 7762 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 7763 tcg_temp_free_i32(tmp);
b0109805 7764 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7765 } else { /* tbb */
7d1b0095 7766 tcg_temp_free_i32(tmp);
b0109805 7767 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7768 }
7d1b0095 7769 tcg_temp_free_i32(addr);
b0109805
PB
7770 tcg_gen_shli_i32(tmp, tmp, 1);
7771 tcg_gen_addi_i32(tmp, tmp, s->pc);
7772 store_reg(s, 15, tmp);
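                /* Net effect: the new PC is s->pc + 2 * M[Rn + Rm] for tbb,
                 * or s->pc + 2 * M[Rn + 2 * Rm] for tbh, with s->pc being
                 * the address of this instruction plus 4.
                 */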
9ee6e8bb
PB
7773 } else {
7774 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7775 ARCH(7);
9ee6e8bb 7776 op = (insn >> 4) & 0x3;
426f5abc
PB
7777 if (op == 2) {
7778 goto illegal_op;
7779 }
3174f8e9 7780 addr = tcg_temp_local_new();
98a46317 7781 load_reg_var(s, addr, rn);
9ee6e8bb 7782 if (insn & (1 << 20)) {
426f5abc 7783 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7784 } else {
426f5abc 7785 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 7786 }
3174f8e9 7787 tcg_temp_free(addr);
9ee6e8bb
PB
7788 }
7789 } else {
7790 /* Load/store multiple, RFE, SRS. */
7791 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7792 /* Not available in user mode. */
b0109805 7793 if (IS_USER(s))
9ee6e8bb
PB
7794 goto illegal_op;
7795 if (insn & (1 << 20)) {
7796 /* rfe */
b0109805
PB
7797 addr = load_reg(s, rn);
7798 if ((insn & (1 << 24)) == 0)
7799 tcg_gen_addi_i32(addr, addr, -8);
7800 /* Load PC into tmp and CPSR into tmp2. */
7801 tmp = gen_ld32(addr, 0);
7802 tcg_gen_addi_i32(addr, addr, 4);
7803 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7804 if (insn & (1 << 21)) {
7805 /* Base writeback. */
b0109805
PB
7806 if (insn & (1 << 24)) {
7807 tcg_gen_addi_i32(addr, addr, 4);
7808 } else {
7809 tcg_gen_addi_i32(addr, addr, -4);
7810 }
7811 store_reg(s, rn, addr);
7812 } else {
7d1b0095 7813 tcg_temp_free_i32(addr);
9ee6e8bb 7814 }
b0109805 7815 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7816 } else {
7817 /* srs */
7818 op = (insn & 0x1f);
7d1b0095 7819 addr = tcg_temp_new_i32();
39ea3d4e
PM
7820 tmp = tcg_const_i32(op);
7821 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7822 tcg_temp_free_i32(tmp);
9ee6e8bb 7823 if ((insn & (1 << 24)) == 0) {
b0109805 7824 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7825 }
b0109805
PB
7826 tmp = load_reg(s, 14);
7827 gen_st32(tmp, addr, 0);
7828 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 7829 tmp = tcg_temp_new_i32();
b0109805
PB
7830 gen_helper_cpsr_read(tmp);
7831 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7832 if (insn & (1 << 21)) {
7833 if ((insn & (1 << 24)) == 0) {
b0109805 7834 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7835 } else {
b0109805 7836 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 7837 }
39ea3d4e
PM
7838 tmp = tcg_const_i32(op);
7839 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7840 tcg_temp_free_i32(tmp);
b0109805 7841 } else {
7d1b0095 7842 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7843 }
7844 }
7845 } else {
7846 int i;
7847 /* Load/store multiple. */
b0109805 7848 addr = load_reg(s, rn);
9ee6e8bb
PB
7849 offset = 0;
7850 for (i = 0; i < 16; i++) {
7851 if (insn & (1 << i))
7852 offset += 4;
7853 }
7854 if (insn & (1 << 24)) {
b0109805 7855 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7856 }
7857
7858 for (i = 0; i < 16; i++) {
7859 if ((insn & (1 << i)) == 0)
7860 continue;
7861 if (insn & (1 << 20)) {
7862 /* Load. */
b0109805 7863 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7864 if (i == 15) {
b0109805 7865 gen_bx(s, tmp);
9ee6e8bb 7866 } else {
b0109805 7867 store_reg(s, i, tmp);
9ee6e8bb
PB
7868 }
7869 } else {
7870 /* Store. */
b0109805
PB
7871 tmp = load_reg(s, i);
7872 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7873 }
b0109805 7874 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7875 }
7876 if (insn & (1 << 21)) {
7877 /* Base register writeback. */
7878 if (insn & (1 << 24)) {
b0109805 7879 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7880 }
7881 /* Fault if writeback register is in register list. */
7882 if (insn & (1 << rn))
7883 goto illegal_op;
b0109805
PB
7884 store_reg(s, rn, addr);
7885 } else {
7d1b0095 7886 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7887 }
7888 }
7889 }
7890 break;
2af9ab77
JB
7891 case 5:
7892
9ee6e8bb 7893 op = (insn >> 21) & 0xf;
2af9ab77
JB
7894 if (op == 6) {
7895 /* Halfword pack. */
7896 tmp = load_reg(s, rn);
7897 tmp2 = load_reg(s, rm);
7898 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7899 if (insn & (1 << 5)) {
7900 /* pkhtb */
7901 if (shift == 0)
7902 shift = 31;
7903 tcg_gen_sari_i32(tmp2, tmp2, shift);
7904 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7905 tcg_gen_ext16u_i32(tmp2, tmp2);
7906 } else {
7907 /* pkhbt */
7908 if (shift)
7909 tcg_gen_shli_i32(tmp2, tmp2, shift);
7910 tcg_gen_ext16u_i32(tmp, tmp);
7911 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7912 }
7913 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7914 tcg_temp_free_i32(tmp2);
3174f8e9
FN
7915 store_reg(s, rd, tmp);
7916 } else {
2af9ab77
JB
7917 /* Data processing register constant shift. */
7918 if (rn == 15) {
7d1b0095 7919 tmp = tcg_temp_new_i32();
2af9ab77
JB
7920 tcg_gen_movi_i32(tmp, 0);
7921 } else {
7922 tmp = load_reg(s, rn);
7923 }
7924 tmp2 = load_reg(s, rm);
7925
7926 shiftop = (insn >> 4) & 3;
7927 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7928 conds = (insn & (1 << 20)) != 0;
7929 logic_cc = (conds && thumb2_logic_op(op));
7930 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7931 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7932 goto illegal_op;
7d1b0095 7933 tcg_temp_free_i32(tmp2);
2af9ab77
JB
7934 if (rd != 15) {
7935 store_reg(s, rd, tmp);
7936 } else {
7d1b0095 7937 tcg_temp_free_i32(tmp);
2af9ab77 7938 }
3174f8e9 7939 }
9ee6e8bb
PB
7940 break;
7941 case 13: /* Misc data processing. */
7942 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7943 if (op < 4 && (insn & 0xf000) != 0xf000)
7944 goto illegal_op;
7945 switch (op) {
7946 case 0: /* Register controlled shift. */
8984bd2e
PB
7947 tmp = load_reg(s, rn);
7948 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7949 if ((insn & 0x70) != 0)
7950 goto illegal_op;
7951 op = (insn >> 21) & 3;
8984bd2e
PB
7952 logic_cc = (insn & (1 << 20)) != 0;
7953 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7954 if (logic_cc)
7955 gen_logic_CC(tmp);
21aeb343 7956 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7957 break;
7958 case 1: /* Sign/zero extend. */
5e3f878a 7959 tmp = load_reg(s, rm);
9ee6e8bb
PB
7960 shift = (insn >> 4) & 3;
7961 /* ??? In many cases it's not necessary to do a
7962 rotate, a shift is sufficient. */
7963 if (shift != 0)
f669df27 7964 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7965 op = (insn >> 20) & 7;
7966 switch (op) {
5e3f878a
PB
7967 case 0: gen_sxth(tmp); break;
7968 case 1: gen_uxth(tmp); break;
7969 case 2: gen_sxtb16(tmp); break;
7970 case 3: gen_uxtb16(tmp); break;
7971 case 4: gen_sxtb(tmp); break;
7972 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7973 default: goto illegal_op;
7974 }
7975 if (rn != 15) {
5e3f878a 7976 tmp2 = load_reg(s, rn);
9ee6e8bb 7977 if ((op >> 1) == 1) {
5e3f878a 7978 gen_add16(tmp, tmp2);
9ee6e8bb 7979 } else {
5e3f878a 7980 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7981 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7982 }
7983 }
5e3f878a 7984 store_reg(s, rd, tmp);
9ee6e8bb
PB
7985 break;
7986 case 2: /* SIMD add/subtract. */
7987 op = (insn >> 20) & 7;
7988 shift = (insn >> 4) & 7;
7989 if ((op & 3) == 3 || (shift & 3) == 3)
7990 goto illegal_op;
6ddbc6e4
PB
7991 tmp = load_reg(s, rn);
7992 tmp2 = load_reg(s, rm);
7993 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 7994 tcg_temp_free_i32(tmp2);
6ddbc6e4 7995 store_reg(s, rd, tmp);
9ee6e8bb
PB
7996 break;
7997 case 3: /* Other data processing. */
7998 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7999 if (op < 4) {
8000 /* Saturating add/subtract. */
d9ba4830
PB
8001 tmp = load_reg(s, rn);
8002 tmp2 = load_reg(s, rm);
9ee6e8bb 8003 if (op & 1)
4809c612
JB
8004 gen_helper_double_saturate(tmp, tmp);
8005 if (op & 2)
d9ba4830 8006 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8007 else
d9ba4830 8008 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8009 tcg_temp_free_i32(tmp2);
9ee6e8bb 8010 } else {
d9ba4830 8011 tmp = load_reg(s, rn);
9ee6e8bb
PB
8012 switch (op) {
8013 case 0x0a: /* rbit */
d9ba4830 8014 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8015 break;
8016 case 0x08: /* rev */
66896cb8 8017 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8018 break;
8019 case 0x09: /* rev16 */
d9ba4830 8020 gen_rev16(tmp);
9ee6e8bb
PB
8021 break;
8022 case 0x0b: /* revsh */
d9ba4830 8023 gen_revsh(tmp);
9ee6e8bb
PB
8024 break;
8025 case 0x10: /* sel */
d9ba4830 8026 tmp2 = load_reg(s, rm);
7d1b0095 8027 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8028 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8029 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8030 tcg_temp_free_i32(tmp3);
8031 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8032 break;
8033 case 0x18: /* clz */
d9ba4830 8034 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8035 break;
8036 default:
8037 goto illegal_op;
8038 }
8039 }
d9ba4830 8040 store_reg(s, rd, tmp);
9ee6e8bb
PB
8041 break;
8042 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8043 op = (insn >> 4) & 0xf;
d9ba4830
PB
8044 tmp = load_reg(s, rn);
8045 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8046 switch ((insn >> 20) & 7) {
8047 case 0: /* 32 x 32 -> 32 */
d9ba4830 8048 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8049 tcg_temp_free_i32(tmp2);
9ee6e8bb 8050 if (rs != 15) {
d9ba4830 8051 tmp2 = load_reg(s, rs);
9ee6e8bb 8052 if (op)
d9ba4830 8053 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8054 else
d9ba4830 8055 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8056 tcg_temp_free_i32(tmp2);
9ee6e8bb 8057 }
9ee6e8bb
PB
8058 break;
8059 case 1: /* 16 x 16 -> 32 */
d9ba4830 8060 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8061 tcg_temp_free_i32(tmp2);
9ee6e8bb 8062 if (rs != 15) {
d9ba4830
PB
8063 tmp2 = load_reg(s, rs);
8064 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8065 tcg_temp_free_i32(tmp2);
9ee6e8bb 8066 }
9ee6e8bb
PB
8067 break;
8068 case 2: /* Dual multiply add. */
8069 case 4: /* Dual multiply subtract. */
8070 if (op)
d9ba4830
PB
8071 gen_swap_half(tmp2);
8072 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8073 if (insn & (1 << 22)) {
e1d177b9 8074 /* This subtraction cannot overflow. */
d9ba4830 8075 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8076 } else {
e1d177b9
PM
8077 /* This addition cannot overflow 32 bits;
8078 * however it may overflow considered as a signed
8079 * operation, in which case we must set the Q flag.
8080 */
8081 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8082 }
7d1b0095 8083 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8084 if (rs != 15)
8085 {
d9ba4830
PB
8086 tmp2 = load_reg(s, rs);
8087 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8088 tcg_temp_free_i32(tmp2);
9ee6e8bb 8089 }
9ee6e8bb
PB
8090 break;
8091 case 3: /* 32 * 16 -> 32msb */
8092 if (op)
d9ba4830 8093 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8094 else
d9ba4830 8095 gen_sxth(tmp2);
a7812ae4
PB
8096 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8097 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8098 tmp = tcg_temp_new_i32();
a7812ae4 8099 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8100 tcg_temp_free_i64(tmp64);
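                /* This keeps bits [47:16] of the signed 32 x 16 product,
                 * i.e. the SMULW/SMLAW behaviour of discarding the bottom
                 * 16 bits of the full product.
                 */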
9ee6e8bb
PB
8101 if (rs != 15)
8102 {
d9ba4830
PB
8103 tmp2 = load_reg(s, rs);
8104 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8105 tcg_temp_free_i32(tmp2);
9ee6e8bb 8106 }
9ee6e8bb 8107 break;
838fa72d
AJ
8108 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8109 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8110 if (rs != 15) {
838fa72d
AJ
8111 tmp = load_reg(s, rs);
8112 if (insn & (1 << 20)) {
8113 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8114 } else {
838fa72d 8115 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8116 }
2c0262af 8117 }
838fa72d
AJ
8118 if (insn & (1 << 4)) {
8119 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8120 }
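                /* Bit 4 selects the rounding forms: adding 0x80000000 to the
                 * 64-bit product rounds to nearest before the high 32 bits
                 * are extracted below.
                 */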
8121 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8122 tmp = tcg_temp_new_i32();
838fa72d
AJ
8123 tcg_gen_trunc_i64_i32(tmp, tmp64);
8124 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8125 break;
8126 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8127 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8128 tcg_temp_free_i32(tmp2);
9ee6e8bb 8129 if (rs != 15) {
d9ba4830
PB
8130 tmp2 = load_reg(s, rs);
8131 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8132 tcg_temp_free_i32(tmp2);
5fd46862 8133 }
9ee6e8bb 8134 break;
2c0262af 8135 }
d9ba4830 8136 store_reg(s, rd, tmp);
2c0262af 8137 break;
9ee6e8bb
PB
8138 case 6: case 7: /* 64-bit multiply, Divide. */
8139 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8140 tmp = load_reg(s, rn);
8141 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8142 if ((op & 0x50) == 0x10) {
8143 /* sdiv, udiv */
8144 if (!arm_feature(env, ARM_FEATURE_DIV))
8145 goto illegal_op;
8146 if (op & 0x20)
5e3f878a 8147 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8148 else
5e3f878a 8149 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8150 tcg_temp_free_i32(tmp2);
5e3f878a 8151 store_reg(s, rd, tmp);
9ee6e8bb
PB
8152 } else if ((op & 0xe) == 0xc) {
8153 /* Dual multiply accumulate long. */
8154 if (op & 1)
5e3f878a
PB
8155 gen_swap_half(tmp2);
8156 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8157 if (op & 0x10) {
5e3f878a 8158 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8159 } else {
5e3f878a 8160 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8161 }
7d1b0095 8162 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8163 /* BUGFIX */
8164 tmp64 = tcg_temp_new_i64();
8165 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8166 tcg_temp_free_i32(tmp);
a7812ae4
PB
8167 gen_addq(s, tmp64, rs, rd);
8168 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8169 tcg_temp_free_i64(tmp64);
2c0262af 8170 } else {
9ee6e8bb
PB
8171 if (op & 0x20) {
8172 /* Unsigned 64-bit multiply */
a7812ae4 8173 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8174 } else {
9ee6e8bb
PB
8175 if (op & 8) {
8176 /* smlalxy */
5e3f878a 8177 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8178 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8179 tmp64 = tcg_temp_new_i64();
8180 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8181 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8182 } else {
8183 /* Signed 64-bit multiply */
a7812ae4 8184 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8185 }
b5ff1b31 8186 }
9ee6e8bb
PB
8187 if (op & 4) {
8188 /* umaal */
a7812ae4
PB
8189 gen_addq_lo(s, tmp64, rs);
8190 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8191 } else if (op & 0x40) {
8192 /* 64-bit accumulate. */
a7812ae4 8193 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8194 }
a7812ae4 8195 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8196 tcg_temp_free_i64(tmp64);
5fd46862 8197 }
2c0262af 8198 break;
9ee6e8bb
PB
8199 }
8200 break;
8201 case 6: case 7: case 14: case 15:
8202 /* Coprocessor. */
8203 if (((insn >> 24) & 3) == 3) {
8204 /* Translate into the equivalent ARM encoding. */
f06053e3 8205 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8206 if (disas_neon_data_insn(env, s, insn))
8207 goto illegal_op;
8208 } else {
8209 if (insn & (1 << 28))
8210 goto illegal_op;
8211 if (disas_coproc_insn (env, s, insn))
8212 goto illegal_op;
8213 }
8214 break;
8215 case 8: case 9: case 10: case 11:
8216 if (insn & (1 << 15)) {
8217 /* Branches, misc control. */
8218 if (insn & 0x5000) {
8219 /* Unconditional branch. */
8220 /* signextend(hw1[10:0]) -> offset[:12]. */
8221 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8222 /* hw1[10:0] -> offset[11:1]. */
8223 offset |= (insn & 0x7ff) << 1;
8224 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8225 offset[24:22] already have the same value because of the
8226 sign extension above. */
8227 offset ^= ((~insn) & (1 << 13)) << 10;
8228 offset ^= ((~insn) & (1 << 11)) << 11;
8229
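                /* The two XORs recover the architectural I1/I2 bits: with
                 * S = hw1[10], I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S),
                 * so offset ends up as SignExtend(S:I1:I2:imm10:imm11:0),
                 * roughly a +/-16MB branch range.
                 */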
9ee6e8bb
PB
8230 if (insn & (1 << 14)) {
8231 /* Branch and link. */
3174f8e9 8232 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8233 }
3b46e624 8234
b0109805 8235 offset += s->pc;
9ee6e8bb
PB
8236 if (insn & (1 << 12)) {
8237 /* b/bl */
b0109805 8238 gen_jmp(s, offset);
9ee6e8bb
PB
8239 } else {
8240 /* blx */
b0109805 8241 offset &= ~(uint32_t)2;
be5e7a76 8242 /* thumb2 bx, no need to check */
b0109805 8243 gen_bx_im(s, offset);
2c0262af 8244 }
9ee6e8bb
PB
8245 } else if (((insn >> 23) & 7) == 7) {
8246 /* Misc control */
8247 if (insn & (1 << 13))
8248 goto illegal_op;
8249
8250 if (insn & (1 << 26)) {
8251 /* Secure monitor call (v6Z) */
8252 goto illegal_op; /* not implemented. */
2c0262af 8253 } else {
9ee6e8bb
PB
8254 op = (insn >> 20) & 7;
8255 switch (op) {
8256 case 0: /* msr cpsr. */
8257 if (IS_M(env)) {
8984bd2e
PB
8258 tmp = load_reg(s, rn);
8259 addr = tcg_const_i32(insn & 0xff);
8260 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8261 tcg_temp_free_i32(addr);
7d1b0095 8262 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8263 gen_lookup_tb(s);
8264 break;
8265 }
8266 /* fall through */
8267 case 1: /* msr spsr. */
8268 if (IS_M(env))
8269 goto illegal_op;
2fbac54b
FN
8270 tmp = load_reg(s, rn);
8271 if (gen_set_psr(s,
9ee6e8bb 8272 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8273 op == 1, tmp))
9ee6e8bb
PB
8274 goto illegal_op;
8275 break;
8276 case 2: /* cps, nop-hint. */
8277 if (((insn >> 8) & 7) == 0) {
8278 gen_nop_hint(s, insn & 0xff);
8279 }
8280 /* Implemented as NOP in user mode. */
8281 if (IS_USER(s))
8282 break;
8283 offset = 0;
8284 imm = 0;
8285 if (insn & (1 << 10)) {
8286 if (insn & (1 << 7))
8287 offset |= CPSR_A;
8288 if (insn & (1 << 6))
8289 offset |= CPSR_I;
8290 if (insn & (1 << 5))
8291 offset |= CPSR_F;
8292 if (insn & (1 << 9))
8293 imm = CPSR_A | CPSR_I | CPSR_F;
8294 }
8295 if (insn & (1 << 8)) {
8296 offset |= 0x1f;
8297 imm |= (insn & 0x1f);
8298 }
8299 if (offset) {
2fbac54b 8300 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8301 }
8302 break;
8303 case 3: /* Special control operations. */
426f5abc 8304 ARCH(7);
9ee6e8bb
PB
8305 op = (insn >> 4) & 0xf;
8306 switch (op) {
8307 case 2: /* clrex */
426f5abc 8308 gen_clrex(s);
9ee6e8bb
PB
8309 break;
8310 case 4: /* dsb */
8311 case 5: /* dmb */
8312 case 6: /* isb */
8313 /* These execute as NOPs. */
9ee6e8bb
PB
8314 break;
8315 default:
8316 goto illegal_op;
8317 }
8318 break;
8319 case 4: /* bxj */
8320 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8321 tmp = load_reg(s, rn);
8322 gen_bx(s, tmp);
9ee6e8bb
PB
8323 break;
8324 case 5: /* Exception return. */
b8b45b68
RV
8325 if (IS_USER(s)) {
8326 goto illegal_op;
8327 }
8328 if (rn != 14 || rd != 15) {
8329 goto illegal_op;
8330 }
8331 tmp = load_reg(s, rn);
8332 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8333 gen_exception_return(s, tmp);
8334 break;
9ee6e8bb 8335 case 6: /* mrs cpsr. */
7d1b0095 8336 tmp = tcg_temp_new_i32();
9ee6e8bb 8337 if (IS_M(env)) {
8984bd2e
PB
8338 addr = tcg_const_i32(insn & 0xff);
8339 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8340 tcg_temp_free_i32(addr);
9ee6e8bb 8341 } else {
8984bd2e 8342 gen_helper_cpsr_read(tmp);
9ee6e8bb 8343 }
8984bd2e 8344 store_reg(s, rd, tmp);
9ee6e8bb
PB
8345 break;
8346 case 7: /* mrs spsr. */
8347 /* Not accessible in user mode. */
8348 if (IS_USER(s) || IS_M(env))
8349 goto illegal_op;
d9ba4830
PB
8350 tmp = load_cpu_field(spsr);
8351 store_reg(s, rd, tmp);
9ee6e8bb 8352 break;
2c0262af
FB
8353 }
8354 }
9ee6e8bb
PB
8355 } else {
8356 /* Conditional branch. */
8357 op = (insn >> 22) & 0xf;
8358 /* Generate a conditional jump to next instruction. */
8359 s->condlabel = gen_new_label();
d9ba4830 8360 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8361 s->condjmp = 1;
8362
8363 /* offset[11:1] = insn[10:0] */
8364 offset = (insn & 0x7ff) << 1;
8365 /* offset[17:12] = insn[21:16]. */
8366 offset |= (insn & 0x003f0000) >> 4;
8367 /* offset[31:20] = insn[26]. */
8368 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8369 /* offset[18] = insn[13]. */
8370 offset |= (insn & (1 << 13)) << 5;
8371 /* offset[19] = insn[11]. */
8372 offset |= (insn & (1 << 11)) << 8;
8373
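                /* Assembled together this is SignExtend(S:J2:J1:imm6:imm11:0),
                 * i.e. a conditional branch range of roughly +/-1MB relative
                 * to s->pc (the address of the following instruction).
                 */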
8374 /* jump to the offset */
b0109805 8375 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8376 }
8377 } else {
8378 /* Data processing immediate. */
8379 if (insn & (1 << 25)) {
8380 if (insn & (1 << 24)) {
8381 if (insn & (1 << 20))
8382 goto illegal_op;
8383 /* Bitfield/Saturate. */
8384 op = (insn >> 21) & 7;
8385 imm = insn & 0x1f;
8386 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8387 if (rn == 15) {
7d1b0095 8388 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8389 tcg_gen_movi_i32(tmp, 0);
8390 } else {
8391 tmp = load_reg(s, rn);
8392 }
9ee6e8bb
PB
8393 switch (op) {
8394 case 2: /* Signed bitfield extract. */
8395 imm++;
8396 if (shift + imm > 32)
8397 goto illegal_op;
8398 if (imm < 32)
6ddbc6e4 8399 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8400 break;
8401 case 6: /* Unsigned bitfield extract. */
8402 imm++;
8403 if (shift + imm > 32)
8404 goto illegal_op;
8405 if (imm < 32)
6ddbc6e4 8406 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8407 break;
8408 case 3: /* Bitfield insert/clear. */
8409 if (imm < shift)
8410 goto illegal_op;
8411 imm = imm + 1 - shift;
8412 if (imm != 32) {
6ddbc6e4 8413 tmp2 = load_reg(s, rd);
8f8e3aa4 8414 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8415 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8416 }
8417 break;
8418 case 7:
8419 goto illegal_op;
8420 default: /* Saturate. */
9ee6e8bb
PB
8421 if (shift) {
8422 if (op & 1)
6ddbc6e4 8423 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8424 else
6ddbc6e4 8425 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8426 }
6ddbc6e4 8427 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8428 if (op & 4) {
8429 /* Unsigned. */
9ee6e8bb 8430 if ((op & 1) && shift == 0)
6ddbc6e4 8431 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8432 else
6ddbc6e4 8433 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8434 } else {
9ee6e8bb 8435 /* Signed. */
9ee6e8bb 8436 if ((op & 1) && shift == 0)
6ddbc6e4 8437 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8438 else
6ddbc6e4 8439 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8440 }
b75263d6 8441 tcg_temp_free_i32(tmp2);
9ee6e8bb 8442 break;
2c0262af 8443 }
6ddbc6e4 8444 store_reg(s, rd, tmp);
9ee6e8bb
PB
8445 } else {
8446 imm = ((insn & 0x04000000) >> 15)
8447 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8448 if (insn & (1 << 22)) {
8449 /* 16-bit immediate. */
8450 imm |= (insn >> 4) & 0xf000;
8451 if (insn & (1 << 23)) {
8452 /* movt */
5e3f878a 8453 tmp = load_reg(s, rd);
86831435 8454 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8455 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8456 } else {
9ee6e8bb 8457 /* movw */
7d1b0095 8458 tmp = tcg_temp_new_i32();
5e3f878a 8459 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8460 }
8461 } else {
9ee6e8bb
PB
8462 /* Add/sub 12-bit immediate. */
8463 if (rn == 15) {
b0109805 8464 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8465 if (insn & (1 << 23))
b0109805 8466 offset -= imm;
9ee6e8bb 8467 else
b0109805 8468 offset += imm;
7d1b0095 8469 tmp = tcg_temp_new_i32();
5e3f878a 8470 tcg_gen_movi_i32(tmp, offset);
2c0262af 8471 } else {
5e3f878a 8472 tmp = load_reg(s, rn);
9ee6e8bb 8473 if (insn & (1 << 23))
5e3f878a 8474 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8475 else
5e3f878a 8476 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8477 }
9ee6e8bb 8478 }
5e3f878a 8479 store_reg(s, rd, tmp);
191abaa2 8480 }
9ee6e8bb
PB
8481 } else {
8482 int shifter_out = 0;
8483 /* modified 12-bit immediate. */
8484 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8485 imm = (insn & 0xff);
8486 switch (shift) {
8487 case 0: /* XY */
8488 /* Nothing to do. */
8489 break;
8490 case 1: /* 00XY00XY */
8491 imm |= imm << 16;
8492 break;
8493 case 2: /* XY00XY00 */
8494 imm |= imm << 16;
8495 imm <<= 8;
8496 break;
8497 case 3: /* XYXYXYXY */
8498 imm |= imm << 16;
8499 imm |= imm << 8;
8500 break;
8501 default: /* Rotated constant. */
8502 shift = (shift << 1) | (imm >> 7);
8503 imm |= 0x80;
8504 imm = imm << (32 - shift);
8505 shifter_out = 1;
8506 break;
b5ff1b31 8507 }
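            /* These cases follow the Thumb-2 modified-immediate rules: e.g.
             * pattern 1 expands imm8 = 0xAB to 0x00AB00AB, while the rotated
             * form ORs in bit 7 and rotates right, so a rotation of 8
             * applied to imm8 = 0x60 produces 0xE0000000.
             */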
7d1b0095 8508 tmp2 = tcg_temp_new_i32();
3174f8e9 8509 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8510 rn = (insn >> 16) & 0xf;
3174f8e9 8511 if (rn == 15) {
7d1b0095 8512 tmp = tcg_temp_new_i32();
3174f8e9
FN
8513 tcg_gen_movi_i32(tmp, 0);
8514 } else {
8515 tmp = load_reg(s, rn);
8516 }
9ee6e8bb
PB
8517 op = (insn >> 21) & 0xf;
8518 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8519 shifter_out, tmp, tmp2))
9ee6e8bb 8520 goto illegal_op;
7d1b0095 8521 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8522 rd = (insn >> 8) & 0xf;
8523 if (rd != 15) {
3174f8e9
FN
8524 store_reg(s, rd, tmp);
8525 } else {
7d1b0095 8526 tcg_temp_free_i32(tmp);
2c0262af 8527 }
2c0262af 8528 }
9ee6e8bb
PB
8529 }
8530 break;
8531 case 12: /* Load/store single data item. */
8532 {
8533 int postinc = 0;
8534 int writeback = 0;
b0109805 8535 int user;
9ee6e8bb
PB
8536 if ((insn & 0x01100000) == 0x01000000) {
8537 if (disas_neon_ls_insn(env, s, insn))
c1713132 8538 goto illegal_op;
9ee6e8bb
PB
8539 break;
8540 }
a2fdc890
PM
8541 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8542 if (rs == 15) {
8543 if (!(insn & (1 << 20))) {
8544 goto illegal_op;
8545 }
8546 if (op != 2) {
8547 /* Byte or halfword load space with dest == r15 : memory hints.
8548 * Catch them early so we don't emit pointless addressing code.
8549 * This space is a mix of:
8550 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8551 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8552 * cores)
8553 * unallocated hints, which must be treated as NOPs
8554 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8555 * which is easiest for the decoding logic
8556 * Some space which must UNDEF
8557 */
8558 int op1 = (insn >> 23) & 3;
8559 int op2 = (insn >> 6) & 0x3f;
8560 if (op & 2) {
8561 goto illegal_op;
8562 }
8563 if (rn == 15) {
8564 /* UNPREDICTABLE or unallocated hint */
8565 return 0;
8566 }
8567 if (op1 & 1) {
8568 return 0; /* PLD* or unallocated hint */
8569 }
8570 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8571 return 0; /* PLD* or unallocated hint */
8572 }
8573 /* UNDEF space, or an UNPREDICTABLE */
8574 return 1;
8575 }
8576 }
b0109805 8577 user = IS_USER(s);
9ee6e8bb 8578 if (rn == 15) {
7d1b0095 8579 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8580 /* PC relative. */
8581 /* s->pc has already been incremented by 4. */
8582 imm = s->pc & 0xfffffffc;
8583 if (insn & (1 << 23))
8584 imm += insn & 0xfff;
8585 else
8586 imm -= insn & 0xfff;
b0109805 8587 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8588 } else {
b0109805 8589 addr = load_reg(s, rn);
9ee6e8bb
PB
8590 if (insn & (1 << 23)) {
8591 /* Positive offset. */
8592 imm = insn & 0xfff;
b0109805 8593 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8594 } else {
9ee6e8bb 8595 imm = insn & 0xff;
2a0308c5
PM
8596 switch ((insn >> 8) & 0xf) {
8597 case 0x0: /* Shifted Register. */
9ee6e8bb 8598 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8599 if (shift > 3) {
8600 tcg_temp_free_i32(addr);
18c9b560 8601 goto illegal_op;
2a0308c5 8602 }
b26eefb6 8603 tmp = load_reg(s, rm);
9ee6e8bb 8604 if (shift)
b26eefb6 8605 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8606 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8607 tcg_temp_free_i32(tmp);
9ee6e8bb 8608 break;
2a0308c5 8609 case 0xc: /* Negative offset. */
b0109805 8610 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8611 break;
2a0308c5 8612 case 0xe: /* User privilege. */
b0109805
PB
8613 tcg_gen_addi_i32(addr, addr, imm);
8614 user = 1;
9ee6e8bb 8615 break;
2a0308c5 8616 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8617 imm = -imm;
8618 /* Fall through. */
2a0308c5 8619 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8620 postinc = 1;
8621 writeback = 1;
8622 break;
2a0308c5 8623 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8624 imm = -imm;
8625 /* Fall through. */
2a0308c5 8626 case 0xf: /* Pre-increment. */
b0109805 8627 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8628 writeback = 1;
8629 break;
8630 default:
2a0308c5 8631 tcg_temp_free_i32(addr);
b7bcbe95 8632 goto illegal_op;
9ee6e8bb
PB
8633 }
8634 }
8635 }
9ee6e8bb
PB
8636 if (insn & (1 << 20)) {
8637 /* Load. */
a2fdc890
PM
8638 switch (op) {
8639 case 0: tmp = gen_ld8u(addr, user); break;
8640 case 4: tmp = gen_ld8s(addr, user); break;
8641 case 1: tmp = gen_ld16u(addr, user); break;
8642 case 5: tmp = gen_ld16s(addr, user); break;
8643 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8644 default:
8645 tcg_temp_free_i32(addr);
8646 goto illegal_op;
a2fdc890
PM
8647 }
8648 if (rs == 15) {
8649 gen_bx(s, tmp);
9ee6e8bb 8650 } else {
a2fdc890 8651 store_reg(s, rs, tmp);
9ee6e8bb
PB
8652 }
8653 } else {
8654 /* Store. */
b0109805 8655 tmp = load_reg(s, rs);
9ee6e8bb 8656 switch (op) {
b0109805
PB
8657 case 0: gen_st8(tmp, addr, user); break;
8658 case 1: gen_st16(tmp, addr, user); break;
8659 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8660 default:
8661 tcg_temp_free_i32(addr);
8662 goto illegal_op;
b7bcbe95 8663 }
2c0262af 8664 }
9ee6e8bb 8665 if (postinc)
b0109805
PB
8666 tcg_gen_addi_i32(addr, addr, imm);
8667 if (writeback) {
8668 store_reg(s, rn, addr);
8669 } else {
7d1b0095 8670 tcg_temp_free_i32(addr);
b0109805 8671 }
9ee6e8bb
PB
8672 }
8673 break;
8674 default:
8675 goto illegal_op;
2c0262af 8676 }
9ee6e8bb
PB
8677 return 0;
8678illegal_op:
8679 return 1;
2c0262af
FB
8680}
8681
9ee6e8bb 8682static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8683{
8684 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8685 int32_t offset;
8686 int i;
b26eefb6 8687 TCGv tmp;
d9ba4830 8688 TCGv tmp2;
b0109805 8689 TCGv addr;
99c475ab 8690
9ee6e8bb
PB
8691 if (s->condexec_mask) {
8692 cond = s->condexec_cond;
bedd2912
JB
8693 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8694 s->condlabel = gen_new_label();
8695 gen_test_cc(cond ^ 1, s->condlabel);
8696 s->condjmp = 1;
8697 }
9ee6e8bb
PB
8698 }
8699
b5ff1b31 8700 insn = lduw_code(s->pc);
99c475ab 8701 s->pc += 2;
b5ff1b31 8702
99c475ab
FB
8703 switch (insn >> 12) {
8704 case 0: case 1:
396e467c 8705
99c475ab
FB
8706 rd = insn & 7;
8707 op = (insn >> 11) & 3;
8708 if (op == 3) {
8709 /* add/subtract */
8710 rn = (insn >> 3) & 7;
396e467c 8711 tmp = load_reg(s, rn);
99c475ab
FB
8712 if (insn & (1 << 10)) {
8713 /* immediate */
7d1b0095 8714 tmp2 = tcg_temp_new_i32();
396e467c 8715 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8716 } else {
8717 /* reg */
8718 rm = (insn >> 6) & 7;
396e467c 8719 tmp2 = load_reg(s, rm);
99c475ab 8720 }
9ee6e8bb
PB
8721 if (insn & (1 << 9)) {
8722 if (s->condexec_mask)
396e467c 8723 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8724 else
396e467c 8725 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8726 } else {
8727 if (s->condexec_mask)
396e467c 8728 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8729 else
396e467c 8730 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8731 }
7d1b0095 8732 tcg_temp_free_i32(tmp2);
396e467c 8733 store_reg(s, rd, tmp);
99c475ab
FB
8734 } else {
8735 /* shift immediate */
8736 rm = (insn >> 3) & 7;
8737 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8738 tmp = load_reg(s, rm);
8739 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8740 if (!s->condexec_mask)
8741 gen_logic_CC(tmp);
8742 store_reg(s, rd, tmp);
99c475ab
FB
8743 }
8744 break;
8745 case 2: case 3:
8746 /* arithmetic large immediate */
8747 op = (insn >> 11) & 3;
8748 rd = (insn >> 8) & 0x7;
396e467c 8749 if (op == 0) { /* mov */
7d1b0095 8750 tmp = tcg_temp_new_i32();
396e467c 8751 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8752 if (!s->condexec_mask)
396e467c
FN
8753 gen_logic_CC(tmp);
8754 store_reg(s, rd, tmp);
8755 } else {
8756 tmp = load_reg(s, rd);
7d1b0095 8757 tmp2 = tcg_temp_new_i32();
396e467c
FN
8758 tcg_gen_movi_i32(tmp2, insn & 0xff);
8759 switch (op) {
8760 case 1: /* cmp */
8761 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8762 tcg_temp_free_i32(tmp);
8763 tcg_temp_free_i32(tmp2);
396e467c
FN
8764 break;
8765 case 2: /* add */
8766 if (s->condexec_mask)
8767 tcg_gen_add_i32(tmp, tmp, tmp2);
8768 else
8769 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 8770 tcg_temp_free_i32(tmp2);
396e467c
FN
8771 store_reg(s, rd, tmp);
8772 break;
8773 case 3: /* sub */
8774 if (s->condexec_mask)
8775 tcg_gen_sub_i32(tmp, tmp, tmp2);
8776 else
8777 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 8778 tcg_temp_free_i32(tmp2);
396e467c
FN
8779 store_reg(s, rd, tmp);
8780 break;
8781 }
99c475ab 8782 }
99c475ab
FB
8783 break;
8784 case 4:
8785 if (insn & (1 << 11)) {
8786 rd = (insn >> 8) & 7;
5899f386
FB
8787 /* load pc-relative. Bit 1 of PC is ignored. */
8788 val = s->pc + 2 + ((insn & 0xff) * 4);
8789 val &= ~(uint32_t)2;
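            /* Equivalent to Align(PC, 4) + imm8 * 4 with PC being this
             * instruction's address + 4, i.e. the usual literal-pool
             * addressing for ldr (literal).
             */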
7d1b0095 8790 addr = tcg_temp_new_i32();
b0109805
PB
8791 tcg_gen_movi_i32(addr, val);
8792 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 8793 tcg_temp_free_i32(addr);
b0109805 8794 store_reg(s, rd, tmp);
99c475ab
FB
8795 break;
8796 }
8797 if (insn & (1 << 10)) {
8798 /* data processing extended or blx */
8799 rd = (insn & 7) | ((insn >> 4) & 8);
8800 rm = (insn >> 3) & 0xf;
8801 op = (insn >> 8) & 3;
8802 switch (op) {
8803 case 0: /* add */
396e467c
FN
8804 tmp = load_reg(s, rd);
8805 tmp2 = load_reg(s, rm);
8806 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8807 tcg_temp_free_i32(tmp2);
396e467c 8808 store_reg(s, rd, tmp);
99c475ab
FB
8809 break;
8810 case 1: /* cmp */
396e467c
FN
8811 tmp = load_reg(s, rd);
8812 tmp2 = load_reg(s, rm);
8813 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8814 tcg_temp_free_i32(tmp2);
8815 tcg_temp_free_i32(tmp);
99c475ab
FB
8816 break;
8817 case 2: /* mov/cpy */
396e467c
FN
8818 tmp = load_reg(s, rm);
8819 store_reg(s, rd, tmp);
99c475ab
FB
8820 break;
8821 case 3:/* branch [and link] exchange thumb register */
b0109805 8822 tmp = load_reg(s, rm);
99c475ab 8823 if (insn & (1 << 7)) {
be5e7a76 8824 ARCH(5);
99c475ab 8825 val = (uint32_t)s->pc | 1;
7d1b0095 8826 tmp2 = tcg_temp_new_i32();
b0109805
PB
8827 tcg_gen_movi_i32(tmp2, val);
8828 store_reg(s, 14, tmp2);
99c475ab 8829 }
be5e7a76 8830 /* already thumb, no need to check */
d9ba4830 8831 gen_bx(s, tmp);
99c475ab
FB
8832 break;
8833 }
8834 break;
8835 }
8836
8837 /* data processing register */
8838 rd = insn & 7;
8839 rm = (insn >> 3) & 7;
8840 op = (insn >> 6) & 0xf;
8841 if (op == 2 || op == 3 || op == 4 || op == 7) {
8842 /* the shift/rotate ops want the operands backwards */
8843 val = rm;
8844 rm = rd;
8845 rd = val;
8846 val = 1;
8847 } else {
8848 val = 0;
8849 }
8850
396e467c 8851 if (op == 9) { /* neg */
7d1b0095 8852 tmp = tcg_temp_new_i32();
396e467c
FN
8853 tcg_gen_movi_i32(tmp, 0);
8854 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8855 tmp = load_reg(s, rd);
8856 } else {
8857 TCGV_UNUSED(tmp);
8858 }
99c475ab 8859
396e467c 8860 tmp2 = load_reg(s, rm);
5899f386 8861 switch (op) {
99c475ab 8862 case 0x0: /* and */
396e467c 8863 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 8864 if (!s->condexec_mask)
396e467c 8865 gen_logic_CC(tmp);
99c475ab
FB
8866 break;
8867 case 0x1: /* eor */
396e467c 8868 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 8869 if (!s->condexec_mask)
396e467c 8870 gen_logic_CC(tmp);
99c475ab
FB
8871 break;
8872 case 0x2: /* lsl */
9ee6e8bb 8873 if (s->condexec_mask) {
396e467c 8874 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 8875 } else {
396e467c
FN
8876 gen_helper_shl_cc(tmp2, tmp2, tmp);
8877 gen_logic_CC(tmp2);
9ee6e8bb 8878 }
99c475ab
FB
8879 break;
8880 case 0x3: /* lsr */
9ee6e8bb 8881 if (s->condexec_mask) {
396e467c 8882 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 8883 } else {
396e467c
FN
8884 gen_helper_shr_cc(tmp2, tmp2, tmp);
8885 gen_logic_CC(tmp2);
9ee6e8bb 8886 }
99c475ab
FB
8887 break;
8888 case 0x4: /* asr */
9ee6e8bb 8889 if (s->condexec_mask) {
396e467c 8890 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 8891 } else {
396e467c
FN
8892 gen_helper_sar_cc(tmp2, tmp2, tmp);
8893 gen_logic_CC(tmp2);
9ee6e8bb 8894 }
99c475ab
FB
8895 break;
8896 case 0x5: /* adc */
9ee6e8bb 8897 if (s->condexec_mask)
396e467c 8898 gen_adc(tmp, tmp2);
9ee6e8bb 8899 else
396e467c 8900 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
8901 break;
8902 case 0x6: /* sbc */
9ee6e8bb 8903 if (s->condexec_mask)
396e467c 8904 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 8905 else
396e467c 8906 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
8907 break;
8908 case 0x7: /* ror */
9ee6e8bb 8909 if (s->condexec_mask) {
f669df27
AJ
8910 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8911 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 8912 } else {
396e467c
FN
8913 gen_helper_ror_cc(tmp2, tmp2, tmp);
8914 gen_logic_CC(tmp2);
9ee6e8bb 8915 }
99c475ab
FB
8916 break;
8917 case 0x8: /* tst */
396e467c
FN
8918 tcg_gen_and_i32(tmp, tmp, tmp2);
8919 gen_logic_CC(tmp);
99c475ab 8920 rd = 16;
5899f386 8921 break;
99c475ab 8922 case 0x9: /* neg */
9ee6e8bb 8923 if (s->condexec_mask)
396e467c 8924 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 8925 else
396e467c 8926 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8927 break;
8928 case 0xa: /* cmp */
396e467c 8929 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
8930 rd = 16;
8931 break;
8932 case 0xb: /* cmn */
396e467c 8933 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
8934 rd = 16;
8935 break;
8936 case 0xc: /* orr */
396e467c 8937 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 8938 if (!s->condexec_mask)
396e467c 8939 gen_logic_CC(tmp);
99c475ab
FB
8940 break;
8941 case 0xd: /* mul */
7b2919a0 8942 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 8943 if (!s->condexec_mask)
396e467c 8944 gen_logic_CC(tmp);
99c475ab
FB
8945 break;
8946 case 0xe: /* bic */
f669df27 8947 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 8948 if (!s->condexec_mask)
396e467c 8949 gen_logic_CC(tmp);
99c475ab
FB
8950 break;
8951 case 0xf: /* mvn */
396e467c 8952 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 8953 if (!s->condexec_mask)
396e467c 8954 gen_logic_CC(tmp2);
99c475ab 8955 val = 1;
5899f386 8956 rm = rd;
99c475ab
FB
8957 break;
8958 }
8959 if (rd != 16) {
396e467c
FN
8960 if (val) {
8961 store_reg(s, rm, tmp2);
8962 if (op != 0xf)
7d1b0095 8963 tcg_temp_free_i32(tmp);
396e467c
FN
8964 } else {
8965 store_reg(s, rd, tmp);
7d1b0095 8966 tcg_temp_free_i32(tmp2);
396e467c
FN
8967 }
8968 } else {
7d1b0095
PM
8969 tcg_temp_free_i32(tmp);
8970 tcg_temp_free_i32(tmp2);
99c475ab
FB
8971 }
8972 break;
8973
8974 case 5:
8975 /* load/store register offset. */
8976 rd = insn & 7;
8977 rn = (insn >> 3) & 7;
8978 rm = (insn >> 6) & 7;
8979 op = (insn >> 9) & 7;
b0109805 8980 addr = load_reg(s, rn);
b26eefb6 8981 tmp = load_reg(s, rm);
b0109805 8982 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8983 tcg_temp_free_i32(tmp);
99c475ab
FB
8984
8985 if (op < 3) /* store */
b0109805 8986 tmp = load_reg(s, rd);
99c475ab
FB
8987
8988 switch (op) {
8989 case 0: /* str */
b0109805 8990 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8991 break;
8992 case 1: /* strh */
b0109805 8993 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8994 break;
8995 case 2: /* strb */
b0109805 8996 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8997 break;
8998 case 3: /* ldrsb */
b0109805 8999 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9000 break;
9001 case 4: /* ldr */
b0109805 9002 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9003 break;
9004 case 5: /* ldrh */
b0109805 9005 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9006 break;
9007 case 6: /* ldrb */
b0109805 9008 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9009 break;
9010 case 7: /* ldrsh */
b0109805 9011 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9012 break;
9013 }
9014 if (op >= 3) /* load */
b0109805 9015 store_reg(s, rd, tmp);
7d1b0095 9016 tcg_temp_free_i32(addr);
99c475ab
FB
9017 break;
9018
9019 case 6:
9020 /* load/store word immediate offset */
9021 rd = insn & 7;
9022 rn = (insn >> 3) & 7;
b0109805 9023 addr = load_reg(s, rn);
99c475ab 9024 val = (insn >> 4) & 0x7c;
b0109805 9025 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9026
9027 if (insn & (1 << 11)) {
9028 /* load */
b0109805
PB
9029 tmp = gen_ld32(addr, IS_USER(s));
9030 store_reg(s, rd, tmp);
99c475ab
FB
9031 } else {
9032 /* store */
b0109805
PB
9033 tmp = load_reg(s, rd);
9034 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9035 }
7d1b0095 9036 tcg_temp_free_i32(addr);
99c475ab
FB
9037 break;
9038
9039 case 7:
9040 /* load/store byte immediate offset */
9041 rd = insn & 7;
9042 rn = (insn >> 3) & 7;
b0109805 9043 addr = load_reg(s, rn);
99c475ab 9044 val = (insn >> 6) & 0x1f;
b0109805 9045 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9046
9047 if (insn & (1 << 11)) {
9048 /* load */
b0109805
PB
9049 tmp = gen_ld8u(addr, IS_USER(s));
9050 store_reg(s, rd, tmp);
99c475ab
FB
9051 } else {
9052 /* store */
b0109805
PB
9053 tmp = load_reg(s, rd);
9054 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9055 }
7d1b0095 9056 tcg_temp_free_i32(addr);
99c475ab
FB
9057 break;
9058
9059 case 8:
9060 /* load/store halfword immediate offset */
9061 rd = insn & 7;
9062 rn = (insn >> 3) & 7;
b0109805 9063 addr = load_reg(s, rn);
99c475ab 9064 val = (insn >> 5) & 0x3e;
b0109805 9065 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9066
9067 if (insn & (1 << 11)) {
9068 /* load */
b0109805
PB
9069 tmp = gen_ld16u(addr, IS_USER(s));
9070 store_reg(s, rd, tmp);
99c475ab
FB
9071 } else {
9072 /* store */
b0109805
PB
9073 tmp = load_reg(s, rd);
9074 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9075 }
7d1b0095 9076 tcg_temp_free_i32(addr);
99c475ab
FB
9077 break;
9078
9079 case 9:
9080 /* load/store from stack */
9081 rd = (insn >> 8) & 7;
b0109805 9082 addr = load_reg(s, 13);
99c475ab 9083 val = (insn & 0xff) * 4;
b0109805 9084 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9085
9086 if (insn & (1 << 11)) {
9087 /* load */
b0109805
PB
9088 tmp = gen_ld32(addr, IS_USER(s));
9089 store_reg(s, rd, tmp);
99c475ab
FB
9090 } else {
9091 /* store */
b0109805
PB
9092 tmp = load_reg(s, rd);
9093 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9094 }
7d1b0095 9095 tcg_temp_free_i32(addr);
99c475ab
FB
9096 break;
9097
9098 case 10:
9099 /* add to high reg */
9100 rd = (insn >> 8) & 7;
5899f386
FB
9101 if (insn & (1 << 11)) {
9102 /* SP */
5e3f878a 9103 tmp = load_reg(s, 13);
5899f386
FB
9104 } else {
9105 /* PC. bit 1 is ignored. */
7d1b0095 9106 tmp = tcg_temp_new_i32();
5e3f878a 9107 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9108 }
99c475ab 9109 val = (insn & 0xff) * 4;
5e3f878a
PB
9110 tcg_gen_addi_i32(tmp, tmp, val);
9111 store_reg(s, rd, tmp);
99c475ab
FB
9112 break;
9113
9114 case 11:
9115 /* misc */
9116 op = (insn >> 8) & 0xf;
9117 switch (op) {
9118 case 0:
9119 /* adjust stack pointer */
b26eefb6 9120 tmp = load_reg(s, 13);
99c475ab
FB
9121 val = (insn & 0x7f) * 4;
9122 if (insn & (1 << 7))
6a0d8a1d 9123 val = -(int32_t)val;
b26eefb6
PB
9124 tcg_gen_addi_i32(tmp, tmp, val);
9125 store_reg(s, 13, tmp);
99c475ab
FB
9126 break;
9127
9ee6e8bb
PB
9128 case 2: /* sign/zero extend. */
9129 ARCH(6);
9130 rd = insn & 7;
9131 rm = (insn >> 3) & 7;
b0109805 9132 tmp = load_reg(s, rm);
9ee6e8bb 9133 switch ((insn >> 6) & 3) {
b0109805
PB
9134 case 0: gen_sxth(tmp); break;
9135 case 1: gen_sxtb(tmp); break;
9136 case 2: gen_uxth(tmp); break;
9137 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9138 }
b0109805 9139 store_reg(s, rd, tmp);
9ee6e8bb 9140 break;
99c475ab
FB
9141 case 4: case 5: case 0xc: case 0xd:
9142 /* push/pop */
b0109805 9143 addr = load_reg(s, 13);
5899f386
FB
9144 if (insn & (1 << 8))
9145 offset = 4;
99c475ab 9146 else
5899f386
FB
9147 offset = 0;
9148 for (i = 0; i < 8; i++) {
9149 if (insn & (1 << i))
9150 offset += 4;
9151 }
9152 if ((insn & (1 << 11)) == 0) {
b0109805 9153 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9154 }
99c475ab
FB
9155 for (i = 0; i < 8; i++) {
9156 if (insn & (1 << i)) {
9157 if (insn & (1 << 11)) {
9158 /* pop */
b0109805
PB
9159 tmp = gen_ld32(addr, IS_USER(s));
9160 store_reg(s, i, tmp);
99c475ab
FB
9161 } else {
9162 /* push */
b0109805
PB
9163 tmp = load_reg(s, i);
9164 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9165 }
5899f386 9166 /* advance to the next address. */
b0109805 9167 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9168 }
9169 }
a50f5b91 9170 TCGV_UNUSED(tmp);
99c475ab
FB
9171 if (insn & (1 << 8)) {
9172 if (insn & (1 << 11)) {
9173 /* pop pc */
b0109805 9174 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9175 /* don't set the pc until the rest of the instruction
9176 has completed */
9177 } else {
9178 /* push lr */
b0109805
PB
9179 tmp = load_reg(s, 14);
9180 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9181 }
b0109805 9182 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9183 }
5899f386 9184 if ((insn & (1 << 11)) == 0) {
b0109805 9185 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9186 }
99c475ab 9187 /* write back the new stack pointer */
b0109805 9188 store_reg(s, 13, addr);
99c475ab 9189 /* set the new PC value */
be5e7a76
DES
9190 if ((insn & 0x0900) == 0x0900) {
9191 store_reg_from_load(env, s, 15, tmp);
9192 }
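            /* Example: push {r0-r2, lr} sets bit 8 and three list bits, so
             * offset == 16; the registers are stored at ascending addresses
             * below the old SP, and the new SP, 16 bytes lower, was written
             * back to r13 above.
             */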
99c475ab
FB
9193 break;
9194
9ee6e8bb
PB
9195 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9196 rm = insn & 7;
d9ba4830 9197 tmp = load_reg(s, rm);
9ee6e8bb
PB
9198 s->condlabel = gen_new_label();
9199 s->condjmp = 1;
9200 if (insn & (1 << 11))
cb63669a 9201 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9202 else
cb63669a 9203 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9204 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9205 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9206 val = (uint32_t)s->pc + 2;
9207 val += offset;
9208 gen_jmp(s, val);
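            /* offset is i:imm5:0, i.e. 0..126 bytes, so cbz/cbnz can only
             * branch forwards from this instruction's address + 4.
             */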
9209 break;
9210
9211 case 15: /* IT, nop-hint. */
9212 if ((insn & 0xf) == 0) {
9213 gen_nop_hint(s, (insn >> 4) & 0xf);
9214 break;
9215 }
9216 /* If Then. */
9217 s->condexec_cond = (insn >> 4) & 0xe;
9218 s->condexec_mask = insn & 0x1f;
9219 /* No actual code generated for this insn, just setup state. */
9220 break;
9221
06c949e6 9222 case 0xe: /* bkpt */
be5e7a76 9223 ARCH(5);
bc4a0de0 9224 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9225 break;
9226
9ee6e8bb
PB
9227 case 0xa: /* rev */
9228 ARCH(6);
9229 rn = (insn >> 3) & 0x7;
9230 rd = insn & 0x7;
b0109805 9231 tmp = load_reg(s, rn);
9ee6e8bb 9232 switch ((insn >> 6) & 3) {
66896cb8 9233 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9234 case 1: gen_rev16(tmp); break;
9235 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9236 default: goto illegal_op;
9237 }
b0109805 9238 store_reg(s, rd, tmp);
9ee6e8bb
PB
9239 break;
9240
9241 case 6: /* cps */
9242 ARCH(6);
9243 if (IS_USER(s))
9244 break;
9245 if (IS_M(env)) {
8984bd2e 9246 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9247 /* PRIMASK */
8984bd2e
PB
9248 if (insn & 1) {
9249 addr = tcg_const_i32(16);
9250 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9251 tcg_temp_free_i32(addr);
8984bd2e 9252 }
9ee6e8bb 9253 /* FAULTMASK */
8984bd2e
PB
9254 if (insn & 2) {
9255 addr = tcg_const_i32(17);
9256 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9257 tcg_temp_free_i32(addr);
8984bd2e 9258 }
b75263d6 9259 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9260 gen_lookup_tb(s);
9261 } else {
9262 if (insn & (1 << 4))
9263 shift = CPSR_A | CPSR_I | CPSR_F;
9264 else
9265 shift = 0;
fa26df03 9266 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9267 }
9268 break;
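/* Illustration of the non-M path above: bits [2:0] are moved up by 6 to
 * line up with CPSR.F/I/A, and bit 4 chooses whether those bits are set
 * (interrupts disabled) or cleared.  Assuming the standard Thumb CPS
 * encoding, CPSID i is 0xb672: bit 4 is set, (insn & 7) << 6 equals
 * CPSR_I, so only the I bit is written and IRQs are masked.  On v7-M
 * the same choice goes through the v7m_msr helper with register
 * numbers 16 (PRIMASK) and 17 (FAULTMASK) instead. */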
9269
99c475ab
FB
9270 default:
9271 goto undef;
9272 }
9273 break;
9274
9275 case 12:
9276 /* load/store multiple */
9277 rn = (insn >> 8) & 0x7;
b0109805 9278 addr = load_reg(s, rn);
99c475ab
FB
9279 for (i = 0; i < 8; i++) {
9280 if (insn & (1 << i)) {
99c475ab
FB
9281 if (insn & (1 << 11)) {
9282 /* load */
b0109805
PB
9283 tmp = gen_ld32(addr, IS_USER(s));
9284 store_reg(s, i, tmp);
99c475ab
FB
9285 } else {
9286 /* store */
b0109805
PB
9287 tmp = load_reg(s, i);
9288 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9289 }
5899f386 9290 /* advance to the next address */
b0109805 9291 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9292 }
9293 }
5899f386 9294 /* Base register writeback. */
b0109805
PB
9295 if ((insn & (1 << rn)) == 0) {
9296 store_reg(s, rn, addr);
9297 } else {
7d1b0095 9298 tcg_temp_free_i32(addr);
b0109805 9299 }
99c475ab
FB
9300 break;
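/* Illustration of the writeback rule above: for STMIA r2!, {r0, r1}
 * (0xc203, assuming the standard encoding) rn = 2 is not in the
 * register list, so the incremented address (old r2 + 8) is written
 * back to r2.  For LDMIA r2, {r1, r2} (0xc806) rn is in the list, so
 * the loaded value wins and the temporary address is simply freed. */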
9301
9302 case 13:
9303 /* conditional branch or swi */
9304 cond = (insn >> 8) & 0xf;
9305 if (cond == 0xe)
9306 goto undef;
9307
9308 if (cond == 0xf) {
9309 /* swi */
422ebf69 9310 gen_set_pc_im(s->pc);
9ee6e8bb 9311 s->is_jmp = DISAS_SWI;
99c475ab
FB
9312 break;
9313 }
9314 /* generate a conditional jump to next instruction */
e50e6a20 9315 s->condlabel = gen_new_label();
d9ba4830 9316 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9317 s->condjmp = 1;
99c475ab
FB
9318
9319 /* jump to the offset */
5899f386 9320 val = (uint32_t)s->pc + 2;
99c475ab 9321 offset = ((int32_t)insn << 24) >> 24;
5899f386 9322 val += offset << 1;
8aaca4c0 9323 gen_jmp(s, val);
99c475ab
FB
9324 break;
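/* Illustration: the shift pair sign-extends the low 8 bits of the
 * instruction, and the target is (instruction address + 4) + 2 * offset.
 * For example 0xd0fe (BEQ with imm8 = 0xfe, assuming the standard
 * encoding) gives offset = -2, i.e. a branch back to the instruction
 * itself. */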
9325
9326 case 14:
358bf29e 9327 if (insn & (1 << 11)) {
9ee6e8bb
PB
9328 if (disas_thumb2_insn(env, s, insn))
9329 goto undef32;
358bf29e
PB
9330 break;
9331 }
9ee6e8bb 9332 /* unconditional branch */
99c475ab
FB
9333 val = (uint32_t)s->pc;
9334 offset = ((int32_t)insn << 21) >> 21;
9335 val += (offset << 1) + 2;
8aaca4c0 9336 gen_jmp(s, val);
99c475ab
FB
9337 break;
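/* Illustration: here the low 11 bits are sign-extended, so 0xe7fe
 * (B with imm11 = 0x7fe, assuming the standard encoding) gives
 * offset = -2 and val = s->pc - 2, the address of the branch itself:
 * the classic Thumb branch-to-self idle loop. */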
9338
9339 case 15:
9ee6e8bb 9340 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9341 goto undef32;
9ee6e8bb 9342 break;
99c475ab
FB
9343 }
9344 return;
9ee6e8bb 9345undef32:
bc4a0de0 9346 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9347 return;
9348illegal_op:
99c475ab 9349undef:
bc4a0de0 9350 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9351}
9352
2c0262af
FB
9353/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9354 basic block 'tb'. If search_pc is TRUE, also generate PC
9355 information for each intermediate instruction. */
2cfc5f17
TS
9356static inline void gen_intermediate_code_internal(CPUState *env,
9357 TranslationBlock *tb,
9358 int search_pc)
2c0262af
FB
9359{
9360 DisasContext dc1, *dc = &dc1;
a1d1bb31 9361 CPUBreakpoint *bp;
2c0262af
FB
9362 uint16_t *gen_opc_end;
9363 int j, lj;
0fa85d43 9364 target_ulong pc_start;
b5ff1b31 9365 uint32_t next_page_start;
2e70f6ef
PB
9366 int num_insns;
9367 int max_insns;
3b46e624 9368
2c0262af 9369 /* generate intermediate code */
0fa85d43 9370 pc_start = tb->pc;
3b46e624 9371
2c0262af
FB
9372 dc->tb = tb;
9373
2c0262af 9374 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9375
9376 dc->is_jmp = DISAS_NEXT;
9377 dc->pc = pc_start;
8aaca4c0 9378 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9379 dc->condjmp = 0;
7204ab88 9380 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9381 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9382 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9383#if !defined(CONFIG_USER_ONLY)
61f74d6a 9384 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9385#endif
5df8bac1 9386 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9387 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9388 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9389 cpu_F0s = tcg_temp_new_i32();
9390 cpu_F1s = tcg_temp_new_i32();
9391 cpu_F0d = tcg_temp_new_i64();
9392 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9393 cpu_V0 = cpu_F0d;
9394 cpu_V1 = cpu_F1d;
e677137d 9395 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9396 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9397 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9398 lj = -1;
2e70f6ef
PB
9399 num_insns = 0;
9400 max_insns = tb->cflags & CF_COUNT_MASK;
9401 if (max_insns == 0)
9402 max_insns = CF_COUNT_MASK;
9403
9404 gen_icount_start();
e12ce78d 9405
3849902c
PM
9406 tcg_clear_temp_count();
9407
e12ce78d
PM
9408 /* A note on handling of the condexec (IT) bits:
9409 *
9410 * We want to avoid the overhead of having to write the updated condexec
9411 * bits back to the CPUState for every instruction in an IT block. So:
9412 * (1) if the condexec bits are not already zero then we write
9413 * zero back into the CPUState now. This avoids complications trying
9414 * to do it at the end of the block. (For example if we don't do this
9415 * it's hard to identify whether we can safely skip writing condexec
9416 * at the end of the TB, which we definitely want to do for the case
9417 * where a TB doesn't do anything with the IT state at all.)
9418 * (2) if we are going to leave the TB then we call gen_set_condexec()
9419 * which will write the correct value into CPUState if zero is wrong.
9420 * This is done both for leaving the TB at the end, and for leaving
9421 * it because of an exception we know will happen, which is done in
9422 * gen_exception_insn(). The latter is necessary because we need to
9423 * leave the TB with the PC/IT state just prior to execution of the
9424 * instruction which caused the exception.
9425 * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
9426 * then the CPUState will be wrong and we need to reset it.
9427 * This is handled in the same way as restoration of the
9428 * PC in these situations: we will be called again with search_pc=1
9429 * and generate a mapping of the condexec bits for each PC in
9430 * gen_opc_condexec_bits[]. gen_pc_load() then uses this to restore
9431 * the condexec bits.
9432 *
9433 * Note that there are no instructions which can read the condexec
9434 * bits, and none which can write non-static values to them, so
9435 * we don't need to care about whether CPUState is correct in the
9436 * middle of a TB.
9437 */
9438
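/* Note: the snapshot layout is simply the inverse of the unpacking done
 * when the DisasContext was initialised above: the TB flag value is
 * split with ((x & 0xf) << 1) and (x >> 4), and gen_opc_condexec_bits[]
 * re-packs it as (condexec_cond << 4) | (condexec_mask >> 1), which
 * gen_pc_load() can copy straight into env->condexec_bits. */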
9ee6e8bb
PB
9439 /* Reset the conditional execution bits immediately. This avoids
9440 complications trying to do it at the end of the block. */
98eac7ca 9441 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9442 {
7d1b0095 9443 TCGv tmp = tcg_temp_new_i32();
8f01245e 9444 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9445 store_cpu_field(tmp, condexec_bits);
8f01245e 9446 }
2c0262af 9447 do {
fbb4a2e3
PB
9448#ifdef CONFIG_USER_ONLY
9449 /* Intercept jump to the magic kernel page. */
9450 if (dc->pc >= 0xffff0000) {
9451 /* We always get here via a jump, so we know we are not in a
9452 conditional execution block. */
9453 gen_exception(EXCP_KERNEL_TRAP);
9454 dc->is_jmp = DISAS_UPDATE;
9455 break;
9456 }
9457#else
9ee6e8bb
PB
9458 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9459 /* We always get here via a jump, so we know we are not in a
9460 conditional execution block. */
d9ba4830 9461 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9462 dc->is_jmp = DISAS_UPDATE;
9463 break;
9ee6e8bb
PB
9464 }
9465#endif
9466
72cf2d4f
BS
9467 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9468 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9469 if (bp->pc == dc->pc) {
bc4a0de0 9470 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9471 /* Advance PC so that clearing the breakpoint will
9472 invalidate this TB. */
9473 dc->pc += 2;
9474 goto done_generating;
1fddef4b
FB
9475 break;
9476 }
9477 }
9478 }
2c0262af
FB
9479 if (search_pc) {
9480 j = gen_opc_ptr - gen_opc_buf;
9481 if (lj < j) {
9482 lj++;
9483 while (lj < j)
9484 gen_opc_instr_start[lj++] = 0;
9485 }
0fa85d43 9486 gen_opc_pc[lj] = dc->pc;
e12ce78d 9487 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9488 gen_opc_instr_start[lj] = 1;
2e70f6ef 9489 gen_opc_icount[lj] = num_insns;
2c0262af 9490 }
e50e6a20 9491
2e70f6ef
PB
9492 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9493 gen_io_start();
9494
5642463a
PM
9495 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9496 tcg_gen_debug_insn_start(dc->pc);
9497 }
9498
7204ab88 9499 if (dc->thumb) {
9ee6e8bb
PB
9500 disas_thumb_insn(env, dc);
9501 if (dc->condexec_mask) {
9502 dc->condexec_cond = (dc->condexec_cond & 0xe)
9503 | ((dc->condexec_mask >> 4) & 1);
9504 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9505 if (dc->condexec_mask == 0) {
9506 dc->condexec_cond = 0;
9507 }
9508 }
9509 } else {
9510 disas_arm_insn(env, dc);
9511 }
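/* Illustration of the IT advance above (assuming the standard IT
 * encoding): for ITE EQ (0xbf0c) the IT instruction itself leaves
 * condexec_cond = 0 and condexec_mask = 0x0c; the first advance keeps
 * cond = EQ and shifts the mask to 0x18, so the 'T' instruction is
 * translated under EQ; the next advance folds the new top bit in,
 * giving cond = NE with mask 0x10 for the 'E' instruction; the advance
 * after that shifts the mask out to zero and clears the state. */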
e50e6a20
FB
9512
9513 if (dc->condjmp && !dc->is_jmp) {
9514 gen_set_label(dc->condlabel);
9515 dc->condjmp = 0;
9516 }
3849902c
PM
9517
9518 if (tcg_check_temp_count()) {
9519 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9520 }
9521
aaf2d97d 9522 /* Translation stops when a conditional branch is encountered.
e50e6a20 9523 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9524 * Also stop translation when a page boundary is reached. This
bf20dc07 9525 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9526 num_insns ++;
1fddef4b
FB
9527 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9528 !env->singlestep_enabled &&
1b530a6d 9529 !singlestep &&
2e70f6ef
PB
9530 dc->pc < next_page_start &&
9531 num_insns < max_insns);
9532
9533 if (tb->cflags & CF_LAST_IO) {
9534 if (dc->condjmp) {
9535 /* FIXME: This can theoretically happen with self-modifying
9536 code. */
9537 cpu_abort(env, "IO on conditional branch instruction");
9538 }
9539 gen_io_end();
9540 }
9ee6e8bb 9541
b5ff1b31 9542 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9543 instruction was a conditional branch or trap, and the PC has
9544 already been written. */
551bd27f 9545 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9546 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9547 if (dc->condjmp) {
9ee6e8bb
PB
9548 gen_set_condexec(dc);
9549 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9550 gen_exception(EXCP_SWI);
9ee6e8bb 9551 } else {
d9ba4830 9552 gen_exception(EXCP_DEBUG);
9ee6e8bb 9553 }
e50e6a20
FB
9554 gen_set_label(dc->condlabel);
9555 }
9556 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9557 gen_set_pc_im(dc->pc);
e50e6a20 9558 dc->condjmp = 0;
8aaca4c0 9559 }
9ee6e8bb
PB
9560 gen_set_condexec(dc);
9561 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9562 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9563 } else {
9564 /* FIXME: Single stepping a WFI insn will not halt
9565 the CPU. */
d9ba4830 9566 gen_exception(EXCP_DEBUG);
9ee6e8bb 9567 }
8aaca4c0 9568 } else {
9ee6e8bb
PB
9569 /* While branches must always occur at the end of an IT block,
9570 there are a few other things that can cause us to terminate
9571 the TB in the middle of an IT block:
9572 - Exception generating instructions (bkpt, swi, undefined).
9573 - Page boundaries.
9574 - Hardware watchpoints.
9575 Hardware breakpoints have already been handled and skip this code.
9576 */
9577 gen_set_condexec(dc);
8aaca4c0 9578 switch(dc->is_jmp) {
8aaca4c0 9579 case DISAS_NEXT:
6e256c93 9580 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9581 break;
9582 default:
9583 case DISAS_JUMP:
9584 case DISAS_UPDATE:
9585 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9586 tcg_gen_exit_tb(0);
8aaca4c0
FB
9587 break;
9588 case DISAS_TB_JUMP:
9589 /* nothing more to generate */
9590 break;
9ee6e8bb 9591 case DISAS_WFI:
d9ba4830 9592 gen_helper_wfi();
9ee6e8bb
PB
9593 break;
9594 case DISAS_SWI:
d9ba4830 9595 gen_exception(EXCP_SWI);
9ee6e8bb 9596 break;
8aaca4c0 9597 }
e50e6a20
FB
9598 if (dc->condjmp) {
9599 gen_set_label(dc->condlabel);
9ee6e8bb 9600 gen_set_condexec(dc);
6e256c93 9601 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9602 dc->condjmp = 0;
9603 }
2c0262af 9604 }
2e70f6ef 9605
9ee6e8bb 9606done_generating:
2e70f6ef 9607 gen_icount_end(tb, num_insns);
2c0262af
FB
9608 *gen_opc_ptr = INDEX_op_end;
9609
9610#ifdef DEBUG_DISAS
8fec2b8c 9611 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9612 qemu_log("----------------\n");
9613 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9614 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9615 qemu_log("\n");
2c0262af
FB
9616 }
9617#endif
b5ff1b31
FB
9618 if (search_pc) {
9619 j = gen_opc_ptr - gen_opc_buf;
9620 lj++;
9621 while (lj <= j)
9622 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9623 } else {
2c0262af 9624 tb->size = dc->pc - pc_start;
2e70f6ef 9625 tb->icount = num_insns;
b5ff1b31 9626 }
2c0262af
FB
9627}
9628
2cfc5f17 9629void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9630{
2cfc5f17 9631 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9632}
9633
2cfc5f17 9634void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9635{
2cfc5f17 9636 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9637}
9638
b5ff1b31
FB
9639static const char *cpu_mode_names[16] = {
9640 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9641 "???", "???", "???", "und", "???", "???", "???", "sys"
9642};
9ee6e8bb 9643
9a78eead 9644void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9645 int flags)
2c0262af
FB
9646{
9647 int i;
06e80fc9 9648#if 0
bc380d17 9649 union {
b7bcbe95
FB
9650 uint32_t i;
9651 float s;
9652 } s0, s1;
9653 CPU_DoubleU d;
a94a6abf
PB
9654 /* ??? This assumes float64 and double have the same layout.
9655 Oh well, it's only debug dumps. */
9656 union {
9657 float64 f64;
9658 double d;
9659 } d0;
06e80fc9 9660#endif
b5ff1b31 9661 uint32_t psr;
2c0262af
FB
9662
9663 for (i = 0; i < 16; i++) {
7fe48483 9664 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9665 if ((i % 4) == 3)
7fe48483 9666 cpu_fprintf(f, "\n");
2c0262af 9667 else
7fe48483 9668 cpu_fprintf(f, " ");
2c0262af 9669 }
b5ff1b31 9670 psr = cpsr_read(env);
687fa640
TS
9671 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9672 psr,
b5ff1b31
FB
9673 psr & (1 << 31) ? 'N' : '-',
9674 psr & (1 << 30) ? 'Z' : '-',
9675 psr & (1 << 29) ? 'C' : '-',
9676 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9677 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9678 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9679
5e3f878a 9680#if 0
b7bcbe95 9681 for (i = 0; i < 16; i++) {
8e96005d
FB
9682 d.d = env->vfp.regs[i];
9683 s0.i = d.l.lower;
9684 s1.i = d.l.upper;
a94a6abf
PB
9685 d0.f64 = d.d;
9686 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9687 i * 2, (int)s0.i, s0.s,
a94a6abf 9688 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9689 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9690 d0.d);
b7bcbe95 9691 }
40f137e1 9692 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9693#endif
2c0262af 9694}
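/* Illustration of the output produced above: the register loop prints
 * four "R%02d=%08x" fields per line, and the PSR line looks like
 * "PSR=60000030 -ZC- T usr32", i.e. the NZCV flags, 'T' or 'A' for the
 * Thumb bit, the mode name and the 32/26-bit address-space indicator. */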
a6b025d3 9695
d2856f1a
AJ
9696void gen_pc_load(CPUState *env, TranslationBlock *tb,
9697 unsigned long searched_pc, int pc_pos, void *puc)
9698{
9699 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 9700 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 9701}
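
/* Sketch, not part of the original file (helper names are hypothetical):
 * the condexec pack/unpack shifts used by the search_pc machinery above,
 * written out as a pair of trivial helpers.  The packing mirrors the
 * gen_opc_condexec_bits[] assignment in gen_intermediate_code_internal();
 * the unpacking mirrors the DisasContext initialisation from the TB
 * flags. */
static inline uint32_t condexec_pack_example(int cond, int mask)
{
    /* same shifts as the gen_opc_condexec_bits[] snapshot */
    return (cond << 4) | (mask >> 1);
}

static inline void condexec_unpack_example(uint32_t bits,
                                           int *cond, int *mask)
{
    /* same shifts as the dc->condexec_* setup from the TB flags */
    *mask = (bits & 0xf) << 1;
    *cond = bits >> 4;
}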