]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: Make VFP binop helpers take pointer to fpstatus, not CPUState
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
28#include "exec-all.h"
29#include "disas.h"
57fec1fe 30#include "tcg-op.h"
79383c9c 31#include "qemu-log.h"
1497c961 32
7b59220e 33#include "helper.h"
1497c961 34#define GEN_HELPER 1
7b59220e 35#include "helper.h"
2c0262af 36
be5e7a76
DES
37#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39/* currently all emulated v5 cores are also v5TE, so don't bother */
40#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
41#define ENABLE_ARCH_5J 0
42#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 46
86753403 47#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 48
2c0262af
FB
49/* internal defines */
50typedef struct DisasContext {
0fa85d43 51 target_ulong pc;
2c0262af 52 int is_jmp;
e50e6a20
FB
53 /* Nonzero if this instruction has been conditionally skipped. */
54 int condjmp;
55 /* The label that will be jumped to when the instruction is skipped. */
56 int condlabel;
9ee6e8bb
PB
57 /* Thumb-2 condtional execution bits. */
58 int condexec_mask;
59 int condexec_cond;
2c0262af 60 struct TranslationBlock *tb;
8aaca4c0 61 int singlestep_enabled;
5899f386 62 int thumb;
b5ff1b31
FB
63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22
PM
67 int vec_len;
68 int vec_stride;
2c0262af
FB
69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb
PB
79/* These instructions trap after executing, so defer them until after the
80 conditional executions state has been updated. */
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
426f5abc
PB
88static TCGv_i32 cpu_exclusive_addr;
89static TCGv_i32 cpu_exclusive_val;
90static TCGv_i32 cpu_exclusive_high;
91#ifdef CONFIG_USER_ONLY
92static TCGv_i32 cpu_exclusive_test;
93static TCGv_i32 cpu_exclusive_info;
94#endif
ad69471c 95
b26eefb6 96/* FIXME: These should be removed. */
a7812ae4
PB
97static TCGv cpu_F0s, cpu_F1s;
98static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 99
2e70f6ef
PB
100#include "gen-icount.h"
101
155c3eac
FN
102static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
105
b26eefb6
PB
106/* initialize TCG globals. */
107void arm_translate_init(void)
108{
155c3eac
FN
109 int i;
110
a7812ae4
PB
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
112
155c3eac
FN
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, regs[i]),
116 regnames[i]);
117 }
426f5abc
PB
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, exclusive_high), "exclusive_high");
124#ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUState, exclusive_info), "exclusive_info");
129#endif
155c3eac 130
a7812ae4 131#define GEN_HELPER 2
7b59220e 132#include "helper.h"
b26eefb6
PB
133}
134
d9ba4830
PB
135static inline TCGv load_cpu_offset(int offset)
136{
7d1b0095 137 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
140}
141
142#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
143
144static inline void store_cpu_offset(TCGv var, int offset)
145{
146 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 147 tcg_temp_free_i32(var);
d9ba4830
PB
148}
149
150#define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUState, name))
152
b26eefb6
PB
153/* Set a variable to the value of a CPU register. */
154static void load_reg_var(DisasContext *s, TCGv var, int reg)
155{
156 if (reg == 15) {
157 uint32_t addr;
158 /* normaly, since we updated PC, we need only to add one insn */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
155c3eac 165 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
166 }
167}
168
169/* Create a new temporary and set it to the value of a CPU register. */
170static inline TCGv load_reg(DisasContext *s, int reg)
171{
7d1b0095 172 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
173 load_reg_var(s, tmp, reg);
174 return tmp;
175}
176
177/* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
179static void store_reg(DisasContext *s, int reg, TCGv var)
180{
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
184 }
155c3eac 185 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 186 tcg_temp_free_i32(var);
b26eefb6
PB
187}
188
b26eefb6 189/* Value extensions. */
86831435
PB
190#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
192#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194
1497c961
PB
195#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 197
b26eefb6 198
b75263d6
JR
199static inline void gen_set_cpsr(TCGv var, uint32_t mask)
200{
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
204}
d9ba4830
PB
205/* Set NZCV flags from the high 4 bits of var. */
206#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
207
208static void gen_exception(int excp)
209{
7d1b0095 210 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
7d1b0095 213 tcg_temp_free_i32(tmp);
d9ba4830
PB
214}
215
3670669c
PB
216static void gen_smul_dual(TCGv a, TCGv b)
217{
7d1b0095
PM
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
3670669c 222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 223 tcg_temp_free_i32(tmp2);
3670669c
PB
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
7d1b0095 228 tcg_temp_free_i32(tmp1);
3670669c
PB
229}
230
231/* Byteswap each halfword. */
232static void gen_rev16(TCGv var)
233{
7d1b0095 234 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
7d1b0095 240 tcg_temp_free_i32(tmp);
3670669c
PB
241}
242
243/* Byteswap low halfword and sign extend. */
244static void gen_revsh(TCGv var)
245{
1a855029
AJ
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
3670669c
PB
249}
250
251/* Unsigned bitfield extract. */
252static void gen_ubfx(TCGv var, int shift, uint32_t mask)
253{
254 if (shift)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
257}
258
259/* Signed bitfield extract. */
260static void gen_sbfx(TCGv var, int shift, int width)
261{
262 uint32_t signbit;
263
264 if (shift)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
271 }
272}
273
274/* Bitfield insertion. Insert val into base. Clobbers base and val. */
275static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
276{
3670669c 277 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
280 tcg_gen_or_i32(dest, base, val);
281}
282
838fa72d
AJ
283/* Return (b << 32) + a. Mark inputs as dead */
284static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 285{
838fa72d
AJ
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
287
288 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 289 tcg_temp_free_i32(b);
838fa72d
AJ
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
292
293 tcg_temp_free_i64(tmp64);
294 return a;
295}
296
297/* Return (b << 32) - a. Mark inputs as dead. */
298static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
299{
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
301
302 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 303 tcg_temp_free_i32(b);
838fa72d
AJ
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
306
307 tcg_temp_free_i64(tmp64);
308 return a;
3670669c
PB
309}
310
8f01245e
PB
311/* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
5e3f878a 313/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 314static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 315{
a7812ae4
PB
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
318
319 tcg_gen_extu_i32_i64(tmp1, a);
7d1b0095 320 tcg_temp_free_i32(a);
5e3f878a 321 tcg_gen_extu_i32_i64(tmp2, b);
7d1b0095 322 tcg_temp_free_i32(b);
5e3f878a 323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 324 tcg_temp_free_i64(tmp2);
5e3f878a
PB
325 return tmp1;
326}
327
a7812ae4 328static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 329{
a7812ae4
PB
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
332
333 tcg_gen_ext_i32_i64(tmp1, a);
7d1b0095 334 tcg_temp_free_i32(a);
5e3f878a 335 tcg_gen_ext_i32_i64(tmp2, b);
7d1b0095 336 tcg_temp_free_i32(b);
5e3f878a 337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
b75263d6 338 tcg_temp_free_i64(tmp2);
5e3f878a
PB
339 return tmp1;
340}
341
8f01245e
PB
342/* Swap low and high halfwords. */
343static void gen_swap_half(TCGv var)
344{
7d1b0095 345 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
7d1b0095 349 tcg_temp_free_i32(tmp);
8f01245e
PB
350}
351
b26eefb6
PB
352/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp;
357 */
358
359static void gen_add16(TCGv t0, TCGv t1)
360{
7d1b0095 361 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
b26eefb6
PB
370}
371
9a119ff6
PB
372#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
373
b26eefb6
PB
374/* Set CF to the top bit of var. */
375static void gen_set_CF_bit31(TCGv var)
376{
7d1b0095 377 TCGv tmp = tcg_temp_new_i32();
b26eefb6 378 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 379 gen_set_CF(tmp);
7d1b0095 380 tcg_temp_free_i32(tmp);
b26eefb6
PB
381}
382
383/* Set N and Z flags from var. */
384static inline void gen_logic_CC(TCGv var)
385{
6fbe23d5
PB
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
388}
389
390/* T0 += T1 + CF. */
396e467c 391static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 392{
d9ba4830 393 TCGv tmp;
396e467c 394 tcg_gen_add_i32(t0, t0, t1);
d9ba4830 395 tmp = load_cpu_field(CF);
396e467c 396 tcg_gen_add_i32(t0, t0, tmp);
7d1b0095 397 tcg_temp_free_i32(tmp);
b26eefb6
PB
398}
399
e9bb4aa9
JR
400/* dest = T0 + T1 + CF. */
401static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
402{
403 TCGv tmp;
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
7d1b0095 407 tcg_temp_free_i32(tmp);
e9bb4aa9
JR
408}
409
3670669c
PB
410/* dest = T0 - T1 + CF - 1. */
411static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
412{
d9ba4830 413 TCGv tmp;
3670669c 414 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 415 tmp = load_cpu_field(CF);
3670669c
PB
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
7d1b0095 418 tcg_temp_free_i32(tmp);
3670669c
PB
419}
420
ad69471c
PB
421/* FIXME: Implement this natively. */
422#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
423
9a119ff6 424static void shifter_out_im(TCGv var, int shift)
b26eefb6 425{
7d1b0095 426 TCGv tmp = tcg_temp_new_i32();
9a119ff6
PB
427 if (shift == 0) {
428 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 429 } else {
9a119ff6 430 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 431 if (shift != 31)
9a119ff6
PB
432 tcg_gen_andi_i32(tmp, tmp, 1);
433 }
434 gen_set_CF(tmp);
7d1b0095 435 tcg_temp_free_i32(tmp);
9a119ff6 436}
b26eefb6 437
9a119ff6
PB
438/* Shift by immediate. Includes special handling for shift == 0. */
439static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
440{
441 switch (shiftop) {
442 case 0: /* LSL */
443 if (shift != 0) {
444 if (flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
447 }
448 break;
449 case 1: /* LSR */
450 if (shift == 0) {
451 if (flags) {
452 tcg_gen_shri_i32(var, var, 31);
453 gen_set_CF(var);
454 }
455 tcg_gen_movi_i32(var, 0);
456 } else {
457 if (flags)
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
460 }
461 break;
462 case 2: /* ASR */
463 if (shift == 0)
464 shift = 32;
465 if (flags)
466 shifter_out_im(var, shift - 1);
467 if (shift == 32)
468 shift = 31;
469 tcg_gen_sari_i32(var, var, shift);
470 break;
471 case 3: /* ROR/RRX */
472 if (shift != 0) {
473 if (flags)
474 shifter_out_im(var, shift - 1);
f669df27 475 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 476 } else {
d9ba4830 477 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
478 if (flags)
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
7d1b0095 483 tcg_temp_free_i32(tmp);
b26eefb6
PB
484 }
485 }
486};
487
8984bd2e
PB
488static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
490{
491 if (flags) {
492 switch (shiftop) {
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
497 }
498 } else {
499 switch (shiftop) {
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
f669df27
AJ
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
505 }
506 }
7d1b0095 507 tcg_temp_free_i32(shift);
8984bd2e
PB
508}
509
6ddbc6e4
PB
510#define PAS_OP(pfx) \
511 switch (op2) { \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
518 }
d9ba4830 519static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 520{
a7812ae4 521 TCGv_ptr tmp;
6ddbc6e4
PB
522
523 switch (op1) {
524#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
525 case 1:
a7812ae4 526 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
528 PAS_OP(s)
b75263d6 529 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
530 break;
531 case 5:
a7812ae4 532 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
534 PAS_OP(u)
b75263d6 535 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
536 break;
537#undef gen_pas_helper
538#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
539 case 2:
540 PAS_OP(q);
541 break;
542 case 3:
543 PAS_OP(sh);
544 break;
545 case 6:
546 PAS_OP(uq);
547 break;
548 case 7:
549 PAS_OP(uh);
550 break;
551#undef gen_pas_helper
552 }
553}
9ee6e8bb
PB
554#undef PAS_OP
555
6ddbc6e4
PB
556/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
557#define PAS_OP(pfx) \
ed89a2f1 558 switch (op1) { \
6ddbc6e4
PB
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
565 }
d9ba4830 566static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 567{
a7812ae4 568 TCGv_ptr tmp;
6ddbc6e4 569
ed89a2f1 570 switch (op2) {
6ddbc6e4
PB
571#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 0:
a7812ae4 573 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
575 PAS_OP(s)
b75263d6 576 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
577 break;
578 case 4:
a7812ae4 579 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
581 PAS_OP(u)
b75263d6 582 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
583 break;
584#undef gen_pas_helper
585#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 1:
587 PAS_OP(q);
588 break;
589 case 2:
590 PAS_OP(sh);
591 break;
592 case 5:
593 PAS_OP(uq);
594 break;
595 case 6:
596 PAS_OP(uh);
597 break;
598#undef gen_pas_helper
599 }
600}
9ee6e8bb
PB
601#undef PAS_OP
602
d9ba4830
PB
603static void gen_test_cc(int cc, int label)
604{
605 TCGv tmp;
606 TCGv tmp2;
d9ba4830
PB
607 int inv;
608
d9ba4830
PB
609 switch (cc) {
610 case 0: /* eq: Z */
6fbe23d5 611 tmp = load_cpu_field(ZF);
cb63669a 612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
613 break;
614 case 1: /* ne: !Z */
6fbe23d5 615 tmp = load_cpu_field(ZF);
cb63669a 616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
617 break;
618 case 2: /* cs: C */
619 tmp = load_cpu_field(CF);
cb63669a 620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
621 break;
622 case 3: /* cc: !C */
623 tmp = load_cpu_field(CF);
cb63669a 624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
625 break;
626 case 4: /* mi: N */
6fbe23d5 627 tmp = load_cpu_field(NF);
cb63669a 628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
629 break;
630 case 5: /* pl: !N */
6fbe23d5 631 tmp = load_cpu_field(NF);
cb63669a 632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
633 break;
634 case 6: /* vs: V */
635 tmp = load_cpu_field(VF);
cb63669a 636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
637 break;
638 case 7: /* vc: !V */
639 tmp = load_cpu_field(VF);
cb63669a 640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
641 break;
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
cb63669a 645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 646 tcg_temp_free_i32(tmp);
6fbe23d5 647 tmp = load_cpu_field(ZF);
cb63669a 648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
649 gen_set_label(inv);
650 break;
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
cb63669a 653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 654 tcg_temp_free_i32(tmp);
6fbe23d5 655 tmp = load_cpu_field(ZF);
cb63669a 656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
657 break;
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
6fbe23d5 660 tmp2 = load_cpu_field(NF);
d9ba4830 661 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 662 tcg_temp_free_i32(tmp2);
cb63669a 663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
664 break;
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
6fbe23d5 667 tmp2 = load_cpu_field(NF);
d9ba4830 668 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 669 tcg_temp_free_i32(tmp2);
cb63669a 670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
671 break;
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
6fbe23d5 674 tmp = load_cpu_field(ZF);
cb63669a 675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
7d1b0095 676 tcg_temp_free_i32(tmp);
d9ba4830 677 tmp = load_cpu_field(VF);
6fbe23d5 678 tmp2 = load_cpu_field(NF);
d9ba4830 679 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 680 tcg_temp_free_i32(tmp2);
cb63669a 681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
682 gen_set_label(inv);
683 break;
684 case 13: /* le: Z || N != V */
6fbe23d5 685 tmp = load_cpu_field(ZF);
cb63669a 686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
7d1b0095 687 tcg_temp_free_i32(tmp);
d9ba4830 688 tmp = load_cpu_field(VF);
6fbe23d5 689 tmp2 = load_cpu_field(NF);
d9ba4830 690 tcg_gen_xor_i32(tmp, tmp, tmp2);
7d1b0095 691 tcg_temp_free_i32(tmp2);
cb63669a 692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
693 break;
694 default:
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
696 abort();
697 }
7d1b0095 698 tcg_temp_free_i32(tmp);
d9ba4830 699}
2c0262af 700
b1d8e52e 701static const uint8_t table_logic_cc[16] = {
2c0262af
FB
702 1, /* and */
703 1, /* xor */
704 0, /* sub */
705 0, /* rsb */
706 0, /* add */
707 0, /* adc */
708 0, /* sbc */
709 0, /* rsc */
710 1, /* andl */
711 1, /* xorl */
712 0, /* cmp */
713 0, /* cmn */
714 1, /* orr */
715 1, /* mov */
716 1, /* bic */
717 1, /* mvn */
718};
3b46e624 719
d9ba4830
PB
720/* Set PC and Thumb state from an immediate address. */
721static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 722{
b26eefb6 723 TCGv tmp;
99c475ab 724
b26eefb6 725 s->is_jmp = DISAS_UPDATE;
d9ba4830 726 if (s->thumb != (addr & 1)) {
7d1b0095 727 tmp = tcg_temp_new_i32();
d9ba4830
PB
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
7d1b0095 730 tcg_temp_free_i32(tmp);
d9ba4830 731 }
155c3eac 732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
733}
734
735/* Set PC and Thumb state from var. var is marked as dead. */
736static inline void gen_bx(DisasContext *s, TCGv var)
737{
d9ba4830 738 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
d9ba4830
PB
742}
743
21aeb343
JR
744/* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747static inline void store_reg_bx(CPUState *env, DisasContext *s,
748 int reg, TCGv var)
749{
750 if (reg == 15 && ENABLE_ARCH_7) {
751 gen_bx(s, var);
752 } else {
753 store_reg(s, reg, var);
754 }
755}
756
be5e7a76
DES
757/* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761static inline void store_reg_from_load(CPUState *env, DisasContext *s,
762 int reg, TCGv var)
763{
764 if (reg == 15 && ENABLE_ARCH_5) {
765 gen_bx(s, var);
766 } else {
767 store_reg(s, reg, var);
768 }
769}
770
b0109805
PB
771static inline TCGv gen_ld8s(TCGv addr, int index)
772{
7d1b0095 773 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
774 tcg_gen_qemu_ld8s(tmp, addr, index);
775 return tmp;
776}
777static inline TCGv gen_ld8u(TCGv addr, int index)
778{
7d1b0095 779 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
780 tcg_gen_qemu_ld8u(tmp, addr, index);
781 return tmp;
782}
783static inline TCGv gen_ld16s(TCGv addr, int index)
784{
7d1b0095 785 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
786 tcg_gen_qemu_ld16s(tmp, addr, index);
787 return tmp;
788}
789static inline TCGv gen_ld16u(TCGv addr, int index)
790{
7d1b0095 791 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
792 tcg_gen_qemu_ld16u(tmp, addr, index);
793 return tmp;
794}
795static inline TCGv gen_ld32(TCGv addr, int index)
796{
7d1b0095 797 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
798 tcg_gen_qemu_ld32u(tmp, addr, index);
799 return tmp;
800}
84496233
JR
801static inline TCGv_i64 gen_ld64(TCGv addr, int index)
802{
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
805 return tmp;
806}
b0109805
PB
807static inline void gen_st8(TCGv val, TCGv addr, int index)
808{
809 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 810 tcg_temp_free_i32(val);
b0109805
PB
811}
812static inline void gen_st16(TCGv val, TCGv addr, int index)
813{
814 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 815 tcg_temp_free_i32(val);
b0109805
PB
816}
817static inline void gen_st32(TCGv val, TCGv addr, int index)
818{
819 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 820 tcg_temp_free_i32(val);
b0109805 821}
84496233
JR
822static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
823{
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
826}
b5ff1b31 827
5e3f878a
PB
828static inline void gen_set_pc_im(uint32_t val)
829{
155c3eac 830 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
831}
832
b5ff1b31
FB
833/* Force a TB lookup after an instruction that changes the CPU state. */
834static inline void gen_lookup_tb(DisasContext *s)
835{
a6445c52 836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
837 s->is_jmp = DISAS_UPDATE;
838}
839
b0109805
PB
840static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
841 TCGv var)
2c0262af 842{
1e8d4eec 843 int val, rm, shift, shiftop;
b26eefb6 844 TCGv offset;
2c0262af
FB
845
846 if (!(insn & (1 << 25))) {
847 /* immediate */
848 val = insn & 0xfff;
849 if (!(insn & (1 << 23)))
850 val = -val;
537730b9 851 if (val != 0)
b0109805 852 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
853 } else {
854 /* shift/register */
855 rm = (insn) & 0xf;
856 shift = (insn >> 7) & 0x1f;
1e8d4eec 857 shiftop = (insn >> 5) & 3;
b26eefb6 858 offset = load_reg(s, rm);
9a119ff6 859 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 860 if (!(insn & (1 << 23)))
b0109805 861 tcg_gen_sub_i32(var, var, offset);
2c0262af 862 else
b0109805 863 tcg_gen_add_i32(var, var, offset);
7d1b0095 864 tcg_temp_free_i32(offset);
2c0262af
FB
865 }
866}
867
191f9a93 868static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 869 int extra, TCGv var)
2c0262af
FB
870{
871 int val, rm;
b26eefb6 872 TCGv offset;
3b46e624 873
2c0262af
FB
874 if (insn & (1 << 22)) {
875 /* immediate */
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
878 val = -val;
18acad92 879 val += extra;
537730b9 880 if (val != 0)
b0109805 881 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
882 } else {
883 /* register */
191f9a93 884 if (extra)
b0109805 885 tcg_gen_addi_i32(var, var, extra);
2c0262af 886 rm = (insn) & 0xf;
b26eefb6 887 offset = load_reg(s, rm);
2c0262af 888 if (!(insn & (1 << 23)))
b0109805 889 tcg_gen_sub_i32(var, var, offset);
2c0262af 890 else
b0109805 891 tcg_gen_add_i32(var, var, offset);
7d1b0095 892 tcg_temp_free_i32(offset);
2c0262af
FB
893 }
894}
895
5aaebd13
PM
896static TCGv_ptr get_fpstatus_ptr(int neon)
897{
898 TCGv_ptr statusptr = tcg_temp_new_ptr();
899 int offset;
900 if (neon) {
901 offset = offsetof(CPUState, vfp.standard_fp_status);
902 } else {
903 offset = offsetof(CPUState, vfp.fp_status);
904 }
905 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
906 return statusptr;
907}
908
4373f3ce
PB
909#define VFP_OP2(name) \
910static inline void gen_vfp_##name(int dp) \
911{ \
ae1857ec
PM
912 TCGv_ptr fpst = get_fpstatus_ptr(0); \
913 if (dp) { \
914 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
915 } else { \
916 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
917 } \
918 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
919}
920
4373f3ce
PB
921VFP_OP2(add)
922VFP_OP2(sub)
923VFP_OP2(mul)
924VFP_OP2(div)
925
926#undef VFP_OP2
927
605a6aed
PM
928static inline void gen_vfp_F1_mul(int dp)
929{
930 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 931 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 932 if (dp) {
ae1857ec 933 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 934 } else {
ae1857ec 935 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 936 }
ae1857ec 937 tcg_temp_free_ptr(fpst);
605a6aed
PM
938}
939
940static inline void gen_vfp_F1_neg(int dp)
941{
942 /* Like gen_vfp_neg() but put result in F1 */
943 if (dp) {
944 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
945 } else {
946 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
947 }
948}
949
4373f3ce
PB
950static inline void gen_vfp_abs(int dp)
951{
952 if (dp)
953 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
954 else
955 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
956}
957
958static inline void gen_vfp_neg(int dp)
959{
960 if (dp)
961 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
962 else
963 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
964}
965
966static inline void gen_vfp_sqrt(int dp)
967{
968 if (dp)
969 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
970 else
971 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
972}
973
974static inline void gen_vfp_cmp(int dp)
975{
976 if (dp)
977 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
978 else
979 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
980}
981
982static inline void gen_vfp_cmpe(int dp)
983{
984 if (dp)
985 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
986 else
987 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
988}
989
990static inline void gen_vfp_F1_ld0(int dp)
991{
992 if (dp)
5b340b51 993 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 994 else
5b340b51 995 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
996}
997
5500b06c
PM
998#define VFP_GEN_ITOF(name) \
999static inline void gen_vfp_##name(int dp, int neon) \
1000{ \
5aaebd13 1001 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1002 if (dp) { \
1003 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1004 } else { \
1005 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1006 } \
b7fa9214 1007 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1008}
1009
5500b06c
PM
1010VFP_GEN_ITOF(uito)
1011VFP_GEN_ITOF(sito)
1012#undef VFP_GEN_ITOF
4373f3ce 1013
5500b06c
PM
1014#define VFP_GEN_FTOI(name) \
1015static inline void gen_vfp_##name(int dp, int neon) \
1016{ \
5aaebd13 1017 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1018 if (dp) { \
1019 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1020 } else { \
1021 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1022 } \
b7fa9214 1023 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1024}
1025
5500b06c
PM
1026VFP_GEN_FTOI(toui)
1027VFP_GEN_FTOI(touiz)
1028VFP_GEN_FTOI(tosi)
1029VFP_GEN_FTOI(tosiz)
1030#undef VFP_GEN_FTOI
4373f3ce
PB
1031
1032#define VFP_GEN_FIX(name) \
5500b06c 1033static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1034{ \
b75263d6 1035 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1036 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1037 if (dp) { \
1038 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1039 } else { \
1040 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1041 } \
b75263d6 1042 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1043 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1044}
4373f3ce
PB
1045VFP_GEN_FIX(tosh)
1046VFP_GEN_FIX(tosl)
1047VFP_GEN_FIX(touh)
1048VFP_GEN_FIX(toul)
1049VFP_GEN_FIX(shto)
1050VFP_GEN_FIX(slto)
1051VFP_GEN_FIX(uhto)
1052VFP_GEN_FIX(ulto)
1053#undef VFP_GEN_FIX
9ee6e8bb 1054
312eea9f 1055static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1056{
1057 if (dp)
312eea9f 1058 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1059 else
312eea9f 1060 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1061}
1062
312eea9f 1063static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1064{
1065 if (dp)
312eea9f 1066 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1067 else
312eea9f 1068 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1069}
1070
8e96005d
FB
1071static inline long
1072vfp_reg_offset (int dp, int reg)
1073{
1074 if (dp)
1075 return offsetof(CPUARMState, vfp.regs[reg]);
1076 else if (reg & 1) {
1077 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1078 + offsetof(CPU_DoubleU, l.upper);
1079 } else {
1080 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1081 + offsetof(CPU_DoubleU, l.lower);
1082 }
1083}
9ee6e8bb
PB
1084
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long neon_reg_offset(int reg, int n)
{
    /* Each Neon register holds two 32-bit pieces, addressed as
     * consecutive single-precision registers.
     */
    return vfp_reg_offset(0, reg * 2 + n);
}
1094
8f8e3aa4
PB
1095static TCGv neon_load_reg(int reg, int pass)
1096{
7d1b0095 1097 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1098 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1099 return tmp;
1100}
1101
1102static void neon_store_reg(int reg, int pass, TCGv var)
1103{
1104 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1105 tcg_temp_free_i32(var);
8f8e3aa4
PB
1106}
1107
a7812ae4 1108static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1109{
1110 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1111}
1112
a7812ae4 1113static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1114{
1115 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1116}
1117
4373f3ce
PB
/* VFP single/double values are kept in plain i32/i64 TCG temps, so the
 * f32/f64 load/store ops are simple aliases for the integer ones.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1122
b7bcbe95
FB
1123static inline void gen_mov_F0_vreg(int dp, int reg)
1124{
1125 if (dp)
4373f3ce 1126 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1127 else
4373f3ce 1128 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1129}
1130
1131static inline void gen_mov_F1_vreg(int dp, int reg)
1132{
1133 if (dp)
4373f3ce 1134 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1135 else
4373f3ce 1136 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1137}
1138
1139static inline void gen_mov_vreg_F0(int dp, int reg)
1140{
1141 if (dp)
4373f3ce 1142 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1143 else
4373f3ce 1144 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1145}
1146
18c9b560
AZ
/* Bit 20 of a coprocessor insn: set for transfers reading from the
 * coprocessor (e.g. TMRRC below), clear for writes (e.g. TMCRR).
 */
#define ARM_CP_RW_BIT (1 << 20)
1148
a7812ae4 1149static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1150{
1151 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1152}
1153
a7812ae4 1154static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1155{
1156 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1157}
1158
da6b5335 1159static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1160{
7d1b0095 1161 TCGv var = tcg_temp_new_i32();
da6b5335
FN
1162 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1163 return var;
e677137d
PB
1164}
1165
da6b5335 1166static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1167{
da6b5335 1168 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
7d1b0095 1169 tcg_temp_free_i32(var);
e677137d
PB
1170}
1171
1172static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1173{
1174 iwmmxt_store_reg(cpu_M0, rn);
1175}
1176
1177static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1178{
1179 iwmmxt_load_reg(cpu_M0, rn);
1180}
1181
1182static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1183{
1184 iwmmxt_load_reg(cpu_V1, rn);
1185 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1186}
1187
1188static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1189{
1190 iwmmxt_load_reg(cpu_V1, rn);
1191 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1192}
1193
1194static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1195{
1196 iwmmxt_load_reg(cpu_V1, rn);
1197 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1198}
1199
1200#define IWMMXT_OP(name) \
1201static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1202{ \
1203 iwmmxt_load_reg(cpu_V1, rn); \
1204 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1205}
1206
477955bd
PM
1207#define IWMMXT_OP_ENV(name) \
1208static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1209{ \
1210 iwmmxt_load_reg(cpu_V1, rn); \
1211 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1212}
1213
1214#define IWMMXT_OP_ENV_SIZE(name) \
1215IWMMXT_OP_ENV(name##b) \
1216IWMMXT_OP_ENV(name##w) \
1217IWMMXT_OP_ENV(name##l)
e677137d 1218
477955bd 1219#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1220static inline void gen_op_iwmmxt_##name##_M0(void) \
1221{ \
477955bd 1222 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1223}
1224
1225IWMMXT_OP(maddsq)
1226IWMMXT_OP(madduq)
1227IWMMXT_OP(sadb)
1228IWMMXT_OP(sadw)
1229IWMMXT_OP(mulslw)
1230IWMMXT_OP(mulshw)
1231IWMMXT_OP(mululw)
1232IWMMXT_OP(muluhw)
1233IWMMXT_OP(macsw)
1234IWMMXT_OP(macuw)
1235
477955bd
PM
1236IWMMXT_OP_ENV_SIZE(unpackl)
1237IWMMXT_OP_ENV_SIZE(unpackh)
1238
1239IWMMXT_OP_ENV1(unpacklub)
1240IWMMXT_OP_ENV1(unpackluw)
1241IWMMXT_OP_ENV1(unpacklul)
1242IWMMXT_OP_ENV1(unpackhub)
1243IWMMXT_OP_ENV1(unpackhuw)
1244IWMMXT_OP_ENV1(unpackhul)
1245IWMMXT_OP_ENV1(unpacklsb)
1246IWMMXT_OP_ENV1(unpacklsw)
1247IWMMXT_OP_ENV1(unpacklsl)
1248IWMMXT_OP_ENV1(unpackhsb)
1249IWMMXT_OP_ENV1(unpackhsw)
1250IWMMXT_OP_ENV1(unpackhsl)
1251
1252IWMMXT_OP_ENV_SIZE(cmpeq)
1253IWMMXT_OP_ENV_SIZE(cmpgtu)
1254IWMMXT_OP_ENV_SIZE(cmpgts)
1255
1256IWMMXT_OP_ENV_SIZE(mins)
1257IWMMXT_OP_ENV_SIZE(minu)
1258IWMMXT_OP_ENV_SIZE(maxs)
1259IWMMXT_OP_ENV_SIZE(maxu)
1260
1261IWMMXT_OP_ENV_SIZE(subn)
1262IWMMXT_OP_ENV_SIZE(addn)
1263IWMMXT_OP_ENV_SIZE(subu)
1264IWMMXT_OP_ENV_SIZE(addu)
1265IWMMXT_OP_ENV_SIZE(subs)
1266IWMMXT_OP_ENV_SIZE(adds)
1267
1268IWMMXT_OP_ENV(avgb0)
1269IWMMXT_OP_ENV(avgb1)
1270IWMMXT_OP_ENV(avgw0)
1271IWMMXT_OP_ENV(avgw1)
e677137d
PB
1272
1273IWMMXT_OP(msadb)
1274
477955bd
PM
1275IWMMXT_OP_ENV(packuw)
1276IWMMXT_OP_ENV(packul)
1277IWMMXT_OP_ENV(packuq)
1278IWMMXT_OP_ENV(packsw)
1279IWMMXT_OP_ENV(packsl)
1280IWMMXT_OP_ENV(packsq)
e677137d 1281
e677137d
PB
1282static void gen_op_iwmmxt_set_mup(void)
1283{
1284 TCGv tmp;
1285 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1286 tcg_gen_ori_i32(tmp, tmp, 2);
1287 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1288}
1289
1290static void gen_op_iwmmxt_set_cup(void)
1291{
1292 TCGv tmp;
1293 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1294 tcg_gen_ori_i32(tmp, tmp, 1);
1295 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1296}
1297
1298static void gen_op_iwmmxt_setpsr_nz(void)
1299{
7d1b0095 1300 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1301 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1302 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1303}
1304
1305static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1306{
1307 iwmmxt_load_reg(cpu_V1, rn);
86831435 1308 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1309 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1310}
1311
da6b5335 1312static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1313{
1314 int rd;
1315 uint32_t offset;
da6b5335 1316 TCGv tmp;
18c9b560
AZ
1317
1318 rd = (insn >> 16) & 0xf;
da6b5335 1319 tmp = load_reg(s, rd);
18c9b560
AZ
1320
1321 offset = (insn & 0xff) << ((insn >> 7) & 2);
1322 if (insn & (1 << 24)) {
1323 /* Pre indexed */
1324 if (insn & (1 << 23))
da6b5335 1325 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1326 else
da6b5335
FN
1327 tcg_gen_addi_i32(tmp, tmp, -offset);
1328 tcg_gen_mov_i32(dest, tmp);
18c9b560 1329 if (insn & (1 << 21))
da6b5335
FN
1330 store_reg(s, rd, tmp);
1331 else
7d1b0095 1332 tcg_temp_free_i32(tmp);
18c9b560
AZ
1333 } else if (insn & (1 << 21)) {
1334 /* Post indexed */
da6b5335 1335 tcg_gen_mov_i32(dest, tmp);
18c9b560 1336 if (insn & (1 << 23))
da6b5335 1337 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1338 else
da6b5335
FN
1339 tcg_gen_addi_i32(tmp, tmp, -offset);
1340 store_reg(s, rd, tmp);
18c9b560
AZ
1341 } else if (!(insn & (1 << 23)))
1342 return 1;
1343 return 0;
1344}
1345
da6b5335 1346static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1347{
1348 int rd = (insn >> 0) & 0xf;
da6b5335 1349 TCGv tmp;
18c9b560 1350
da6b5335
FN
1351 if (insn & (1 << 8)) {
1352 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1353 return 1;
da6b5335
FN
1354 } else {
1355 tmp = iwmmxt_load_creg(rd);
1356 }
1357 } else {
7d1b0095 1358 tmp = tcg_temp_new_i32();
da6b5335
FN
1359 iwmmxt_load_reg(cpu_V0, rd);
1360 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1361 }
1362 tcg_gen_andi_i32(tmp, tmp, mask);
1363 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1364 tcg_temp_free_i32(tmp);
18c9b560
AZ
1365 return 0;
1366}
1367
a1c7273b 1368/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560
AZ
1369 (ie. an undefined instruction). */
1370static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1371{
1372 int rd, wrd;
1373 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1374 TCGv addr;
1375 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1376
1377 if ((insn & 0x0e000e00) == 0x0c000000) {
1378 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1379 wrd = insn & 0xf;
1380 rdlo = (insn >> 12) & 0xf;
1381 rdhi = (insn >> 16) & 0xf;
1382 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1383 iwmmxt_load_reg(cpu_V0, wrd);
1384 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1385 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1386 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1387 } else { /* TMCRR */
da6b5335
FN
1388 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1389 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1390 gen_op_iwmmxt_set_mup();
1391 }
1392 return 0;
1393 }
1394
1395 wrd = (insn >> 12) & 0xf;
7d1b0095 1396 addr = tcg_temp_new_i32();
da6b5335 1397 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1398 tcg_temp_free_i32(addr);
18c9b560 1399 return 1;
da6b5335 1400 }
18c9b560
AZ
1401 if (insn & ARM_CP_RW_BIT) {
1402 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1403 tmp = tcg_temp_new_i32();
da6b5335
FN
1404 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1405 iwmmxt_store_creg(wrd, tmp);
18c9b560 1406 } else {
e677137d
PB
1407 i = 1;
1408 if (insn & (1 << 8)) {
1409 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1410 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1411 i = 0;
1412 } else { /* WLDRW wRd */
da6b5335 1413 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1414 }
1415 } else {
1416 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1417 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1418 } else { /* WLDRB */
da6b5335 1419 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1420 }
1421 }
1422 if (i) {
1423 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1424 tcg_temp_free_i32(tmp);
e677137d 1425 }
18c9b560
AZ
1426 gen_op_iwmmxt_movq_wRn_M0(wrd);
1427 }
1428 } else {
1429 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1430 tmp = iwmmxt_load_creg(wrd);
1431 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1432 } else {
1433 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1434 tmp = tcg_temp_new_i32();
e677137d
PB
1435 if (insn & (1 << 8)) {
1436 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1437 tcg_temp_free_i32(tmp);
da6b5335 1438 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1439 } else { /* WSTRW wRd */
1440 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1441 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1442 }
1443 } else {
1444 if (insn & (1 << 22)) { /* WSTRH */
1445 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1446 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1447 } else { /* WSTRB */
1448 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1449 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1450 }
1451 }
18c9b560
AZ
1452 }
1453 }
7d1b0095 1454 tcg_temp_free_i32(addr);
18c9b560
AZ
1455 return 0;
1456 }
1457
1458 if ((insn & 0x0f000000) != 0x0e000000)
1459 return 1;
1460
1461 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1462 case 0x000: /* WOR */
1463 wrd = (insn >> 12) & 0xf;
1464 rd0 = (insn >> 0) & 0xf;
1465 rd1 = (insn >> 16) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0);
1467 gen_op_iwmmxt_orq_M0_wRn(rd1);
1468 gen_op_iwmmxt_setpsr_nz();
1469 gen_op_iwmmxt_movq_wRn_M0(wrd);
1470 gen_op_iwmmxt_set_mup();
1471 gen_op_iwmmxt_set_cup();
1472 break;
1473 case 0x011: /* TMCR */
1474 if (insn & 0xf)
1475 return 1;
1476 rd = (insn >> 12) & 0xf;
1477 wrd = (insn >> 16) & 0xf;
1478 switch (wrd) {
1479 case ARM_IWMMXT_wCID:
1480 case ARM_IWMMXT_wCASF:
1481 break;
1482 case ARM_IWMMXT_wCon:
1483 gen_op_iwmmxt_set_cup();
1484 /* Fall through. */
1485 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1486 tmp = iwmmxt_load_creg(wrd);
1487 tmp2 = load_reg(s, rd);
f669df27 1488 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1489 tcg_temp_free_i32(tmp2);
da6b5335 1490 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1491 break;
1492 case ARM_IWMMXT_wCGR0:
1493 case ARM_IWMMXT_wCGR1:
1494 case ARM_IWMMXT_wCGR2:
1495 case ARM_IWMMXT_wCGR3:
1496 gen_op_iwmmxt_set_cup();
da6b5335
FN
1497 tmp = load_reg(s, rd);
1498 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1499 break;
1500 default:
1501 return 1;
1502 }
1503 break;
1504 case 0x100: /* WXOR */
1505 wrd = (insn >> 12) & 0xf;
1506 rd0 = (insn >> 0) & 0xf;
1507 rd1 = (insn >> 16) & 0xf;
1508 gen_op_iwmmxt_movq_M0_wRn(rd0);
1509 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1510 gen_op_iwmmxt_setpsr_nz();
1511 gen_op_iwmmxt_movq_wRn_M0(wrd);
1512 gen_op_iwmmxt_set_mup();
1513 gen_op_iwmmxt_set_cup();
1514 break;
1515 case 0x111: /* TMRC */
1516 if (insn & 0xf)
1517 return 1;
1518 rd = (insn >> 12) & 0xf;
1519 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1520 tmp = iwmmxt_load_creg(wrd);
1521 store_reg(s, rd, tmp);
18c9b560
AZ
1522 break;
1523 case 0x300: /* WANDN */
1524 wrd = (insn >> 12) & 0xf;
1525 rd0 = (insn >> 0) & 0xf;
1526 rd1 = (insn >> 16) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1528 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1529 gen_op_iwmmxt_andq_M0_wRn(rd1);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1534 break;
1535 case 0x200: /* WAND */
1536 wrd = (insn >> 12) & 0xf;
1537 rd0 = (insn >> 0) & 0xf;
1538 rd1 = (insn >> 16) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0);
1540 gen_op_iwmmxt_andq_M0_wRn(rd1);
1541 gen_op_iwmmxt_setpsr_nz();
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x810: case 0xa10: /* WMADD */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 0) & 0xf;
1549 rd1 = (insn >> 16) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 if (insn & (1 << 21))
1552 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1553 else
1554 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1555 gen_op_iwmmxt_movq_wRn_M0(wrd);
1556 gen_op_iwmmxt_set_mup();
1557 break;
1558 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1559 wrd = (insn >> 12) & 0xf;
1560 rd0 = (insn >> 16) & 0xf;
1561 rd1 = (insn >> 0) & 0xf;
1562 gen_op_iwmmxt_movq_M0_wRn(rd0);
1563 switch ((insn >> 22) & 3) {
1564 case 0:
1565 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1566 break;
1567 case 1:
1568 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1569 break;
1570 case 2:
1571 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1572 break;
1573 case 3:
1574 return 1;
1575 }
1576 gen_op_iwmmxt_movq_wRn_M0(wrd);
1577 gen_op_iwmmxt_set_mup();
1578 gen_op_iwmmxt_set_cup();
1579 break;
1580 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1581 wrd = (insn >> 12) & 0xf;
1582 rd0 = (insn >> 16) & 0xf;
1583 rd1 = (insn >> 0) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0);
1585 switch ((insn >> 22) & 3) {
1586 case 0:
1587 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1588 break;
1589 case 1:
1590 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1591 break;
1592 case 2:
1593 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1594 break;
1595 case 3:
1596 return 1;
1597 }
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 gen_op_iwmmxt_set_cup();
1601 break;
1602 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1603 wrd = (insn >> 12) & 0xf;
1604 rd0 = (insn >> 16) & 0xf;
1605 rd1 = (insn >> 0) & 0xf;
1606 gen_op_iwmmxt_movq_M0_wRn(rd0);
1607 if (insn & (1 << 22))
1608 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1609 else
1610 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1611 if (!(insn & (1 << 20)))
1612 gen_op_iwmmxt_addl_M0_wRn(wrd);
1613 gen_op_iwmmxt_movq_wRn_M0(wrd);
1614 gen_op_iwmmxt_set_mup();
1615 break;
1616 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 16) & 0xf;
1619 rd1 = (insn >> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1621 if (insn & (1 << 21)) {
1622 if (insn & (1 << 20))
1623 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1624 else
1625 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1626 } else {
1627 if (insn & (1 << 20))
1628 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1631 }
18c9b560
AZ
1632 gen_op_iwmmxt_movq_wRn_M0(wrd);
1633 gen_op_iwmmxt_set_mup();
1634 break;
1635 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 16) & 0xf;
1638 rd1 = (insn >> 0) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 if (insn & (1 << 21))
1641 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1642 else
1643 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1644 if (!(insn & (1 << 20))) {
e677137d
PB
1645 iwmmxt_load_reg(cpu_V1, wrd);
1646 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1647 }
1648 gen_op_iwmmxt_movq_wRn_M0(wrd);
1649 gen_op_iwmmxt_set_mup();
1650 break;
1651 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1652 wrd = (insn >> 12) & 0xf;
1653 rd0 = (insn >> 16) & 0xf;
1654 rd1 = (insn >> 0) & 0xf;
1655 gen_op_iwmmxt_movq_M0_wRn(rd0);
1656 switch ((insn >> 22) & 3) {
1657 case 0:
1658 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1659 break;
1660 case 1:
1661 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1662 break;
1663 case 2:
1664 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1665 break;
1666 case 3:
1667 return 1;
1668 }
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 gen_op_iwmmxt_set_cup();
1672 break;
1673 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1674 wrd = (insn >> 12) & 0xf;
1675 rd0 = (insn >> 16) & 0xf;
1676 rd1 = (insn >> 0) & 0xf;
1677 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1678 if (insn & (1 << 22)) {
1679 if (insn & (1 << 20))
1680 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1681 else
1682 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1683 } else {
1684 if (insn & (1 << 20))
1685 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1686 else
1687 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1688 }
18c9b560
AZ
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1692 break;
1693 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1698 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1699 tcg_gen_andi_i32(tmp, tmp, 7);
1700 iwmmxt_load_reg(cpu_V1, rd1);
1701 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1702 tcg_temp_free_i32(tmp);
18c9b560
AZ
1703 gen_op_iwmmxt_movq_wRn_M0(wrd);
1704 gen_op_iwmmxt_set_mup();
1705 break;
1706 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1707 if (((insn >> 6) & 3) == 3)
1708 return 1;
18c9b560
AZ
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
da6b5335 1711 tmp = load_reg(s, rd);
18c9b560
AZ
1712 gen_op_iwmmxt_movq_M0_wRn(wrd);
1713 switch ((insn >> 6) & 3) {
1714 case 0:
da6b5335
FN
1715 tmp2 = tcg_const_i32(0xff);
1716 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1717 break;
1718 case 1:
da6b5335
FN
1719 tmp2 = tcg_const_i32(0xffff);
1720 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1721 break;
1722 case 2:
da6b5335
FN
1723 tmp2 = tcg_const_i32(0xffffffff);
1724 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1725 break;
da6b5335
FN
1726 default:
1727 TCGV_UNUSED(tmp2);
1728 TCGV_UNUSED(tmp3);
18c9b560 1729 }
da6b5335
FN
1730 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1731 tcg_temp_free(tmp3);
1732 tcg_temp_free(tmp2);
7d1b0095 1733 tcg_temp_free_i32(tmp);
18c9b560
AZ
1734 gen_op_iwmmxt_movq_wRn_M0(wrd);
1735 gen_op_iwmmxt_set_mup();
1736 break;
1737 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1738 rd = (insn >> 12) & 0xf;
1739 wrd = (insn >> 16) & 0xf;
da6b5335 1740 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1741 return 1;
1742 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1743 tmp = tcg_temp_new_i32();
18c9b560
AZ
1744 switch ((insn >> 22) & 3) {
1745 case 0:
da6b5335
FN
1746 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1747 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1748 if (insn & 8) {
1749 tcg_gen_ext8s_i32(tmp, tmp);
1750 } else {
1751 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1752 }
1753 break;
1754 case 1:
da6b5335
FN
1755 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1756 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1757 if (insn & 8) {
1758 tcg_gen_ext16s_i32(tmp, tmp);
1759 } else {
1760 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1761 }
1762 break;
1763 case 2:
da6b5335
FN
1764 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1765 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1766 break;
18c9b560 1767 }
da6b5335 1768 store_reg(s, rd, tmp);
18c9b560
AZ
1769 break;
1770 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1771 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1772 return 1;
da6b5335 1773 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1774 switch ((insn >> 22) & 3) {
1775 case 0:
da6b5335 1776 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1777 break;
1778 case 1:
da6b5335 1779 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1780 break;
1781 case 2:
da6b5335 1782 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1783 break;
18c9b560 1784 }
da6b5335
FN
1785 tcg_gen_shli_i32(tmp, tmp, 28);
1786 gen_set_nzcv(tmp);
7d1b0095 1787 tcg_temp_free_i32(tmp);
18c9b560
AZ
1788 break;
1789 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1790 if (((insn >> 6) & 3) == 3)
1791 return 1;
18c9b560
AZ
1792 rd = (insn >> 12) & 0xf;
1793 wrd = (insn >> 16) & 0xf;
da6b5335 1794 tmp = load_reg(s, rd);
18c9b560
AZ
1795 switch ((insn >> 6) & 3) {
1796 case 0:
da6b5335 1797 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1798 break;
1799 case 1:
da6b5335 1800 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1801 break;
1802 case 2:
da6b5335 1803 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1804 break;
18c9b560 1805 }
7d1b0095 1806 tcg_temp_free_i32(tmp);
18c9b560
AZ
1807 gen_op_iwmmxt_movq_wRn_M0(wrd);
1808 gen_op_iwmmxt_set_mup();
1809 break;
1810 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1811 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1812 return 1;
da6b5335 1813 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1814 tmp2 = tcg_temp_new_i32();
da6b5335 1815 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1816 switch ((insn >> 22) & 3) {
1817 case 0:
1818 for (i = 0; i < 7; i ++) {
da6b5335
FN
1819 tcg_gen_shli_i32(tmp2, tmp2, 4);
1820 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1821 }
1822 break;
1823 case 1:
1824 for (i = 0; i < 3; i ++) {
da6b5335
FN
1825 tcg_gen_shli_i32(tmp2, tmp2, 8);
1826 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1827 }
1828 break;
1829 case 2:
da6b5335
FN
1830 tcg_gen_shli_i32(tmp2, tmp2, 16);
1831 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1832 break;
18c9b560 1833 }
da6b5335 1834 gen_set_nzcv(tmp);
7d1b0095
PM
1835 tcg_temp_free_i32(tmp2);
1836 tcg_temp_free_i32(tmp);
18c9b560
AZ
1837 break;
1838 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1839 wrd = (insn >> 12) & 0xf;
1840 rd0 = (insn >> 16) & 0xf;
1841 gen_op_iwmmxt_movq_M0_wRn(rd0);
1842 switch ((insn >> 22) & 3) {
1843 case 0:
e677137d 1844 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1845 break;
1846 case 1:
e677137d 1847 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1848 break;
1849 case 2:
e677137d 1850 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1851 break;
1852 case 3:
1853 return 1;
1854 }
1855 gen_op_iwmmxt_movq_wRn_M0(wrd);
1856 gen_op_iwmmxt_set_mup();
1857 break;
1858 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1859 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1860 return 1;
da6b5335 1861 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1862 tmp2 = tcg_temp_new_i32();
da6b5335 1863 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 for (i = 0; i < 7; i ++) {
da6b5335
FN
1867 tcg_gen_shli_i32(tmp2, tmp2, 4);
1868 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1869 }
1870 break;
1871 case 1:
1872 for (i = 0; i < 3; i ++) {
da6b5335
FN
1873 tcg_gen_shli_i32(tmp2, tmp2, 8);
1874 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1875 }
1876 break;
1877 case 2:
da6b5335
FN
1878 tcg_gen_shli_i32(tmp2, tmp2, 16);
1879 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1880 break;
18c9b560 1881 }
da6b5335 1882 gen_set_nzcv(tmp);
7d1b0095
PM
1883 tcg_temp_free_i32(tmp2);
1884 tcg_temp_free_i32(tmp);
18c9b560
AZ
1885 break;
1886 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1887 rd = (insn >> 12) & 0xf;
1888 rd0 = (insn >> 16) & 0xf;
da6b5335 1889 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1890 return 1;
1891 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1892 tmp = tcg_temp_new_i32();
18c9b560
AZ
1893 switch ((insn >> 22) & 3) {
1894 case 0:
da6b5335 1895 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1896 break;
1897 case 1:
da6b5335 1898 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1899 break;
1900 case 2:
da6b5335 1901 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1902 break;
18c9b560 1903 }
da6b5335 1904 store_reg(s, rd, tmp);
18c9b560
AZ
1905 break;
1906 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1907 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 switch ((insn >> 22) & 3) {
1913 case 0:
1914 if (insn & (1 << 21))
1915 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1916 else
1917 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1918 break;
1919 case 1:
1920 if (insn & (1 << 21))
1921 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1922 else
1923 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1924 break;
1925 case 2:
1926 if (insn & (1 << 21))
1927 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1928 else
1929 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1930 break;
1931 case 3:
1932 return 1;
1933 }
1934 gen_op_iwmmxt_movq_wRn_M0(wrd);
1935 gen_op_iwmmxt_set_mup();
1936 gen_op_iwmmxt_set_cup();
1937 break;
1938 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1939 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1944 case 0:
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_unpacklsb_M0();
1947 else
1948 gen_op_iwmmxt_unpacklub_M0();
1949 break;
1950 case 1:
1951 if (insn & (1 << 21))
1952 gen_op_iwmmxt_unpacklsw_M0();
1953 else
1954 gen_op_iwmmxt_unpackluw_M0();
1955 break;
1956 case 2:
1957 if (insn & (1 << 21))
1958 gen_op_iwmmxt_unpacklsl_M0();
1959 else
1960 gen_op_iwmmxt_unpacklul_M0();
1961 break;
1962 case 3:
1963 return 1;
1964 }
1965 gen_op_iwmmxt_movq_wRn_M0(wrd);
1966 gen_op_iwmmxt_set_mup();
1967 gen_op_iwmmxt_set_cup();
1968 break;
1969 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1970 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1971 wrd = (insn >> 12) & 0xf;
1972 rd0 = (insn >> 16) & 0xf;
1973 gen_op_iwmmxt_movq_M0_wRn(rd0);
1974 switch ((insn >> 22) & 3) {
1975 case 0:
1976 if (insn & (1 << 21))
1977 gen_op_iwmmxt_unpackhsb_M0();
1978 else
1979 gen_op_iwmmxt_unpackhub_M0();
1980 break;
1981 case 1:
1982 if (insn & (1 << 21))
1983 gen_op_iwmmxt_unpackhsw_M0();
1984 else
1985 gen_op_iwmmxt_unpackhuw_M0();
1986 break;
1987 case 2:
1988 if (insn & (1 << 21))
1989 gen_op_iwmmxt_unpackhsl_M0();
1990 else
1991 gen_op_iwmmxt_unpackhul_M0();
1992 break;
1993 case 3:
1994 return 1;
1995 }
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2001 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2002 if (((insn >> 22) & 3) == 0)
2003 return 1;
18c9b560
AZ
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2007 tmp = tcg_temp_new_i32();
da6b5335 2008 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2009 tcg_temp_free_i32(tmp);
18c9b560 2010 return 1;
da6b5335 2011 }
18c9b560 2012 switch ((insn >> 22) & 3) {
18c9b560 2013 case 1:
477955bd 2014 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2015 break;
2016 case 2:
477955bd 2017 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2018 break;
2019 case 3:
477955bd 2020 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2021 break;
2022 }
7d1b0095 2023 tcg_temp_free_i32(tmp);
18c9b560
AZ
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2027 break;
2028 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2029 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2030 if (((insn >> 22) & 3) == 0)
2031 return 1;
18c9b560
AZ
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2035 tmp = tcg_temp_new_i32();
da6b5335 2036 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2037 tcg_temp_free_i32(tmp);
18c9b560 2038 return 1;
da6b5335 2039 }
18c9b560 2040 switch ((insn >> 22) & 3) {
18c9b560 2041 case 1:
477955bd 2042 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2043 break;
2044 case 2:
477955bd 2045 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2046 break;
2047 case 3:
477955bd 2048 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2049 break;
2050 }
7d1b0095 2051 tcg_temp_free_i32(tmp);
18c9b560
AZ
2052 gen_op_iwmmxt_movq_wRn_M0(wrd);
2053 gen_op_iwmmxt_set_mup();
2054 gen_op_iwmmxt_set_cup();
2055 break;
2056 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2057 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2058 if (((insn >> 22) & 3) == 0)
2059 return 1;
18c9b560
AZ
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2063 tmp = tcg_temp_new_i32();
da6b5335 2064 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2065 tcg_temp_free_i32(tmp);
18c9b560 2066 return 1;
da6b5335 2067 }
18c9b560 2068 switch ((insn >> 22) & 3) {
18c9b560 2069 case 1:
477955bd 2070 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2071 break;
2072 case 2:
477955bd 2073 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2074 break;
2075 case 3:
477955bd 2076 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2077 break;
2078 }
7d1b0095 2079 tcg_temp_free_i32(tmp);
18c9b560
AZ
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2083 break;
2084 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2085 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2086 if (((insn >> 22) & 3) == 0)
2087 return 1;
18c9b560
AZ
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2091 tmp = tcg_temp_new_i32();
18c9b560 2092 switch ((insn >> 22) & 3) {
18c9b560 2093 case 1:
da6b5335 2094 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2095 tcg_temp_free_i32(tmp);
18c9b560 2096 return 1;
da6b5335 2097 }
477955bd 2098 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2099 break;
2100 case 2:
da6b5335 2101 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2102 tcg_temp_free_i32(tmp);
18c9b560 2103 return 1;
da6b5335 2104 }
477955bd 2105 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2106 break;
2107 case 3:
da6b5335 2108 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2109 tcg_temp_free_i32(tmp);
18c9b560 2110 return 1;
da6b5335 2111 }
477955bd 2112 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2113 break;
2114 }
7d1b0095 2115 tcg_temp_free_i32(tmp);
18c9b560
AZ
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2119 break;
2120 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2121 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2122 wrd = (insn >> 12) & 0xf;
2123 rd0 = (insn >> 16) & 0xf;
2124 rd1 = (insn >> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_minub_M0_wRn(rd1);
2132 break;
2133 case 1:
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2136 else
2137 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2138 break;
2139 case 2:
2140 if (insn & (1 << 21))
2141 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2142 else
2143 gen_op_iwmmxt_minul_M0_wRn(rd1);
2144 break;
2145 case 3:
2146 return 1;
2147 }
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2152 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
2157 switch ((insn >> 22) & 3) {
2158 case 0:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2161 else
2162 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2163 break;
2164 case 1:
2165 if (insn & (1 << 21))
2166 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2167 else
2168 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2169 break;
2170 case 2:
2171 if (insn & (1 << 21))
2172 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2173 else
2174 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2175 break;
2176 case 3:
2177 return 1;
2178 }
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2181 break;
2182 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2183 case 0x402: case 0x502: case 0x602: case 0x702:
2184 wrd = (insn >> 12) & 0xf;
2185 rd0 = (insn >> 16) & 0xf;
2186 rd1 = (insn >> 0) & 0xf;
2187 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2188 tmp = tcg_const_i32((insn >> 20) & 3);
2189 iwmmxt_load_reg(cpu_V1, rd1);
2190 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2191 tcg_temp_free(tmp);
18c9b560
AZ
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 break;
2195 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2196 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2197 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2198 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 rd1 = (insn >> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 switch ((insn >> 20) & 0xf) {
2204 case 0x0:
2205 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2206 break;
2207 case 0x1:
2208 gen_op_iwmmxt_subub_M0_wRn(rd1);
2209 break;
2210 case 0x3:
2211 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2212 break;
2213 case 0x4:
2214 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2215 break;
2216 case 0x5:
2217 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2218 break;
2219 case 0x7:
2220 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2221 break;
2222 case 0x8:
2223 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2224 break;
2225 case 0x9:
2226 gen_op_iwmmxt_subul_M0_wRn(rd1);
2227 break;
2228 case 0xb:
2229 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2230 break;
2231 default:
2232 return 1;
2233 }
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2237 break;
2238 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2239 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2240 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2241 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2245 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2246 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2247 tcg_temp_free(tmp);
18c9b560
AZ
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2251 break;
2252 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2253 case 0x418: case 0x518: case 0x618: case 0x718:
2254 case 0x818: case 0x918: case 0xa18: case 0xb18:
2255 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 switch ((insn >> 20) & 0xf) {
2261 case 0x0:
2262 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2263 break;
2264 case 0x1:
2265 gen_op_iwmmxt_addub_M0_wRn(rd1);
2266 break;
2267 case 0x3:
2268 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2269 break;
2270 case 0x4:
2271 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2272 break;
2273 case 0x5:
2274 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2275 break;
2276 case 0x7:
2277 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2278 break;
2279 case 0x8:
2280 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2281 break;
2282 case 0x9:
2283 gen_op_iwmmxt_addul_M0_wRn(rd1);
2284 break;
2285 case 0xb:
2286 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2287 break;
2288 default:
2289 return 1;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2296 case 0x408: case 0x508: case 0x608: case 0x708:
2297 case 0x808: case 0x908: case 0xa08: case 0xb08:
2298 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2299 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2300 return 1;
18c9b560
AZ
2301 wrd = (insn >> 12) & 0xf;
2302 rd0 = (insn >> 16) & 0xf;
2303 rd1 = (insn >> 0) & 0xf;
2304 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2305 switch ((insn >> 22) & 3) {
18c9b560
AZ
2306 case 1:
2307 if (insn & (1 << 21))
2308 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2309 else
2310 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2311 break;
2312 case 2:
2313 if (insn & (1 << 21))
2314 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2315 else
2316 gen_op_iwmmxt_packul_M0_wRn(rd1);
2317 break;
2318 case 3:
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2321 else
2322 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2323 break;
2324 }
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
2329 case 0x201: case 0x203: case 0x205: case 0x207:
2330 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2331 case 0x211: case 0x213: case 0x215: case 0x217:
2332 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2333 wrd = (insn >> 5) & 0xf;
2334 rd0 = (insn >> 12) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 if (rd0 == 0xf || rd1 == 0xf)
2337 return 1;
2338 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2339 tmp = load_reg(s, rd0);
2340 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2341 switch ((insn >> 16) & 0xf) {
2342 case 0x0: /* TMIA */
da6b5335 2343 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2344 break;
2345 case 0x8: /* TMIAPH */
da6b5335 2346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2347 break;
2348 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2349 if (insn & (1 << 16))
da6b5335 2350 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2351 if (insn & (1 << 17))
da6b5335
FN
2352 tcg_gen_shri_i32(tmp2, tmp2, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2354 break;
2355 default:
7d1b0095
PM
2356 tcg_temp_free_i32(tmp2);
2357 tcg_temp_free_i32(tmp);
18c9b560
AZ
2358 return 1;
2359 }
7d1b0095
PM
2360 tcg_temp_free_i32(tmp2);
2361 tcg_temp_free_i32(tmp);
18c9b560
AZ
2362 gen_op_iwmmxt_movq_wRn_M0(wrd);
2363 gen_op_iwmmxt_set_mup();
2364 break;
2365 default:
2366 return 1;
2367 }
2368
2369 return 0;
2370}
2371
a1c7273b 2372/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560
AZ
2373 (ie. an undefined instruction). */
2374static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2375{
2376 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2377 TCGv tmp, tmp2;
18c9b560
AZ
2378
2379 if ((insn & 0x0ff00f10) == 0x0e200010) {
2380 /* Multiply with Internal Accumulate Format */
2381 rd0 = (insn >> 12) & 0xf;
2382 rd1 = insn & 0xf;
2383 acc = (insn >> 5) & 7;
2384
2385 if (acc != 0)
2386 return 1;
2387
3a554c0f
FN
2388 tmp = load_reg(s, rd0);
2389 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2390 switch ((insn >> 16) & 0xf) {
2391 case 0x0: /* MIA */
3a554c0f 2392 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2393 break;
2394 case 0x8: /* MIAPH */
3a554c0f 2395 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2396 break;
2397 case 0xc: /* MIABB */
2398 case 0xd: /* MIABT */
2399 case 0xe: /* MIATB */
2400 case 0xf: /* MIATT */
18c9b560 2401 if (insn & (1 << 16))
3a554c0f 2402 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2403 if (insn & (1 << 17))
3a554c0f
FN
2404 tcg_gen_shri_i32(tmp2, tmp2, 16);
2405 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2406 break;
2407 default:
2408 return 1;
2409 }
7d1b0095
PM
2410 tcg_temp_free_i32(tmp2);
2411 tcg_temp_free_i32(tmp);
18c9b560
AZ
2412
2413 gen_op_iwmmxt_movq_wRn_M0(acc);
2414 return 0;
2415 }
2416
2417 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2418 /* Internal Accumulator Access Format */
2419 rdhi = (insn >> 16) & 0xf;
2420 rdlo = (insn >> 12) & 0xf;
2421 acc = insn & 7;
2422
2423 if (acc != 0)
2424 return 1;
2425
2426 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2427 iwmmxt_load_reg(cpu_V0, acc);
2428 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2429 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2430 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2431 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2432 } else { /* MAR */
3a554c0f
FN
2433 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2434 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2435 }
2436 return 0;
2437 }
2438
2439 return 1;
2440}
2441
c1713132
AZ
2442/* Disassemble system coprocessor instruction. Return nonzero if
2443 instruction is not defined. */
2444static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2445{
b75263d6 2446 TCGv tmp, tmp2;
c1713132
AZ
2447 uint32_t rd = (insn >> 12) & 0xf;
2448 uint32_t cp = (insn >> 8) & 0xf;
2449 if (IS_USER(s)) {
2450 return 1;
2451 }
2452
18c9b560 2453 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2454 if (!env->cp[cp].cp_read)
2455 return 1;
8984bd2e 2456 gen_set_pc_im(s->pc);
7d1b0095 2457 tmp = tcg_temp_new_i32();
b75263d6
JR
2458 tmp2 = tcg_const_i32(insn);
2459 gen_helper_get_cp(tmp, cpu_env, tmp2);
2460 tcg_temp_free(tmp2);
8984bd2e 2461 store_reg(s, rd, tmp);
c1713132
AZ
2462 } else {
2463 if (!env->cp[cp].cp_write)
2464 return 1;
8984bd2e
PB
2465 gen_set_pc_im(s->pc);
2466 tmp = load_reg(s, rd);
b75263d6
JR
2467 tmp2 = tcg_const_i32(insn);
2468 gen_helper_set_cp(cpu_env, tmp2, tmp);
2469 tcg_temp_free(tmp2);
7d1b0095 2470 tcg_temp_free_i32(tmp);
c1713132
AZ
2471 }
2472 return 0;
2473}
2474
9ee6e8bb
PB
2475static int cp15_user_ok(uint32_t insn)
2476{
2477 int cpn = (insn >> 16) & 0xf;
2478 int cpm = insn & 0xf;
2479 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2480
2481 if (cpn == 13 && cpm == 0) {
2482 /* TLS register. */
2483 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2484 return 1;
2485 }
2486 if (cpn == 7) {
2487 /* ISB, DSB, DMB. */
2488 if ((cpm == 5 && op == 4)
2489 || (cpm == 10 && (op == 4 || op == 5)))
2490 return 1;
2491 }
2492 return 0;
2493}
2494
3f26c122
RV
/* Handle MRC/MCR accesses to the cp15 c13 software thread ID (TLS)
 * registers.  Returns nonzero if the access was handled here (code has
 * been generated); zero means the caller should fall through to the
 * generic cp15 helpers.
 */
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;   /* CRn */
    int cpm = insn & 0xf;           /* CRm */
    /* opc2 in the low 3 bits, opc1 shifted up into bits [5:3].  */
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    /* The TLS registers only exist from V6K onwards.  */
    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    /* All three TLS registers live at c13, c0.  */
    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        /* Read: load the selected TLS field into rd.  */
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            /* Not a TLS register: let the generic cp15 code handle it.  */
            return 0;
        }
        store_reg(s, rd, tmp);

    } else {
        /* Write: store rd into the selected TLS field.  */
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            /* Not a TLS register: free the temp and fall through.  */
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
2543
b5ff1b31
FB
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined. */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* User-mode access is only allowed for a whitelisted subset.  */
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }

    /* Pre-v7 versions of the architecture implemented WFI via coprocessor
     * instructions rather than a separate instruction.
     */
    if ((insn & 0x0fff0fff) == 0x0e070f90) {
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    }

    if ((insn & 0x0fff0fff) == 0x0e070f58) {
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise fall through to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
    }

    rd = (insn >> 12) & 0xf;

    /* TLS register accesses are handled separately from the helper.  */
    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
2630
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction
 * encoding.  A VFP register number is a 4 bit field plus one extra bit
 * held elsewhere in the insn; for single precision the extra bit is the
 * low bit of the number, for double precision it is bit 4 (registers
 * D16-D31, only valid with VFP3).
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* NOTE: this macro executes "return 1" from the enclosing function when
 * the encoding selects D16-D31 on a core without VFP3.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Accessors for the D (destination), N and M operand register fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2650
4373f3ce
PB
2651/* Move between integer and VFP cores. */
2652static TCGv gen_vfp_mrs(void)
2653{
7d1b0095 2654 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2655 tcg_gen_mov_i32(tmp, cpu_F0s);
2656 return tmp;
2657}
2658
2659static void gen_vfp_msr(TCGv tmp)
2660{
2661 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2662 tcg_temp_free_i32(tmp);
4373f3ce
PB
2663}
2664
ad69471c
PB
2665static void gen_neon_dup_u8(TCGv var, int shift)
2666{
7d1b0095 2667 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2668 if (shift)
2669 tcg_gen_shri_i32(var, var, shift);
86831435 2670 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2671 tcg_gen_shli_i32(tmp, var, 8);
2672 tcg_gen_or_i32(var, var, tmp);
2673 tcg_gen_shli_i32(tmp, var, 16);
2674 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2675 tcg_temp_free_i32(tmp);
ad69471c
PB
2676}
2677
2678static void gen_neon_dup_low16(TCGv var)
2679{
7d1b0095 2680 TCGv tmp = tcg_temp_new_i32();
86831435 2681 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2682 tcg_gen_shli_i32(tmp, var, 16);
2683 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2684 tcg_temp_free_i32(tmp);
ad69471c
PB
2685}
2686
2687static void gen_neon_dup_high16(TCGv var)
2688{
7d1b0095 2689 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2690 tcg_gen_andi_i32(var, var, 0xffff0000);
2691 tcg_gen_shri_i32(tmp, var, 16);
2692 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2693 tcg_temp_free_i32(tmp);
ad69471c
PB
2694}
2695
8e18cde3
PM
2696static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2697{
2698 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2699 TCGv tmp;
2700 switch (size) {
2701 case 0:
2702 tmp = gen_ld8u(addr, IS_USER(s));
2703 gen_neon_dup_u8(tmp, 0);
2704 break;
2705 case 1:
2706 tmp = gen_ld16u(addr, IS_USER(s));
2707 gen_neon_dup_low16(tmp);
2708 break;
2709 case 2:
2710 tmp = gen_ld32(addr, IS_USER(s));
2711 break;
2712 default: /* Avoid compiler warnings. */
2713 abort();
2714 }
2715 return tmp;
2716}
2717
a1c7273b 2718/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95
FB
2719 (ie. an undefined instruction). */
2720static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2721{
2722 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2723 int dp, veclen;
312eea9f 2724 TCGv addr;
4373f3ce 2725 TCGv tmp;
ad69471c 2726 TCGv tmp2;
b7bcbe95 2727
40f137e1
PB
2728 if (!arm_feature(env, ARM_FEATURE_VFP))
2729 return 1;
2730
5df8bac1 2731 if (!s->vfp_enabled) {
9ee6e8bb 2732 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2733 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2734 return 1;
2735 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2736 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2737 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2738 return 1;
2739 }
b7bcbe95
FB
2740 dp = ((insn & 0xf00) == 0xb00);
2741 switch ((insn >> 24) & 0xf) {
2742 case 0xe:
2743 if (insn & (1 << 4)) {
2744 /* single register transfer */
b7bcbe95
FB
2745 rd = (insn >> 12) & 0xf;
2746 if (dp) {
9ee6e8bb
PB
2747 int size;
2748 int pass;
2749
2750 VFP_DREG_N(rn, insn);
2751 if (insn & 0xf)
b7bcbe95 2752 return 1;
9ee6e8bb
PB
2753 if (insn & 0x00c00060
2754 && !arm_feature(env, ARM_FEATURE_NEON))
2755 return 1;
2756
2757 pass = (insn >> 21) & 1;
2758 if (insn & (1 << 22)) {
2759 size = 0;
2760 offset = ((insn >> 5) & 3) * 8;
2761 } else if (insn & (1 << 5)) {
2762 size = 1;
2763 offset = (insn & (1 << 6)) ? 16 : 0;
2764 } else {
2765 size = 2;
2766 offset = 0;
2767 }
18c9b560 2768 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2769 /* vfp->arm */
ad69471c 2770 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2771 switch (size) {
2772 case 0:
9ee6e8bb 2773 if (offset)
ad69471c 2774 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2775 if (insn & (1 << 23))
ad69471c 2776 gen_uxtb(tmp);
9ee6e8bb 2777 else
ad69471c 2778 gen_sxtb(tmp);
9ee6e8bb
PB
2779 break;
2780 case 1:
9ee6e8bb
PB
2781 if (insn & (1 << 23)) {
2782 if (offset) {
ad69471c 2783 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2784 } else {
ad69471c 2785 gen_uxth(tmp);
9ee6e8bb
PB
2786 }
2787 } else {
2788 if (offset) {
ad69471c 2789 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2790 } else {
ad69471c 2791 gen_sxth(tmp);
9ee6e8bb
PB
2792 }
2793 }
2794 break;
2795 case 2:
9ee6e8bb
PB
2796 break;
2797 }
ad69471c 2798 store_reg(s, rd, tmp);
b7bcbe95
FB
2799 } else {
2800 /* arm->vfp */
ad69471c 2801 tmp = load_reg(s, rd);
9ee6e8bb
PB
2802 if (insn & (1 << 23)) {
2803 /* VDUP */
2804 if (size == 0) {
ad69471c 2805 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2806 } else if (size == 1) {
ad69471c 2807 gen_neon_dup_low16(tmp);
9ee6e8bb 2808 }
cbbccffc 2809 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2810 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2811 tcg_gen_mov_i32(tmp2, tmp);
2812 neon_store_reg(rn, n, tmp2);
2813 }
2814 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2815 } else {
2816 /* VMOV */
2817 switch (size) {
2818 case 0:
ad69471c
PB
2819 tmp2 = neon_load_reg(rn, pass);
2820 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
7d1b0095 2821 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2822 break;
2823 case 1:
ad69471c
PB
2824 tmp2 = neon_load_reg(rn, pass);
2825 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
7d1b0095 2826 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2827 break;
2828 case 2:
9ee6e8bb
PB
2829 break;
2830 }
ad69471c 2831 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2832 }
b7bcbe95 2833 }
9ee6e8bb
PB
2834 } else { /* !dp */
2835 if ((insn & 0x6f) != 0x00)
2836 return 1;
2837 rn = VFP_SREG_N(insn);
18c9b560 2838 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2839 /* vfp->arm */
2840 if (insn & (1 << 21)) {
2841 /* system register */
40f137e1 2842 rn >>= 1;
9ee6e8bb 2843
b7bcbe95 2844 switch (rn) {
40f137e1 2845 case ARM_VFP_FPSID:
4373f3ce 2846 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2847 VFP3 restricts all id registers to privileged
2848 accesses. */
2849 if (IS_USER(s)
2850 && arm_feature(env, ARM_FEATURE_VFP3))
2851 return 1;
4373f3ce 2852 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2853 break;
40f137e1 2854 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2855 if (IS_USER(s))
2856 return 1;
4373f3ce 2857 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2858 break;
40f137e1
PB
2859 case ARM_VFP_FPINST:
2860 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2861 /* Not present in VFP3. */
2862 if (IS_USER(s)
2863 || arm_feature(env, ARM_FEATURE_VFP3))
2864 return 1;
4373f3ce 2865 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2866 break;
40f137e1 2867 case ARM_VFP_FPSCR:
601d70b9 2868 if (rd == 15) {
4373f3ce
PB
2869 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2870 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2871 } else {
7d1b0095 2872 tmp = tcg_temp_new_i32();
4373f3ce
PB
2873 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2874 }
b7bcbe95 2875 break;
9ee6e8bb
PB
2876 case ARM_VFP_MVFR0:
2877 case ARM_VFP_MVFR1:
2878 if (IS_USER(s)
2879 || !arm_feature(env, ARM_FEATURE_VFP3))
2880 return 1;
4373f3ce 2881 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2882 break;
b7bcbe95
FB
2883 default:
2884 return 1;
2885 }
2886 } else {
2887 gen_mov_F0_vreg(0, rn);
4373f3ce 2888 tmp = gen_vfp_mrs();
b7bcbe95
FB
2889 }
2890 if (rd == 15) {
b5ff1b31 2891 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2892 gen_set_nzcv(tmp);
7d1b0095 2893 tcg_temp_free_i32(tmp);
4373f3ce
PB
2894 } else {
2895 store_reg(s, rd, tmp);
2896 }
b7bcbe95
FB
2897 } else {
2898 /* arm->vfp */
4373f3ce 2899 tmp = load_reg(s, rd);
b7bcbe95 2900 if (insn & (1 << 21)) {
40f137e1 2901 rn >>= 1;
b7bcbe95
FB
2902 /* system register */
2903 switch (rn) {
40f137e1 2904 case ARM_VFP_FPSID:
9ee6e8bb
PB
2905 case ARM_VFP_MVFR0:
2906 case ARM_VFP_MVFR1:
b7bcbe95
FB
2907 /* Writes are ignored. */
2908 break;
40f137e1 2909 case ARM_VFP_FPSCR:
4373f3ce 2910 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2911 tcg_temp_free_i32(tmp);
b5ff1b31 2912 gen_lookup_tb(s);
b7bcbe95 2913 break;
40f137e1 2914 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2915 if (IS_USER(s))
2916 return 1;
71b3c3de
JR
2917 /* TODO: VFP subarchitecture support.
2918 * For now, keep the EN bit only */
2919 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2920 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2921 gen_lookup_tb(s);
2922 break;
2923 case ARM_VFP_FPINST:
2924 case ARM_VFP_FPINST2:
4373f3ce 2925 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2926 break;
b7bcbe95
FB
2927 default:
2928 return 1;
2929 }
2930 } else {
4373f3ce 2931 gen_vfp_msr(tmp);
b7bcbe95
FB
2932 gen_mov_vreg_F0(0, rn);
2933 }
2934 }
2935 }
2936 } else {
2937 /* data processing */
2938 /* The opcode is in bits 23, 21, 20 and 6. */
2939 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2940 if (dp) {
2941 if (op == 15) {
2942 /* rn is opcode */
2943 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2944 } else {
2945 /* rn is register number */
9ee6e8bb 2946 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2947 }
2948
04595bf6 2949 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2950 /* Integer or single precision destination. */
9ee6e8bb 2951 rd = VFP_SREG_D(insn);
b7bcbe95 2952 } else {
9ee6e8bb 2953 VFP_DREG_D(rd, insn);
b7bcbe95 2954 }
04595bf6
PM
2955 if (op == 15 &&
2956 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2957 /* VCVT from int is always from S reg regardless of dp bit.
2958 * VCVT with immediate frac_bits has same format as SREG_M
2959 */
2960 rm = VFP_SREG_M(insn);
b7bcbe95 2961 } else {
9ee6e8bb 2962 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2963 }
2964 } else {
9ee6e8bb 2965 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2966 if (op == 15 && rn == 15) {
2967 /* Double precision destination. */
9ee6e8bb
PB
2968 VFP_DREG_D(rd, insn);
2969 } else {
2970 rd = VFP_SREG_D(insn);
2971 }
04595bf6
PM
2972 /* NB that we implicitly rely on the encoding for the frac_bits
2973 * in VCVT of fixed to float being the same as that of an SREG_M
2974 */
9ee6e8bb 2975 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2976 }
2977
69d1fc22 2978 veclen = s->vec_len;
b7bcbe95
FB
2979 if (op == 15 && rn > 3)
2980 veclen = 0;
2981
2982 /* Shut up compiler warnings. */
2983 delta_m = 0;
2984 delta_d = 0;
2985 bank_mask = 0;
3b46e624 2986
b7bcbe95
FB
2987 if (veclen > 0) {
2988 if (dp)
2989 bank_mask = 0xc;
2990 else
2991 bank_mask = 0x18;
2992
2993 /* Figure out what type of vector operation this is. */
2994 if ((rd & bank_mask) == 0) {
2995 /* scalar */
2996 veclen = 0;
2997 } else {
2998 if (dp)
69d1fc22 2999 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3000 else
69d1fc22 3001 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3002
3003 if ((rm & bank_mask) == 0) {
3004 /* mixed scalar/vector */
3005 delta_m = 0;
3006 } else {
3007 /* vector */
3008 delta_m = delta_d;
3009 }
3010 }
3011 }
3012
3013 /* Load the initial operands. */
3014 if (op == 15) {
3015 switch (rn) {
3016 case 16:
3017 case 17:
3018 /* Integer source */
3019 gen_mov_F0_vreg(0, rm);
3020 break;
3021 case 8:
3022 case 9:
3023 /* Compare */
3024 gen_mov_F0_vreg(dp, rd);
3025 gen_mov_F1_vreg(dp, rm);
3026 break;
3027 case 10:
3028 case 11:
3029 /* Compare with zero */
3030 gen_mov_F0_vreg(dp, rd);
3031 gen_vfp_F1_ld0(dp);
3032 break;
9ee6e8bb
PB
3033 case 20:
3034 case 21:
3035 case 22:
3036 case 23:
644ad806
PB
3037 case 28:
3038 case 29:
3039 case 30:
3040 case 31:
9ee6e8bb
PB
3041 /* Source and destination the same. */
3042 gen_mov_F0_vreg(dp, rd);
3043 break;
b7bcbe95
FB
3044 default:
3045 /* One source operand. */
3046 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3047 break;
b7bcbe95
FB
3048 }
3049 } else {
3050 /* Two source operands. */
3051 gen_mov_F0_vreg(dp, rn);
3052 gen_mov_F1_vreg(dp, rm);
3053 }
3054
3055 for (;;) {
3056 /* Perform the calculation. */
3057 switch (op) {
605a6aed
PM
3058 case 0: /* VMLA: fd + (fn * fm) */
3059 /* Note that order of inputs to the add matters for NaNs */
3060 gen_vfp_F1_mul(dp);
3061 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3062 gen_vfp_add(dp);
3063 break;
605a6aed 3064 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3065 gen_vfp_mul(dp);
605a6aed
PM
3066 gen_vfp_F1_neg(dp);
3067 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3068 gen_vfp_add(dp);
3069 break;
605a6aed
PM
3070 case 2: /* VNMLS: -fd + (fn * fm) */
3071 /* Note that it isn't valid to replace (-A + B) with (B - A)
3072 * or similar plausible looking simplifications
3073 * because this will give wrong results for NaNs.
3074 */
3075 gen_vfp_F1_mul(dp);
3076 gen_mov_F0_vreg(dp, rd);
3077 gen_vfp_neg(dp);
3078 gen_vfp_add(dp);
b7bcbe95 3079 break;
605a6aed 3080 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3081 gen_vfp_mul(dp);
605a6aed
PM
3082 gen_vfp_F1_neg(dp);
3083 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3084 gen_vfp_neg(dp);
605a6aed 3085 gen_vfp_add(dp);
b7bcbe95
FB
3086 break;
3087 case 4: /* mul: fn * fm */
3088 gen_vfp_mul(dp);
3089 break;
3090 case 5: /* nmul: -(fn * fm) */
3091 gen_vfp_mul(dp);
3092 gen_vfp_neg(dp);
3093 break;
3094 case 6: /* add: fn + fm */
3095 gen_vfp_add(dp);
3096 break;
3097 case 7: /* sub: fn - fm */
3098 gen_vfp_sub(dp);
3099 break;
3100 case 8: /* div: fn / fm */
3101 gen_vfp_div(dp);
3102 break;
9ee6e8bb
PB
3103 case 14: /* fconst */
3104 if (!arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
3106
3107 n = (insn << 12) & 0x80000000;
3108 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3109 if (dp) {
3110 if (i & 0x40)
3111 i |= 0x3f80;
3112 else
3113 i |= 0x4000;
3114 n |= i << 16;
4373f3ce 3115 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3116 } else {
3117 if (i & 0x40)
3118 i |= 0x780;
3119 else
3120 i |= 0x800;
3121 n |= i << 19;
5b340b51 3122 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3123 }
9ee6e8bb 3124 break;
b7bcbe95
FB
3125 case 15: /* extension space */
3126 switch (rn) {
3127 case 0: /* cpy */
3128 /* no-op */
3129 break;
3130 case 1: /* abs */
3131 gen_vfp_abs(dp);
3132 break;
3133 case 2: /* neg */
3134 gen_vfp_neg(dp);
3135 break;
3136 case 3: /* sqrt */
3137 gen_vfp_sqrt(dp);
3138 break;
60011498
PB
3139 case 4: /* vcvtb.f32.f16 */
3140 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3141 return 1;
3142 tmp = gen_vfp_mrs();
3143 tcg_gen_ext16u_i32(tmp, tmp);
3144 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3145 tcg_temp_free_i32(tmp);
60011498
PB
3146 break;
3147 case 5: /* vcvtt.f32.f16 */
3148 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3149 return 1;
3150 tmp = gen_vfp_mrs();
3151 tcg_gen_shri_i32(tmp, tmp, 16);
3152 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3153 tcg_temp_free_i32(tmp);
60011498
PB
3154 break;
3155 case 6: /* vcvtb.f16.f32 */
3156 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3157 return 1;
7d1b0095 3158 tmp = tcg_temp_new_i32();
60011498
PB
3159 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3160 gen_mov_F0_vreg(0, rd);
3161 tmp2 = gen_vfp_mrs();
3162 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3163 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3164 tcg_temp_free_i32(tmp2);
60011498
PB
3165 gen_vfp_msr(tmp);
3166 break;
3167 case 7: /* vcvtt.f16.f32 */
3168 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3169 return 1;
7d1b0095 3170 tmp = tcg_temp_new_i32();
60011498
PB
3171 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3172 tcg_gen_shli_i32(tmp, tmp, 16);
3173 gen_mov_F0_vreg(0, rd);
3174 tmp2 = gen_vfp_mrs();
3175 tcg_gen_ext16u_i32(tmp2, tmp2);
3176 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3177 tcg_temp_free_i32(tmp2);
60011498
PB
3178 gen_vfp_msr(tmp);
3179 break;
b7bcbe95
FB
3180 case 8: /* cmp */
3181 gen_vfp_cmp(dp);
3182 break;
3183 case 9: /* cmpe */
3184 gen_vfp_cmpe(dp);
3185 break;
3186 case 10: /* cmpz */
3187 gen_vfp_cmp(dp);
3188 break;
3189 case 11: /* cmpez */
3190 gen_vfp_F1_ld0(dp);
3191 gen_vfp_cmpe(dp);
3192 break;
3193 case 15: /* single<->double conversion */
3194 if (dp)
4373f3ce 3195 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3196 else
4373f3ce 3197 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3198 break;
3199 case 16: /* fuito */
5500b06c 3200 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3201 break;
3202 case 17: /* fsito */
5500b06c 3203 gen_vfp_sito(dp, 0);
b7bcbe95 3204 break;
9ee6e8bb
PB
3205 case 20: /* fshto */
3206 if (!arm_feature(env, ARM_FEATURE_VFP3))
3207 return 1;
5500b06c 3208 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3209 break;
3210 case 21: /* fslto */
3211 if (!arm_feature(env, ARM_FEATURE_VFP3))
3212 return 1;
5500b06c 3213 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3214 break;
3215 case 22: /* fuhto */
3216 if (!arm_feature(env, ARM_FEATURE_VFP3))
3217 return 1;
5500b06c 3218 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3219 break;
3220 case 23: /* fulto */
3221 if (!arm_feature(env, ARM_FEATURE_VFP3))
3222 return 1;
5500b06c 3223 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3224 break;
b7bcbe95 3225 case 24: /* ftoui */
5500b06c 3226 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3227 break;
3228 case 25: /* ftouiz */
5500b06c 3229 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3230 break;
3231 case 26: /* ftosi */
5500b06c 3232 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3233 break;
3234 case 27: /* ftosiz */
5500b06c 3235 gen_vfp_tosiz(dp, 0);
b7bcbe95 3236 break;
9ee6e8bb
PB
3237 case 28: /* ftosh */
3238 if (!arm_feature(env, ARM_FEATURE_VFP3))
3239 return 1;
5500b06c 3240 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3241 break;
3242 case 29: /* ftosl */
3243 if (!arm_feature(env, ARM_FEATURE_VFP3))
3244 return 1;
5500b06c 3245 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3246 break;
3247 case 30: /* ftouh */
3248 if (!arm_feature(env, ARM_FEATURE_VFP3))
3249 return 1;
5500b06c 3250 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3251 break;
3252 case 31: /* ftoul */
3253 if (!arm_feature(env, ARM_FEATURE_VFP3))
3254 return 1;
5500b06c 3255 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3256 break;
b7bcbe95
FB
3257 default: /* undefined */
3258 printf ("rn:%d\n", rn);
3259 return 1;
3260 }
3261 break;
3262 default: /* undefined */
3263 printf ("op:%d\n", op);
3264 return 1;
3265 }
3266
3267 /* Write back the result. */
3268 if (op == 15 && (rn >= 8 && rn <= 11))
3269 ; /* Comparison, do nothing. */
04595bf6
PM
3270 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3271 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3272 gen_mov_vreg_F0(0, rd);
3273 else if (op == 15 && rn == 15)
3274 /* conversion */
3275 gen_mov_vreg_F0(!dp, rd);
3276 else
3277 gen_mov_vreg_F0(dp, rd);
3278
3279 /* break out of the loop if we have finished */
3280 if (veclen == 0)
3281 break;
3282
3283 if (op == 15 && delta_m == 0) {
3284 /* single source one-many */
3285 while (veclen--) {
3286 rd = ((rd + delta_d) & (bank_mask - 1))
3287 | (rd & bank_mask);
3288 gen_mov_vreg_F0(dp, rd);
3289 }
3290 break;
3291 }
3292 /* Setup the next operands. */
3293 veclen--;
3294 rd = ((rd + delta_d) & (bank_mask - 1))
3295 | (rd & bank_mask);
3296
3297 if (op == 15) {
3298 /* One source operand. */
3299 rm = ((rm + delta_m) & (bank_mask - 1))
3300 | (rm & bank_mask);
3301 gen_mov_F0_vreg(dp, rm);
3302 } else {
3303 /* Two source operands. */
3304 rn = ((rn + delta_d) & (bank_mask - 1))
3305 | (rn & bank_mask);
3306 gen_mov_F0_vreg(dp, rn);
3307 if (delta_m) {
3308 rm = ((rm + delta_m) & (bank_mask - 1))
3309 | (rm & bank_mask);
3310 gen_mov_F1_vreg(dp, rm);
3311 }
3312 }
3313 }
3314 }
3315 break;
3316 case 0xc:
3317 case 0xd:
8387da81 3318 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3319 /* two-register transfer */
3320 rn = (insn >> 16) & 0xf;
3321 rd = (insn >> 12) & 0xf;
3322 if (dp) {
9ee6e8bb
PB
3323 VFP_DREG_M(rm, insn);
3324 } else {
3325 rm = VFP_SREG_M(insn);
3326 }
b7bcbe95 3327
18c9b560 3328 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3329 /* vfp->arm */
3330 if (dp) {
4373f3ce
PB
3331 gen_mov_F0_vreg(0, rm * 2);
3332 tmp = gen_vfp_mrs();
3333 store_reg(s, rd, tmp);
3334 gen_mov_F0_vreg(0, rm * 2 + 1);
3335 tmp = gen_vfp_mrs();
3336 store_reg(s, rn, tmp);
b7bcbe95
FB
3337 } else {
3338 gen_mov_F0_vreg(0, rm);
4373f3ce 3339 tmp = gen_vfp_mrs();
8387da81 3340 store_reg(s, rd, tmp);
b7bcbe95 3341 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3342 tmp = gen_vfp_mrs();
8387da81 3343 store_reg(s, rn, tmp);
b7bcbe95
FB
3344 }
3345 } else {
3346 /* arm->vfp */
3347 if (dp) {
4373f3ce
PB
3348 tmp = load_reg(s, rd);
3349 gen_vfp_msr(tmp);
3350 gen_mov_vreg_F0(0, rm * 2);
3351 tmp = load_reg(s, rn);
3352 gen_vfp_msr(tmp);
3353 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3354 } else {
8387da81 3355 tmp = load_reg(s, rd);
4373f3ce 3356 gen_vfp_msr(tmp);
b7bcbe95 3357 gen_mov_vreg_F0(0, rm);
8387da81 3358 tmp = load_reg(s, rn);
4373f3ce 3359 gen_vfp_msr(tmp);
b7bcbe95
FB
3360 gen_mov_vreg_F0(0, rm + 1);
3361 }
3362 }
3363 } else {
3364 /* Load/store */
3365 rn = (insn >> 16) & 0xf;
3366 if (dp)
9ee6e8bb 3367 VFP_DREG_D(rd, insn);
b7bcbe95 3368 else
9ee6e8bb
PB
3369 rd = VFP_SREG_D(insn);
3370 if (s->thumb && rn == 15) {
7d1b0095 3371 addr = tcg_temp_new_i32();
312eea9f 3372 tcg_gen_movi_i32(addr, s->pc & ~2);
9ee6e8bb 3373 } else {
312eea9f 3374 addr = load_reg(s, rn);
9ee6e8bb 3375 }
b7bcbe95
FB
3376 if ((insn & 0x01200000) == 0x01000000) {
3377 /* Single load/store */
3378 offset = (insn & 0xff) << 2;
3379 if ((insn & (1 << 23)) == 0)
3380 offset = -offset;
312eea9f 3381 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3382 if (insn & (1 << 20)) {
312eea9f 3383 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3384 gen_mov_vreg_F0(dp, rd);
3385 } else {
3386 gen_mov_F0_vreg(dp, rd);
312eea9f 3387 gen_vfp_st(s, dp, addr);
b7bcbe95 3388 }
7d1b0095 3389 tcg_temp_free_i32(addr);
b7bcbe95
FB
3390 } else {
3391 /* load/store multiple */
3392 if (dp)
3393 n = (insn >> 1) & 0x7f;
3394 else
3395 n = insn & 0xff;
3396
3397 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3398 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3399
3400 if (dp)
3401 offset = 8;
3402 else
3403 offset = 4;
3404 for (i = 0; i < n; i++) {
18c9b560 3405 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3406 /* load */
312eea9f 3407 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3408 gen_mov_vreg_F0(dp, rd + i);
3409 } else {
3410 /* store */
3411 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3412 gen_vfp_st(s, dp, addr);
b7bcbe95 3413 }
312eea9f 3414 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95
FB
3415 }
3416 if (insn & (1 << 21)) {
3417 /* writeback */
3418 if (insn & (1 << 24))
3419 offset = -offset * n;
3420 else if (dp && (insn & 1))
3421 offset = 4;
3422 else
3423 offset = 0;
3424
3425 if (offset != 0)
312eea9f
FN
3426 tcg_gen_addi_i32(addr, addr, offset);
3427 store_reg(s, rn, addr);
3428 } else {
7d1b0095 3429 tcg_temp_free_i32(addr);
b7bcbe95
FB
3430 }
3431 }
3432 }
3433 break;
3434 default:
3435 /* Should never happen. */
3436 return 1;
3437 }
3438 return 0;
3439}
3440
/* Emit a jump to 'dest'.  If the destination lies on the same guest page
 * as the TB being translated we can use a direct (chained) TB exit,
 * otherwise fall back to setting the PC and taking a full exit.
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* Same page: goto_tb allows direct block chaining; the exit value
         * (tb + n) identifies which of the two chain slots to patch.  */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        /* Cross-page jump: no chaining, plain exit.  */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
3455
/* Emit an unconditional jump to 'dest', honouring single-step mode.  */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;   /* keep the Thumb bit set in the branch target */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
3468
d9ba4830 3469static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3470{
ee097184 3471 if (x)
d9ba4830 3472 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3473 else
d9ba4830 3474 gen_sxth(t0);
ee097184 3475 if (y)
d9ba4830 3476 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3477 else
d9ba4830
PB
3478 gen_sxth(t1);
3479 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3480}
3481
3482/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3483static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3484 uint32_t mask;
3485
3486 mask = 0;
3487 if (flags & (1 << 0))
3488 mask |= 0xff;
3489 if (flags & (1 << 1))
3490 mask |= 0xff00;
3491 if (flags & (1 << 2))
3492 mask |= 0xff0000;
3493 if (flags & (1 << 3))
3494 mask |= 0xff000000;
9ee6e8bb 3495
2ae23e75 3496 /* Mask out undefined bits. */
9ee6e8bb 3497 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3498 if (!arm_feature(env, ARM_FEATURE_V4T))
3499 mask &= ~CPSR_T;
3500 if (!arm_feature(env, ARM_FEATURE_V5))
3501 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3502 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3503 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3504 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3505 mask &= ~CPSR_IT;
9ee6e8bb 3506 /* Mask out execution state bits. */
2ae23e75 3507 if (!spsr)
e160c51c 3508 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3509 /* Mask out privileged bits. */
3510 if (IS_USER(s))
9ee6e8bb 3511 mask &= CPSR_USER;
b5ff1b31
FB
3512 return mask;
3513}
3514
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            /* NOTE(review): t0 is not freed on this early-exit path;
             * presumably harmless because the caller then UNDEFs and
             * translation of the insn ends — TODO confirm.  */
            return 1;

        /* Read-modify-write of the banked SPSR: clear the selected bits,
         * then merge in the corresponding bits of t0.
         * (load_cpu_field takes the CPU state field name 'spsr', not the
         * value of the 'spsr' parameter.)  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR writes can change privilege/flags that affect translation.  */
    gen_lookup_tb(s);
    return 0;
}
3536
2fbac54b
FN
3537/* Returns nonzero if access to the PSR is not permitted. */
3538static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3539{
3540 TCGv tmp;
7d1b0095 3541 tmp = tcg_temp_new_i32();
2fbac54b
FN
3542 tcg_gen_movi_i32(tmp, val);
3543 return gen_set_psr(s, mask, spsr, tmp);
3544}
3545
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    /* Restore the full CPSR from the banked SPSR.  */
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    /* CPU state (mode, flags) changed: force the main loop to resync.  */
    s->is_jmp = DISAS_UPDATE;
}
3556
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    /* Write the saved CPSR first, then the return address to r15.  */
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3565
/* Sync the in-translation Thumb IT (if-then) state back into the CPU
 * condexec_bits field, so an exception sees the correct IT state.
 * Only emitted when we are inside an IT block (condexec_mask != 0).
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack (cond, mask) into the architectural ITSTATE layout.  */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
3b46e624 3576
/* Raise exception 'excp' for the current instruction: rewind the PC by
 * 'offset' bytes (back to the start of the faulting insn) and record the
 * IT state first, so the exception is taken with consistent CPU state.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3584
9ee6e8bb
PB
3585static void gen_nop_hint(DisasContext *s, int val)
3586{
3587 switch (val) {
3588 case 3: /* wfi */
8984bd2e 3589 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3590 s->is_jmp = DISAS_WFI;
3591 break;
3592 case 2: /* wfe */
3593 case 4: /* sev */
3594 /* TODO: Implement SEV and WFE. May help SMP performance. */
3595 default: /* nop */
3596 break;
3597 }
3598}
99c475ab 3599
/* Shorthand argument list: destination and both sources of a 2-operand
 * wide op are cpu_V0 (i.e. op is applied in place with cpu_V1).  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3601
62698be3 3602static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3603{
3604 switch (size) {
dd8fbd78
FN
3605 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3606 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3607 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3608 default: abort();
9ee6e8bb 3609 }
9ee6e8bb
PB
3610}
3611
dd8fbd78 3612static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3613{
3614 switch (size) {
dd8fbd78
FN
3615 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3616 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3617 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3618 default: return;
3619 }
3620}
3621
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
/* (With one element per 32-bit register half, "pairwise" max/min reduces
 * to a plain two-operand max/min, so alias the helpers.)  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
3627
/* Dispatch tmp = neon_<name>_<sign><width>(env, tmp, tmp2) on the (size, u)
 * pair in scope: size selects 8/16/32-bit lanes, u selects unsigned.
 * These helpers take cpu_env (presumably so they can update global state
 * such as saturation flags — TODO confirm).  An invalid (size, u) combo
 * returns 1 from the *enclosing* function (decode failure).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3650
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not need cpu_env:
 * dispatch tmp = neon_<name>_<sign><width>(tmp, tmp2) on (size, u).
 * An invalid combination returns 1 from the enclosing function.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3673
dd8fbd78 3674static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3675{
7d1b0095 3676 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3677 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3678 return tmp;
9ee6e8bb
PB
3679}
3680
/* Store 'var' into Neon scratch slot 'scratch' and free it (consumes var). */
static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
3686
/* Load a Neon scalar operand, duplicated across a 32-bit value.
 * 'reg' encodes the D register and lane index together (scalar reference);
 * for 16-bit scalars the lane-select bit (bit 3) picks the high or low
 * half, which is then broadcast to both halves of the result.
 * Caller owns the returned temporary.
 */
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        /* 16-bit scalar: low 3 bits = D reg, bit 3 = half selector.  */
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        /* 32-bit scalar: the loaded word is already the whole value.  */
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3702
/* Emit a Neon VUZP (unzip/de-interleave) of registers rd/rm.
 * 'q' selects quad-register operation; element size log2(bytes) in 'size'.
 * Returns 1 for the invalid doubleword 32-bit case (handled as VTRN by
 * the caller per the architecture — TODO confirm), 0 on success.
 * The helpers take the register *indices* as values.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
3741
/* Emit a Neon VZIP (zip/interleave) of registers rd/rm.
 * Mirror image of gen_neon_unzip: 'q' selects quad operation, 'size' is
 * log2 of the element size in bytes.  Returns 1 for the invalid
 * doubleword 32-bit case, 0 on success.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
3780
19457615
FN
3781static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3782{
3783 TCGv rd, tmp;
3784
7d1b0095
PM
3785 rd = tcg_temp_new_i32();
3786 tmp = tcg_temp_new_i32();
19457615
FN
3787
3788 tcg_gen_shli_i32(rd, t0, 8);
3789 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3790 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3791 tcg_gen_or_i32(rd, rd, tmp);
3792
3793 tcg_gen_shri_i32(t1, t1, 8);
3794 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3795 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3796 tcg_gen_or_i32(t1, t1, tmp);
3797 tcg_gen_mov_i32(t0, rd);
3798
7d1b0095
PM
3799 tcg_temp_free_i32(tmp);
3800 tcg_temp_free_i32(rd);
19457615
FN
3801}
3802
3803static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3804{
3805 TCGv rd, tmp;
3806
7d1b0095
PM
3807 rd = tcg_temp_new_i32();
3808 tmp = tcg_temp_new_i32();
19457615
FN
3809
3810 tcg_gen_shli_i32(rd, t0, 16);
3811 tcg_gen_andi_i32(tmp, t1, 0xffff);
3812 tcg_gen_or_i32(rd, rd, tmp);
3813 tcg_gen_shri_i32(t1, t1, 16);
3814 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3815 tcg_gen_or_i32(t1, t1, tmp);
3816 tcg_gen_mov_i32(t0, rd);
3817
7d1b0095
PM
3818 tcg_temp_free_i32(tmp);
3819 tcg_temp_free_i32(rd);
19457615
FN
3820}
3821
3822
/* Per-opcode layout of the Neon "load/store multiple structures" forms,
 * indexed by the instruction's type field (op = insn[11:8], see
 * disas_neon_ls_insn): number of registers transferred, element
 * interleave factor, and register spacing.  Presumably rows map to the
 * VLD/VST1..4 encodings of the ARM ARM — TODO confirm against the spec.
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3840
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;          /* type field: selects VLD/VST1..4 layout */
    int nregs;       /* D registers transferred */
    int interleave;  /* element interleave factor */
    int spacing;     /* register spacing (1 or 2) */
    int stride;      /* bytes between consecutive accesses / writeback */
    int size;        /* log2 of element size in bytes */
    int reg;
    int pass;        /* which 32-bit half of a D register */
    int load;        /* nonzero = load, zero = store */
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;   /* base address register */
    rm = insn & 0xf;           /* writeback mode / index register */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved layouts, restart from the base address with
             * the appropriate per-register offset.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole-D-register access.  */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Transfer each D register as two 32-bit passes, packing
                 * or unpacking sub-word elements as needed.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit elements per 32-bit pass.  */
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit elements per 32-bit pass.  */
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;   /* alignment hint bit */
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;   /* index_align field */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into its lane, keeping
                         * the other lanes of the register intact.  */
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * increment by the transfer size, otherwise add register rm.  */
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4148
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
/* Each result bit comes from t where the corresponding bit of c is set,
 * otherwise from f: dest = (t & c) | (f & ~c).  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4156
a7812ae4 4157static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4158{
4159 switch (size) {
4160 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4161 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4162 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4163 default: abort();
4164 }
4165}
4166
/* Signed saturating narrow: halve each element width of the 64-bit
 * source, saturating signed->signed (VQMOVN.S).  */
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
    default: abort();
    }
}
4176
/* Unsigned saturating narrow: halve each element width of the 64-bit
 * source, saturating unsigned->unsigned (VQMOVN.U).  */
static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
    default: abort();
    }
}
4186
/* Signed-to-unsigned saturating narrow: halve each element width,
 * clamping negative inputs to zero (VQMOVUN).  */
static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
    default: abort();
    }
}
4196
ad69471c
PB
4197static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4198 int q, int u)
4199{
4200 if (q) {
4201 if (u) {
4202 switch (size) {
4203 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4204 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4205 default: abort();
4206 }
4207 } else {
4208 switch (size) {
4209 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4210 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4211 default: abort();
4212 }
4213 }
4214 } else {
4215 if (u) {
4216 switch (size) {
b408a9b0
CL
4217 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4218 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4219 default: abort();
4220 }
4221 } else {
4222 switch (size) {
4223 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4224 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4225 default: abort();
4226 }
4227 }
4228 }
4229}
4230
a7812ae4 4231static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4232{
4233 if (u) {
4234 switch (size) {
4235 case 0: gen_helper_neon_widen_u8(dest, src); break;
4236 case 1: gen_helper_neon_widen_u16(dest, src); break;
4237 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4238 default: abort();
4239 }
4240 } else {
4241 switch (size) {
4242 case 0: gen_helper_neon_widen_s8(dest, src); break;
4243 case 1: gen_helper_neon_widen_s16(dest, src); break;
4244 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4245 default: abort();
4246 }
4247 }
7d1b0095 4248 tcg_temp_free_i32(src);
ad69471c
PB
4249}
4250
4251static inline void gen_neon_addl(int size)
4252{
4253 switch (size) {
4254 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4255 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4256 case 2: tcg_gen_add_i64(CPU_V001); break;
4257 default: abort();
4258 }
4259}
4260
4261static inline void gen_neon_subl(int size)
4262{
4263 switch (size) {
4264 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4265 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4266 case 2: tcg_gen_sub_i64(CPU_V001); break;
4267 default: abort();
4268 }
4269}
4270
a7812ae4 4271static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4272{
4273 switch (size) {
4274 case 0: gen_helper_neon_negl_u16(var, var); break;
4275 case 1: gen_helper_neon_negl_u32(var, var); break;
4276 case 2: gen_helper_neon_negl_u64(var, var); break;
4277 default: abort();
4278 }
4279}
4280
a7812ae4 4281static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4282{
4283 switch (size) {
2a3f75b4
PM
4284 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4285 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
ad69471c
PB
4286 default: abort();
4287 }
4288}
4289
a7812ae4 4290static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4291{
a7812ae4 4292 TCGv_i64 tmp;
ad69471c
PB
4293
4294 switch ((size << 1) | u) {
4295 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4296 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4297 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4298 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4299 case 4:
4300 tmp = gen_muls_i64_i32(a, b);
4301 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4302 tcg_temp_free_i64(tmp);
ad69471c
PB
4303 break;
4304 case 5:
4305 tmp = gen_mulu_i64_i32(a, b);
4306 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4307 tcg_temp_free_i64(tmp);
ad69471c
PB
4308 break;
4309 default: abort();
4310 }
c6067f04
CL
4311
4312 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4313 Don't forget to clean them now. */
4314 if (size < 2) {
7d1b0095
PM
4315 tcg_temp_free_i32(a);
4316 tcg_temp_free_i32(b);
c6067f04 4317 }
ad69471c
PB
4318}
4319
c33171c7
PM
4320static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4321{
4322 if (op) {
4323 if (u) {
4324 gen_neon_unarrow_sats(size, dest, src);
4325 } else {
4326 gen_neon_narrow(size, dest, src);
4327 }
4328 } else {
4329 if (u) {
4330 gen_neon_narrow_satu(size, dest, src);
4331 } else {
4332 gen_neon_narrow_sats(size, dest, src);
4333 }
4334 }
4335}
4336
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 * Ops with no #define here (24, 25) have no entry in neon_3r_sizes
 * below and therefore UNDEF.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Each entry in this array has bit n set if the 3-reg-same insn allows
 * size value n (otherwise it will UNDEF); ops with no entry get 0 and
 * so always UNDEF.  Indexed by the NEON_3R_* op constants above.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};

4404
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Unallocated op values have no #define and no entry in
 * neon_2rm_sizes, so they UNDEF.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

4454static int neon_2rm_is_float_op(int op)
4455{
4456 /* Return true if this neon 2reg-misc op is float-to-float */
4457 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4458 op >= NEON_2RM_VRECPE_F);
4459}
4460
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x7 = all integer sizes, 0x1 = 8-bit only, 0x2 = 16-bit only,
 * 0x4 = 32-bit only.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};

4511
9ee6e8bb
PB
4512/* Translate a NEON data processing instruction. Return nonzero if the
4513 instruction is invalid.
ad69471c
PB
4514 We process data in a mixture of 32-bit and 64-bit chunks.
4515 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4516
9ee6e8bb
PB
4517static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4518{
4519 int op;
4520 int q;
4521 int rd, rn, rm;
4522 int size;
4523 int shift;
4524 int pass;
4525 int count;
4526 int pairwise;
4527 int u;
ca9a32e4 4528 uint32_t imm, mask;
b75263d6 4529 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4530 TCGv_i64 tmp64;
9ee6e8bb 4531
5df8bac1 4532 if (!s->vfp_enabled)
9ee6e8bb
PB
4533 return 1;
4534 q = (insn & (1 << 6)) != 0;
4535 u = (insn >> 24) & 1;
4536 VFP_DREG_D(rd, insn);
4537 VFP_DREG_N(rn, insn);
4538 VFP_DREG_M(rm, insn);
4539 size = (insn >> 20) & 3;
4540 if ((insn & (1 << 23)) == 0) {
4541 /* Three register same length. */
4542 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4543 /* Catch invalid op and bad size combinations: UNDEF */
4544 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4545 return 1;
4546 }
25f84f79
PM
4547 /* All insns of this form UNDEF for either this condition or the
4548 * superset of cases "Q==1"; we catch the latter later.
4549 */
4550 if (q && ((rd | rn | rm) & 1)) {
4551 return 1;
4552 }
62698be3
PM
4553 if (size == 3 && op != NEON_3R_LOGIC) {
4554 /* 64-bit element instructions. */
9ee6e8bb 4555 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4556 neon_load_reg64(cpu_V0, rn + pass);
4557 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4558 switch (op) {
62698be3 4559 case NEON_3R_VQADD:
9ee6e8bb 4560 if (u) {
2a3f75b4 4561 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4562 } else {
2a3f75b4 4563 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
2c0262af 4564 }
9ee6e8bb 4565 break;
62698be3 4566 case NEON_3R_VQSUB:
9ee6e8bb 4567 if (u) {
2a3f75b4 4568 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 4569 } else {
2a3f75b4 4570 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
4571 }
4572 break;
62698be3 4573 case NEON_3R_VSHL:
ad69471c
PB
4574 if (u) {
4575 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4576 } else {
4577 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4578 }
4579 break;
62698be3 4580 case NEON_3R_VQSHL:
ad69471c 4581 if (u) {
2a3f75b4 4582 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4583 } else {
2a3f75b4 4584 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
ad69471c
PB
4585 }
4586 break;
62698be3 4587 case NEON_3R_VRSHL:
ad69471c
PB
4588 if (u) {
4589 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4590 } else {
ad69471c
PB
4591 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4592 }
4593 break;
62698be3 4594 case NEON_3R_VQRSHL:
ad69471c 4595 if (u) {
2a3f75b4 4596 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
ad69471c 4597 } else {
2a3f75b4 4598 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4599 }
9ee6e8bb 4600 break;
62698be3 4601 case NEON_3R_VADD_VSUB:
9ee6e8bb 4602 if (u) {
ad69471c 4603 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4604 } else {
ad69471c 4605 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4606 }
4607 break;
4608 default:
4609 abort();
2c0262af 4610 }
ad69471c 4611 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4612 }
9ee6e8bb 4613 return 0;
2c0262af 4614 }
25f84f79 4615 pairwise = 0;
9ee6e8bb 4616 switch (op) {
62698be3
PM
4617 case NEON_3R_VSHL:
4618 case NEON_3R_VQSHL:
4619 case NEON_3R_VRSHL:
4620 case NEON_3R_VQRSHL:
9ee6e8bb 4621 {
ad69471c
PB
4622 int rtmp;
4623 /* Shift instruction operands are reversed. */
4624 rtmp = rn;
9ee6e8bb 4625 rn = rm;
ad69471c 4626 rm = rtmp;
9ee6e8bb 4627 }
2c0262af 4628 break;
25f84f79
PM
4629 case NEON_3R_VPADD:
4630 if (u) {
4631 return 1;
4632 }
4633 /* Fall through */
62698be3
PM
4634 case NEON_3R_VPMAX:
4635 case NEON_3R_VPMIN:
9ee6e8bb 4636 pairwise = 1;
2c0262af 4637 break;
25f84f79
PM
4638 case NEON_3R_FLOAT_ARITH:
4639 pairwise = (u && size < 2); /* if VPADD (float) */
4640 break;
4641 case NEON_3R_FLOAT_MINMAX:
4642 pairwise = u; /* if VPMIN/VPMAX (float) */
4643 break;
4644 case NEON_3R_FLOAT_CMP:
4645 if (!u && size) {
4646 /* no encoding for U=0 C=1x */
4647 return 1;
4648 }
4649 break;
4650 case NEON_3R_FLOAT_ACMP:
4651 if (!u) {
4652 return 1;
4653 }
4654 break;
4655 case NEON_3R_VRECPS_VRSQRTS:
4656 if (u) {
4657 return 1;
4658 }
2c0262af 4659 break;
25f84f79
PM
4660 case NEON_3R_VMUL:
4661 if (u && (size != 0)) {
4662 /* UNDEF on invalid size for polynomial subcase */
4663 return 1;
4664 }
2c0262af 4665 break;
9ee6e8bb 4666 default:
2c0262af 4667 break;
9ee6e8bb 4668 }
dd8fbd78 4669
25f84f79
PM
4670 if (pairwise && q) {
4671 /* All the pairwise insns UNDEF if Q is set */
4672 return 1;
4673 }
4674
9ee6e8bb
PB
4675 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4676
4677 if (pairwise) {
4678 /* Pairwise. */
a5a14945
JR
4679 if (pass < 1) {
4680 tmp = neon_load_reg(rn, 0);
4681 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4682 } else {
a5a14945
JR
4683 tmp = neon_load_reg(rm, 0);
4684 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4685 }
4686 } else {
4687 /* Elementwise. */
dd8fbd78
FN
4688 tmp = neon_load_reg(rn, pass);
4689 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4690 }
4691 switch (op) {
62698be3 4692 case NEON_3R_VHADD:
9ee6e8bb
PB
4693 GEN_NEON_INTEGER_OP(hadd);
4694 break;
62698be3 4695 case NEON_3R_VQADD:
2a3f75b4 4696 GEN_NEON_INTEGER_OP(qadd);
2c0262af 4697 break;
62698be3 4698 case NEON_3R_VRHADD:
9ee6e8bb 4699 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4700 break;
62698be3 4701 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4702 switch ((u << 2) | size) {
4703 case 0: /* VAND */
dd8fbd78 4704 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4705 break;
4706 case 1: /* BIC */
f669df27 4707 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4708 break;
4709 case 2: /* VORR */
dd8fbd78 4710 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4711 break;
4712 case 3: /* VORN */
f669df27 4713 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4714 break;
4715 case 4: /* VEOR */
dd8fbd78 4716 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4717 break;
4718 case 5: /* VBSL */
dd8fbd78
FN
4719 tmp3 = neon_load_reg(rd, pass);
4720 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4721 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4722 break;
4723 case 6: /* VBIT */
dd8fbd78
FN
4724 tmp3 = neon_load_reg(rd, pass);
4725 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4726 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4727 break;
4728 case 7: /* VBIF */
dd8fbd78
FN
4729 tmp3 = neon_load_reg(rd, pass);
4730 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4731 tcg_temp_free_i32(tmp3);
9ee6e8bb 4732 break;
2c0262af
FB
4733 }
4734 break;
62698be3 4735 case NEON_3R_VHSUB:
9ee6e8bb
PB
4736 GEN_NEON_INTEGER_OP(hsub);
4737 break;
62698be3 4738 case NEON_3R_VQSUB:
2a3f75b4 4739 GEN_NEON_INTEGER_OP(qsub);
2c0262af 4740 break;
62698be3 4741 case NEON_3R_VCGT:
9ee6e8bb
PB
4742 GEN_NEON_INTEGER_OP(cgt);
4743 break;
62698be3 4744 case NEON_3R_VCGE:
9ee6e8bb
PB
4745 GEN_NEON_INTEGER_OP(cge);
4746 break;
62698be3 4747 case NEON_3R_VSHL:
ad69471c 4748 GEN_NEON_INTEGER_OP(shl);
2c0262af 4749 break;
62698be3 4750 case NEON_3R_VQSHL:
2a3f75b4 4751 GEN_NEON_INTEGER_OP(qshl);
2c0262af 4752 break;
62698be3 4753 case NEON_3R_VRSHL:
ad69471c 4754 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4755 break;
62698be3 4756 case NEON_3R_VQRSHL:
2a3f75b4 4757 GEN_NEON_INTEGER_OP(qrshl);
9ee6e8bb 4758 break;
62698be3 4759 case NEON_3R_VMAX:
9ee6e8bb
PB
4760 GEN_NEON_INTEGER_OP(max);
4761 break;
62698be3 4762 case NEON_3R_VMIN:
9ee6e8bb
PB
4763 GEN_NEON_INTEGER_OP(min);
4764 break;
62698be3 4765 case NEON_3R_VABD:
9ee6e8bb
PB
4766 GEN_NEON_INTEGER_OP(abd);
4767 break;
62698be3 4768 case NEON_3R_VABA:
9ee6e8bb 4769 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4770 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4771 tmp2 = neon_load_reg(rd, pass);
4772 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4773 break;
62698be3 4774 case NEON_3R_VADD_VSUB:
9ee6e8bb 4775 if (!u) { /* VADD */
62698be3 4776 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4777 } else { /* VSUB */
4778 switch (size) {
dd8fbd78
FN
4779 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4780 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4781 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4782 default: abort();
9ee6e8bb
PB
4783 }
4784 }
4785 break;
62698be3 4786 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4787 if (!u) { /* VTST */
4788 switch (size) {
dd8fbd78
FN
4789 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4790 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4791 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4792 default: abort();
9ee6e8bb
PB
4793 }
4794 } else { /* VCEQ */
4795 switch (size) {
dd8fbd78
FN
4796 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4797 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4798 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4799 default: abort();
9ee6e8bb
PB
4800 }
4801 }
4802 break;
62698be3 4803 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4804 switch (size) {
dd8fbd78
FN
4805 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4806 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4807 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4808 default: abort();
9ee6e8bb 4809 }
7d1b0095 4810 tcg_temp_free_i32(tmp2);
dd8fbd78 4811 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4812 if (u) { /* VMLS */
dd8fbd78 4813 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4814 } else { /* VMLA */
dd8fbd78 4815 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4816 }
4817 break;
62698be3 4818 case NEON_3R_VMUL:
9ee6e8bb 4819 if (u) { /* polynomial */
dd8fbd78 4820 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4821 } else { /* Integer */
4822 switch (size) {
dd8fbd78
FN
4823 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4824 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4825 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4826 default: abort();
9ee6e8bb
PB
4827 }
4828 }
4829 break;
62698be3 4830 case NEON_3R_VPMAX:
9ee6e8bb
PB
4831 GEN_NEON_INTEGER_OP(pmax);
4832 break;
62698be3 4833 case NEON_3R_VPMIN:
9ee6e8bb
PB
4834 GEN_NEON_INTEGER_OP(pmin);
4835 break;
62698be3 4836 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4837 if (!u) { /* VQDMULH */
4838 switch (size) {
2a3f75b4
PM
4839 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4840 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4841 default: abort();
9ee6e8bb 4842 }
62698be3 4843 } else { /* VQRDMULH */
9ee6e8bb 4844 switch (size) {
2a3f75b4
PM
4845 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4846 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
62698be3 4847 default: abort();
9ee6e8bb
PB
4848 }
4849 }
4850 break;
62698be3 4851 case NEON_3R_VPADD:
9ee6e8bb 4852 switch (size) {
dd8fbd78
FN
4853 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4854 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4855 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4856 default: abort();
9ee6e8bb
PB
4857 }
4858 break;
62698be3 4859 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
9ee6e8bb
PB
4860 switch ((u << 2) | size) {
4861 case 0: /* VADD */
dd8fbd78 4862 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4863 break;
4864 case 2: /* VSUB */
dd8fbd78 4865 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4866 break;
4867 case 4: /* VPADD */
dd8fbd78 4868 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4869 break;
4870 case 6: /* VABD */
dd8fbd78 4871 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
4872 break;
4873 default:
62698be3 4874 abort();
9ee6e8bb
PB
4875 }
4876 break;
62698be3 4877 case NEON_3R_FLOAT_MULTIPLY:
dd8fbd78 4878 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb 4879 if (!u) {
7d1b0095 4880 tcg_temp_free_i32(tmp2);
dd8fbd78 4881 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4882 if (size == 0) {
dd8fbd78 4883 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb 4884 } else {
dd8fbd78 4885 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
4886 }
4887 }
4888 break;
62698be3 4889 case NEON_3R_FLOAT_CMP:
9ee6e8bb 4890 if (!u) {
dd8fbd78 4891 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
b5ff1b31 4892 } else {
9ee6e8bb 4893 if (size == 0)
dd8fbd78 4894 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
9ee6e8bb 4895 else
dd8fbd78 4896 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
b5ff1b31 4897 }
2c0262af 4898 break;
62698be3 4899 case NEON_3R_FLOAT_ACMP:
9ee6e8bb 4900 if (size == 0)
dd8fbd78 4901 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
9ee6e8bb 4902 else
dd8fbd78 4903 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
2c0262af 4904 break;
62698be3 4905 case NEON_3R_FLOAT_MINMAX:
9ee6e8bb 4906 if (size == 0)
dd8fbd78 4907 gen_helper_neon_max_f32(tmp, tmp, tmp2);
9ee6e8bb 4908 else
dd8fbd78 4909 gen_helper_neon_min_f32(tmp, tmp, tmp2);
9ee6e8bb 4910 break;
62698be3 4911 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4912 if (size == 0)
dd8fbd78 4913 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4914 else
dd8fbd78 4915 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4916 break;
9ee6e8bb
PB
4917 default:
4918 abort();
2c0262af 4919 }
7d1b0095 4920 tcg_temp_free_i32(tmp2);
dd8fbd78 4921
9ee6e8bb
PB
4922 /* Save the result. For elementwise operations we can put it
4923 straight into the destination register. For pairwise operations
4924 we have to be careful to avoid clobbering the source operands. */
4925 if (pairwise && rd == rm) {
dd8fbd78 4926 neon_store_scratch(pass, tmp);
9ee6e8bb 4927 } else {
dd8fbd78 4928 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4929 }
4930
4931 } /* for pass */
4932 if (pairwise && rd == rm) {
4933 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4934 tmp = neon_load_scratch(pass);
4935 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4936 }
4937 }
ad69471c 4938 /* End of 3 register same size operations. */
9ee6e8bb
PB
4939 } else if (insn & (1 << 4)) {
4940 if ((insn & 0x00380080) != 0) {
4941 /* Two registers and shift. */
4942 op = (insn >> 8) & 0xf;
4943 if (insn & (1 << 7)) {
cc13115b
PM
4944 /* 64-bit shift. */
4945 if (op > 7) {
4946 return 1;
4947 }
9ee6e8bb
PB
4948 size = 3;
4949 } else {
4950 size = 2;
4951 while ((insn & (1 << (size + 19))) == 0)
4952 size--;
4953 }
4954 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4955 /* To avoid excessive dumplication of ops we implement shift
4956 by immediate using the variable shift operations. */
4957 if (op < 8) {
4958 /* Shift by immediate:
4959 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4960 if (q && ((rd | rm) & 1)) {
4961 return 1;
4962 }
4963 if (!u && (op == 4 || op == 6)) {
4964 return 1;
4965 }
9ee6e8bb
PB
4966 /* Right shifts are encoded as N - shift, where N is the
4967 element size in bits. */
4968 if (op <= 4)
4969 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4970 if (size == 3) {
4971 count = q + 1;
4972 } else {
4973 count = q ? 4: 2;
4974 }
4975 switch (size) {
4976 case 0:
4977 imm = (uint8_t) shift;
4978 imm |= imm << 8;
4979 imm |= imm << 16;
4980 break;
4981 case 1:
4982 imm = (uint16_t) shift;
4983 imm |= imm << 16;
4984 break;
4985 case 2:
4986 case 3:
4987 imm = shift;
4988 break;
4989 default:
4990 abort();
4991 }
4992
4993 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4994 if (size == 3) {
4995 neon_load_reg64(cpu_V0, rm + pass);
4996 tcg_gen_movi_i64(cpu_V1, imm);
4997 switch (op) {
4998 case 0: /* VSHR */
4999 case 1: /* VSRA */
5000 if (u)
5001 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5002 else
ad69471c 5003 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5004 break;
ad69471c
PB
5005 case 2: /* VRSHR */
5006 case 3: /* VRSRA */
5007 if (u)
5008 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5009 else
ad69471c 5010 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5011 break;
ad69471c 5012 case 4: /* VSRI */
ad69471c
PB
5013 case 5: /* VSHL, VSLI */
5014 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5015 break;
0322b26e 5016 case 6: /* VQSHLU */
cc13115b 5017 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
ad69471c 5018 break;
0322b26e
PM
5019 case 7: /* VQSHL */
5020 if (u) {
2a3f75b4 5021 gen_helper_neon_qshl_u64(cpu_V0,
0322b26e
PM
5022 cpu_V0, cpu_V1);
5023 } else {
2a3f75b4 5024 gen_helper_neon_qshl_s64(cpu_V0,
0322b26e
PM
5025 cpu_V0, cpu_V1);
5026 }
9ee6e8bb 5027 break;
9ee6e8bb 5028 }
ad69471c
PB
5029 if (op == 1 || op == 3) {
5030 /* Accumulate. */
5371cb81 5031 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5032 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5033 } else if (op == 4 || (op == 5 && u)) {
5034 /* Insert */
923e6509
CL
5035 neon_load_reg64(cpu_V1, rd + pass);
5036 uint64_t mask;
5037 if (shift < -63 || shift > 63) {
5038 mask = 0;
5039 } else {
5040 if (op == 4) {
5041 mask = 0xffffffffffffffffull >> -shift;
5042 } else {
5043 mask = 0xffffffffffffffffull << shift;
5044 }
5045 }
5046 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5047 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5048 }
5049 neon_store_reg64(cpu_V0, rd + pass);
5050 } else { /* size < 3 */
5051 /* Operands in T0 and T1. */
dd8fbd78 5052 tmp = neon_load_reg(rm, pass);
7d1b0095 5053 tmp2 = tcg_temp_new_i32();
dd8fbd78 5054 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5055 switch (op) {
5056 case 0: /* VSHR */
5057 case 1: /* VSRA */
5058 GEN_NEON_INTEGER_OP(shl);
5059 break;
5060 case 2: /* VRSHR */
5061 case 3: /* VRSRA */
5062 GEN_NEON_INTEGER_OP(rshl);
5063 break;
5064 case 4: /* VSRI */
ad69471c
PB
5065 case 5: /* VSHL, VSLI */
5066 switch (size) {
dd8fbd78
FN
5067 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5068 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5069 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5070 default: abort();
ad69471c
PB
5071 }
5072 break;
0322b26e 5073 case 6: /* VQSHLU */
ad69471c 5074 switch (size) {
0322b26e 5075 case 0:
2a3f75b4 5076 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
0322b26e
PM
5077 break;
5078 case 1:
2a3f75b4 5079 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
0322b26e
PM
5080 break;
5081 case 2:
2a3f75b4 5082 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
0322b26e
PM
5083 break;
5084 default:
cc13115b 5085 abort();
ad69471c
PB
5086 }
5087 break;
0322b26e 5088 case 7: /* VQSHL */
2a3f75b4 5089 GEN_NEON_INTEGER_OP(qshl);
0322b26e 5090 break;
ad69471c 5091 }
7d1b0095 5092 tcg_temp_free_i32(tmp2);
ad69471c
PB
5093
5094 if (op == 1 || op == 3) {
5095 /* Accumulate. */
dd8fbd78 5096 tmp2 = neon_load_reg(rd, pass);
5371cb81 5097 gen_neon_add(size, tmp, tmp2);
7d1b0095 5098 tcg_temp_free_i32(tmp2);
ad69471c
PB
5099 } else if (op == 4 || (op == 5 && u)) {
5100 /* Insert */
5101 switch (size) {
5102 case 0:
5103 if (op == 4)
ca9a32e4 5104 mask = 0xff >> -shift;
ad69471c 5105 else
ca9a32e4
JR
5106 mask = (uint8_t)(0xff << shift);
5107 mask |= mask << 8;
5108 mask |= mask << 16;
ad69471c
PB
5109 break;
5110 case 1:
5111 if (op == 4)
ca9a32e4 5112 mask = 0xffff >> -shift;
ad69471c 5113 else
ca9a32e4
JR
5114 mask = (uint16_t)(0xffff << shift);
5115 mask |= mask << 16;
ad69471c
PB
5116 break;
5117 case 2:
ca9a32e4
JR
5118 if (shift < -31 || shift > 31) {
5119 mask = 0;
5120 } else {
5121 if (op == 4)
5122 mask = 0xffffffffu >> -shift;
5123 else
5124 mask = 0xffffffffu << shift;
5125 }
ad69471c
PB
5126 break;
5127 default:
5128 abort();
5129 }
dd8fbd78 5130 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5131 tcg_gen_andi_i32(tmp, tmp, mask);
5132 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5133 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5134 tcg_temp_free_i32(tmp2);
ad69471c 5135 }
dd8fbd78 5136 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5137 }
5138 } /* for pass */
5139 } else if (op < 10) {
ad69471c 5140 /* Shift by immediate and narrow:
9ee6e8bb 5141 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5142 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5143 if (rm & 1) {
5144 return 1;
5145 }
9ee6e8bb
PB
5146 shift = shift - (1 << (size + 3));
5147 size++;
92cdfaeb 5148 if (size == 3) {
a7812ae4 5149 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5150 neon_load_reg64(cpu_V0, rm);
5151 neon_load_reg64(cpu_V1, rm + 1);
5152 for (pass = 0; pass < 2; pass++) {
5153 TCGv_i64 in;
5154 if (pass == 0) {
5155 in = cpu_V0;
5156 } else {
5157 in = cpu_V1;
5158 }
ad69471c 5159 if (q) {
0b36f4cd 5160 if (input_unsigned) {
92cdfaeb 5161 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5162 } else {
92cdfaeb 5163 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5164 }
ad69471c 5165 } else {
0b36f4cd 5166 if (input_unsigned) {
92cdfaeb 5167 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5168 } else {
92cdfaeb 5169 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5170 }
ad69471c 5171 }
7d1b0095 5172 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5173 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5174 neon_store_reg(rd, pass, tmp);
5175 } /* for pass */
5176 tcg_temp_free_i64(tmp64);
5177 } else {
5178 if (size == 1) {
5179 imm = (uint16_t)shift;
5180 imm |= imm << 16;
2c0262af 5181 } else {
92cdfaeb
PM
5182 /* size == 2 */
5183 imm = (uint32_t)shift;
5184 }
5185 tmp2 = tcg_const_i32(imm);
5186 tmp4 = neon_load_reg(rm + 1, 0);
5187 tmp5 = neon_load_reg(rm + 1, 1);
5188 for (pass = 0; pass < 2; pass++) {
5189 if (pass == 0) {
5190 tmp = neon_load_reg(rm, 0);
5191 } else {
5192 tmp = tmp4;
5193 }
0b36f4cd
CL
5194 gen_neon_shift_narrow(size, tmp, tmp2, q,
5195 input_unsigned);
92cdfaeb
PM
5196 if (pass == 0) {
5197 tmp3 = neon_load_reg(rm, 1);
5198 } else {
5199 tmp3 = tmp5;
5200 }
0b36f4cd
CL
5201 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5202 input_unsigned);
36aa55dc 5203 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5204 tcg_temp_free_i32(tmp);
5205 tcg_temp_free_i32(tmp3);
5206 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5207 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5208 neon_store_reg(rd, pass, tmp);
5209 } /* for pass */
c6067f04 5210 tcg_temp_free_i32(tmp2);
b75263d6 5211 }
9ee6e8bb 5212 } else if (op == 10) {
cc13115b
PM
5213 /* VSHLL, VMOVL */
5214 if (q || (rd & 1)) {
9ee6e8bb 5215 return 1;
cc13115b 5216 }
ad69471c
PB
5217 tmp = neon_load_reg(rm, 0);
5218 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5219 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5220 if (pass == 1)
5221 tmp = tmp2;
5222
5223 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5224
9ee6e8bb
PB
5225 if (shift != 0) {
5226 /* The shift is less than the width of the source
ad69471c
PB
5227 type, so we can just shift the whole register. */
5228 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5229 /* Widen the result of shift: we need to clear
5230 * the potential overflow bits resulting from
5231 * left bits of the narrow input appearing as
5232 * right bits of left the neighbour narrow
5233 * input. */
ad69471c
PB
5234 if (size < 2 || !u) {
5235 uint64_t imm64;
5236 if (size == 0) {
5237 imm = (0xffu >> (8 - shift));
5238 imm |= imm << 16;
acdf01ef 5239 } else if (size == 1) {
ad69471c 5240 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5241 } else {
5242 /* size == 2 */
5243 imm = 0xffffffff >> (32 - shift);
5244 }
5245 if (size < 2) {
5246 imm64 = imm | (((uint64_t)imm) << 32);
5247 } else {
5248 imm64 = imm;
9ee6e8bb 5249 }
acdf01ef 5250 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5251 }
5252 }
ad69471c 5253 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5254 }
f73534a5 5255 } else if (op >= 14) {
9ee6e8bb 5256 /* VCVT fixed-point. */
cc13115b
PM
5257 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5258 return 1;
5259 }
f73534a5
PM
5260 /* We have already masked out the must-be-1 top bit of imm6,
5261 * hence this 32-shift where the ARM ARM has 64-imm6.
5262 */
5263 shift = 32 - shift;
9ee6e8bb 5264 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5265 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5266 if (!(op & 1)) {
9ee6e8bb 5267 if (u)
5500b06c 5268 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5269 else
5500b06c 5270 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5271 } else {
5272 if (u)
5500b06c 5273 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5274 else
5500b06c 5275 gen_vfp_tosl(0, shift, 1);
2c0262af 5276 }
4373f3ce 5277 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5278 }
5279 } else {
9ee6e8bb
PB
5280 return 1;
5281 }
5282 } else { /* (insn & 0x00380080) == 0 */
5283 int invert;
7d80fee5
PM
5284 if (q && (rd & 1)) {
5285 return 1;
5286 }
9ee6e8bb
PB
5287
5288 op = (insn >> 8) & 0xf;
5289 /* One register and immediate. */
5290 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5291 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5292 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5293 * We choose to not special-case this and will behave as if a
5294 * valid constant encoding of 0 had been given.
5295 */
9ee6e8bb
PB
5296 switch (op) {
5297 case 0: case 1:
5298 /* no-op */
5299 break;
5300 case 2: case 3:
5301 imm <<= 8;
5302 break;
5303 case 4: case 5:
5304 imm <<= 16;
5305 break;
5306 case 6: case 7:
5307 imm <<= 24;
5308 break;
5309 case 8: case 9:
5310 imm |= imm << 16;
5311 break;
5312 case 10: case 11:
5313 imm = (imm << 8) | (imm << 24);
5314 break;
5315 case 12:
8e31209e 5316 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5317 break;
5318 case 13:
5319 imm = (imm << 16) | 0xffff;
5320 break;
5321 case 14:
5322 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5323 if (invert)
5324 imm = ~imm;
5325 break;
5326 case 15:
7d80fee5
PM
5327 if (invert) {
5328 return 1;
5329 }
9ee6e8bb
PB
5330 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5331 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5332 break;
5333 }
5334 if (invert)
5335 imm = ~imm;
5336
9ee6e8bb
PB
5337 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5338 if (op & 1 && op < 12) {
ad69471c 5339 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5340 if (invert) {
5341 /* The immediate value has already been inverted, so
5342 BIC becomes AND. */
ad69471c 5343 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5344 } else {
ad69471c 5345 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5346 }
9ee6e8bb 5347 } else {
ad69471c 5348 /* VMOV, VMVN. */
7d1b0095 5349 tmp = tcg_temp_new_i32();
9ee6e8bb 5350 if (op == 14 && invert) {
a5a14945 5351 int n;
ad69471c
PB
5352 uint32_t val;
5353 val = 0;
9ee6e8bb
PB
5354 for (n = 0; n < 4; n++) {
5355 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5356 val |= 0xff << (n * 8);
9ee6e8bb 5357 }
ad69471c
PB
5358 tcg_gen_movi_i32(tmp, val);
5359 } else {
5360 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5361 }
9ee6e8bb 5362 }
ad69471c 5363 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5364 }
5365 }
e4b3861d 5366 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5367 if (size != 3) {
5368 op = (insn >> 8) & 0xf;
5369 if ((insn & (1 << 6)) == 0) {
5370 /* Three registers of different lengths. */
5371 int src1_wide;
5372 int src2_wide;
5373 int prewiden;
695272dc
PM
5374 /* undefreq: bit 0 : UNDEF if size != 0
5375 * bit 1 : UNDEF if size == 0
5376 * bit 2 : UNDEF if U == 1
5377 * Note that [1:0] set implies 'always UNDEF'
5378 */
5379 int undefreq;
5380 /* prewiden, src1_wide, src2_wide, undefreq */
5381 static const int neon_3reg_wide[16][4] = {
5382 {1, 0, 0, 0}, /* VADDL */
5383 {1, 1, 0, 0}, /* VADDW */
5384 {1, 0, 0, 0}, /* VSUBL */
5385 {1, 1, 0, 0}, /* VSUBW */
5386 {0, 1, 1, 0}, /* VADDHN */
5387 {0, 0, 0, 0}, /* VABAL */
5388 {0, 1, 1, 0}, /* VSUBHN */
5389 {0, 0, 0, 0}, /* VABDL */
5390 {0, 0, 0, 0}, /* VMLAL */
5391 {0, 0, 0, 6}, /* VQDMLAL */
5392 {0, 0, 0, 0}, /* VMLSL */
5393 {0, 0, 0, 6}, /* VQDMLSL */
5394 {0, 0, 0, 0}, /* Integer VMULL */
5395 {0, 0, 0, 2}, /* VQDMULL */
5396 {0, 0, 0, 5}, /* Polynomial VMULL */
5397 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5398 };
5399
5400 prewiden = neon_3reg_wide[op][0];
5401 src1_wide = neon_3reg_wide[op][1];
5402 src2_wide = neon_3reg_wide[op][2];
695272dc 5403 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5404
695272dc
PM
5405 if (((undefreq & 1) && (size != 0)) ||
5406 ((undefreq & 2) && (size == 0)) ||
5407 ((undefreq & 4) && u)) {
5408 return 1;
5409 }
5410 if ((src1_wide && (rn & 1)) ||
5411 (src2_wide && (rm & 1)) ||
5412 (!src2_wide && (rd & 1))) {
ad69471c 5413 return 1;
695272dc 5414 }
ad69471c 5415
9ee6e8bb
PB
5416 /* Avoid overlapping operands. Wide source operands are
5417 always aligned so will never overlap with wide
5418 destinations in problematic ways. */
8f8e3aa4 5419 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5420 tmp = neon_load_reg(rm, 1);
5421 neon_store_scratch(2, tmp);
8f8e3aa4 5422 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5423 tmp = neon_load_reg(rn, 1);
5424 neon_store_scratch(2, tmp);
9ee6e8bb 5425 }
a50f5b91 5426 TCGV_UNUSED(tmp3);
9ee6e8bb 5427 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5428 if (src1_wide) {
5429 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5430 TCGV_UNUSED(tmp);
9ee6e8bb 5431 } else {
ad69471c 5432 if (pass == 1 && rd == rn) {
dd8fbd78 5433 tmp = neon_load_scratch(2);
9ee6e8bb 5434 } else {
ad69471c
PB
5435 tmp = neon_load_reg(rn, pass);
5436 }
5437 if (prewiden) {
5438 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5439 }
5440 }
ad69471c
PB
5441 if (src2_wide) {
5442 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5443 TCGV_UNUSED(tmp2);
9ee6e8bb 5444 } else {
ad69471c 5445 if (pass == 1 && rd == rm) {
dd8fbd78 5446 tmp2 = neon_load_scratch(2);
9ee6e8bb 5447 } else {
ad69471c
PB
5448 tmp2 = neon_load_reg(rm, pass);
5449 }
5450 if (prewiden) {
5451 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5452 }
9ee6e8bb
PB
5453 }
5454 switch (op) {
5455 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5456 gen_neon_addl(size);
9ee6e8bb 5457 break;
79b0e534 5458 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5459 gen_neon_subl(size);
9ee6e8bb
PB
5460 break;
5461 case 5: case 7: /* VABAL, VABDL */
5462 switch ((size << 1) | u) {
ad69471c
PB
5463 case 0:
5464 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5465 break;
5466 case 1:
5467 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5468 break;
5469 case 2:
5470 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5471 break;
5472 case 3:
5473 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5474 break;
5475 case 4:
5476 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5477 break;
5478 case 5:
5479 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5480 break;
9ee6e8bb
PB
5481 default: abort();
5482 }
7d1b0095
PM
5483 tcg_temp_free_i32(tmp2);
5484 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5485 break;
5486 case 8: case 9: case 10: case 11: case 12: case 13:
5487 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5488 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5489 break;
5490 case 14: /* Polynomial VMULL */
e5ca24cb 5491 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5492 tcg_temp_free_i32(tmp2);
5493 tcg_temp_free_i32(tmp);
e5ca24cb 5494 break;
695272dc
PM
5495 default: /* 15 is RESERVED: caught earlier */
5496 abort();
9ee6e8bb 5497 }
ebcd88ce
PM
5498 if (op == 13) {
5499 /* VQDMULL */
5500 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5501 neon_store_reg64(cpu_V0, rd + pass);
5502 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5503 /* Accumulate. */
ebcd88ce 5504 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5505 switch (op) {
4dc064e6
PM
5506 case 10: /* VMLSL */
5507 gen_neon_negl(cpu_V0, size);
5508 /* Fall through */
5509 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5510 gen_neon_addl(size);
9ee6e8bb
PB
5511 break;
5512 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5513 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5514 if (op == 11) {
5515 gen_neon_negl(cpu_V0, size);
5516 }
ad69471c
PB
5517 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5518 break;
9ee6e8bb
PB
5519 default:
5520 abort();
5521 }
ad69471c 5522 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5523 } else if (op == 4 || op == 6) {
5524 /* Narrowing operation. */
7d1b0095 5525 tmp = tcg_temp_new_i32();
79b0e534 5526 if (!u) {
9ee6e8bb 5527 switch (size) {
ad69471c
PB
5528 case 0:
5529 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5530 break;
5531 case 1:
5532 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5533 break;
5534 case 2:
5535 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5536 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5537 break;
9ee6e8bb
PB
5538 default: abort();
5539 }
5540 } else {
5541 switch (size) {
ad69471c
PB
5542 case 0:
5543 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5544 break;
5545 case 1:
5546 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5547 break;
5548 case 2:
5549 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5550 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5551 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5552 break;
9ee6e8bb
PB
5553 default: abort();
5554 }
5555 }
ad69471c
PB
5556 if (pass == 0) {
5557 tmp3 = tmp;
5558 } else {
5559 neon_store_reg(rd, 0, tmp3);
5560 neon_store_reg(rd, 1, tmp);
5561 }
9ee6e8bb
PB
5562 } else {
5563 /* Write back the result. */
ad69471c 5564 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5565 }
5566 }
5567 } else {
3e3326df
PM
5568 /* Two registers and a scalar. NB that for ops of this form
5569 * the ARM ARM labels bit 24 as Q, but it is in our variable
5570 * 'u', not 'q'.
5571 */
5572 if (size == 0) {
5573 return 1;
5574 }
9ee6e8bb 5575 switch (op) {
9ee6e8bb 5576 case 1: /* Float VMLA scalar */
9ee6e8bb 5577 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5578 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5579 if (size == 1) {
5580 return 1;
5581 }
5582 /* fall through */
5583 case 0: /* Integer VMLA scalar */
5584 case 4: /* Integer VMLS scalar */
5585 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5586 case 12: /* VQDMULH scalar */
5587 case 13: /* VQRDMULH scalar */
3e3326df
PM
5588 if (u && ((rd | rn) & 1)) {
5589 return 1;
5590 }
dd8fbd78
FN
5591 tmp = neon_get_scalar(size, rm);
5592 neon_store_scratch(0, tmp);
9ee6e8bb 5593 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5594 tmp = neon_load_scratch(0);
5595 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5596 if (op == 12) {
5597 if (size == 1) {
2a3f75b4 5598 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5599 } else {
2a3f75b4 5600 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5601 }
5602 } else if (op == 13) {
5603 if (size == 1) {
2a3f75b4 5604 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
9ee6e8bb 5605 } else {
2a3f75b4 5606 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
9ee6e8bb
PB
5607 }
5608 } else if (op & 1) {
dd8fbd78 5609 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5610 } else {
5611 switch (size) {
dd8fbd78
FN
5612 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5613 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5614 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5615 default: abort();
9ee6e8bb
PB
5616 }
5617 }
7d1b0095 5618 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5619 if (op < 8) {
5620 /* Accumulate. */
dd8fbd78 5621 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5622 switch (op) {
5623 case 0:
dd8fbd78 5624 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5625 break;
5626 case 1:
dd8fbd78 5627 gen_helper_neon_add_f32(tmp, tmp, tmp2);
9ee6e8bb
PB
5628 break;
5629 case 4:
dd8fbd78 5630 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5631 break;
5632 case 5:
dd8fbd78 5633 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
9ee6e8bb
PB
5634 break;
5635 default:
5636 abort();
5637 }
7d1b0095 5638 tcg_temp_free_i32(tmp2);
9ee6e8bb 5639 }
dd8fbd78 5640 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5641 }
5642 break;
9ee6e8bb 5643 case 3: /* VQDMLAL scalar */
9ee6e8bb 5644 case 7: /* VQDMLSL scalar */
9ee6e8bb 5645 case 11: /* VQDMULL scalar */
3e3326df 5646 if (u == 1) {
ad69471c 5647 return 1;
3e3326df
PM
5648 }
5649 /* fall through */
5650 case 2: /* VMLAL sclar */
5651 case 6: /* VMLSL scalar */
5652 case 10: /* VMULL scalar */
5653 if (rd & 1) {
5654 return 1;
5655 }
dd8fbd78 5656 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5657 /* We need a copy of tmp2 because gen_neon_mull
5658 * deletes it during pass 0. */
7d1b0095 5659 tmp4 = tcg_temp_new_i32();
c6067f04 5660 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5661 tmp3 = neon_load_reg(rn, 1);
ad69471c 5662
9ee6e8bb 5663 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5664 if (pass == 0) {
5665 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5666 } else {
dd8fbd78 5667 tmp = tmp3;
c6067f04 5668 tmp2 = tmp4;
9ee6e8bb 5669 }
ad69471c 5670 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5671 if (op != 11) {
5672 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5673 }
9ee6e8bb 5674 switch (op) {
4dc064e6
PM
5675 case 6:
5676 gen_neon_negl(cpu_V0, size);
5677 /* Fall through */
5678 case 2:
ad69471c 5679 gen_neon_addl(size);
9ee6e8bb
PB
5680 break;
5681 case 3: case 7:
ad69471c 5682 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5683 if (op == 7) {
5684 gen_neon_negl(cpu_V0, size);
5685 }
ad69471c 5686 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5687 break;
5688 case 10:
5689 /* no-op */
5690 break;
5691 case 11:
ad69471c 5692 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5693 break;
5694 default:
5695 abort();
5696 }
ad69471c 5697 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5698 }
dd8fbd78 5699
dd8fbd78 5700
9ee6e8bb
PB
5701 break;
5702 default: /* 14 and 15 are RESERVED */
5703 return 1;
5704 }
5705 }
5706 } else { /* size == 3 */
5707 if (!u) {
5708 /* Extract. */
9ee6e8bb 5709 imm = (insn >> 8) & 0xf;
ad69471c
PB
5710
5711 if (imm > 7 && !q)
5712 return 1;
5713
52579ea1
PM
5714 if (q && ((rd | rn | rm) & 1)) {
5715 return 1;
5716 }
5717
ad69471c
PB
5718 if (imm == 0) {
5719 neon_load_reg64(cpu_V0, rn);
5720 if (q) {
5721 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5722 }
ad69471c
PB
5723 } else if (imm == 8) {
5724 neon_load_reg64(cpu_V0, rn + 1);
5725 if (q) {
5726 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5727 }
ad69471c 5728 } else if (q) {
a7812ae4 5729 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5730 if (imm < 8) {
5731 neon_load_reg64(cpu_V0, rn);
a7812ae4 5732 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5733 } else {
5734 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5735 neon_load_reg64(tmp64, rm);
ad69471c
PB
5736 }
5737 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5738 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5739 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5740 if (imm < 8) {
5741 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5742 } else {
ad69471c
PB
5743 neon_load_reg64(cpu_V1, rm + 1);
5744 imm -= 8;
9ee6e8bb 5745 }
ad69471c 5746 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5747 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5748 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5749 tcg_temp_free_i64(tmp64);
ad69471c 5750 } else {
a7812ae4 5751 /* BUGFIX */
ad69471c 5752 neon_load_reg64(cpu_V0, rn);
a7812ae4 5753 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5754 neon_load_reg64(cpu_V1, rm);
a7812ae4 5755 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5756 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5757 }
5758 neon_store_reg64(cpu_V0, rd);
5759 if (q) {
5760 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5761 }
5762 } else if ((insn & (1 << 11)) == 0) {
5763 /* Two register misc. */
5764 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5765 size = (insn >> 18) & 3;
600b828c
PM
5766 /* UNDEF for unknown op values and bad op-size combinations */
5767 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5768 return 1;
5769 }
fc2a9b37
PM
5770 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5771 q && ((rm | rd) & 1)) {
5772 return 1;
5773 }
9ee6e8bb 5774 switch (op) {
600b828c 5775 case NEON_2RM_VREV64:
9ee6e8bb 5776 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5777 tmp = neon_load_reg(rm, pass * 2);
5778 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5779 switch (size) {
dd8fbd78
FN
5780 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5781 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5782 case 2: /* no-op */ break;
5783 default: abort();
5784 }
dd8fbd78 5785 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5786 if (size == 2) {
dd8fbd78 5787 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5788 } else {
9ee6e8bb 5789 switch (size) {
dd8fbd78
FN
5790 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5791 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5792 default: abort();
5793 }
dd8fbd78 5794 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5795 }
5796 }
5797 break;
600b828c
PM
5798 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5799 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5800 for (pass = 0; pass < q + 1; pass++) {
5801 tmp = neon_load_reg(rm, pass * 2);
5802 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5803 tmp = neon_load_reg(rm, pass * 2 + 1);
5804 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5805 switch (size) {
5806 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5807 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5808 case 2: tcg_gen_add_i64(CPU_V001); break;
5809 default: abort();
5810 }
600b828c 5811 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5812 /* Accumulate. */
ad69471c
PB
5813 neon_load_reg64(cpu_V1, rd + pass);
5814 gen_neon_addl(size);
9ee6e8bb 5815 }
ad69471c 5816 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5817 }
5818 break;
600b828c 5819 case NEON_2RM_VTRN:
9ee6e8bb 5820 if (size == 2) {
a5a14945 5821 int n;
9ee6e8bb 5822 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5823 tmp = neon_load_reg(rm, n);
5824 tmp2 = neon_load_reg(rd, n + 1);
5825 neon_store_reg(rm, n, tmp2);
5826 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5827 }
5828 } else {
5829 goto elementwise;
5830 }
5831 break;
600b828c 5832 case NEON_2RM_VUZP:
02acedf9 5833 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5834 return 1;
9ee6e8bb
PB
5835 }
5836 break;
600b828c 5837 case NEON_2RM_VZIP:
d68a6f3a 5838 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5839 return 1;
9ee6e8bb
PB
5840 }
5841 break;
600b828c
PM
5842 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5843 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5844 if (rm & 1) {
5845 return 1;
5846 }
a50f5b91 5847 TCGV_UNUSED(tmp2);
9ee6e8bb 5848 for (pass = 0; pass < 2; pass++) {
ad69471c 5849 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5850 tmp = tcg_temp_new_i32();
600b828c
PM
5851 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5852 tmp, cpu_V0);
ad69471c
PB
5853 if (pass == 0) {
5854 tmp2 = tmp;
5855 } else {
5856 neon_store_reg(rd, 0, tmp2);
5857 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5858 }
9ee6e8bb
PB
5859 }
5860 break;
600b828c 5861 case NEON_2RM_VSHLL:
fc2a9b37 5862 if (q || (rd & 1)) {
9ee6e8bb 5863 return 1;
600b828c 5864 }
ad69471c
PB
5865 tmp = neon_load_reg(rm, 0);
5866 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5867 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5868 if (pass == 1)
5869 tmp = tmp2;
5870 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5871 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5872 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5873 }
5874 break;
600b828c 5875 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5876 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5877 q || (rm & 1)) {
5878 return 1;
5879 }
7d1b0095
PM
5880 tmp = tcg_temp_new_i32();
5881 tmp2 = tcg_temp_new_i32();
60011498 5882 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5883 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5884 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5885 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5886 tcg_gen_shli_i32(tmp2, tmp2, 16);
5887 tcg_gen_or_i32(tmp2, tmp2, tmp);
5888 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5889 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5890 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5891 neon_store_reg(rd, 0, tmp2);
7d1b0095 5892 tmp2 = tcg_temp_new_i32();
2d981da7 5893 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5894 tcg_gen_shli_i32(tmp2, tmp2, 16);
5895 tcg_gen_or_i32(tmp2, tmp2, tmp);
5896 neon_store_reg(rd, 1, tmp2);
7d1b0095 5897 tcg_temp_free_i32(tmp);
60011498 5898 break;
600b828c 5899 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5900 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5901 q || (rd & 1)) {
5902 return 1;
5903 }
7d1b0095 5904 tmp3 = tcg_temp_new_i32();
60011498
PB
5905 tmp = neon_load_reg(rm, 0);
5906 tmp2 = neon_load_reg(rm, 1);
5907 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5908 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5909 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5910 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5911 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5912 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5913 tcg_temp_free_i32(tmp);
60011498 5914 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5915 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5916 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5917 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5918 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5919 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5920 tcg_temp_free_i32(tmp2);
5921 tcg_temp_free_i32(tmp3);
60011498 5922 break;
9ee6e8bb
PB
5923 default:
5924 elementwise:
5925 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5926 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5927 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5928 neon_reg_offset(rm, pass));
dd8fbd78 5929 TCGV_UNUSED(tmp);
9ee6e8bb 5930 } else {
dd8fbd78 5931 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5932 }
5933 switch (op) {
600b828c 5934 case NEON_2RM_VREV32:
9ee6e8bb 5935 switch (size) {
dd8fbd78
FN
5936 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5937 case 1: gen_swap_half(tmp); break;
600b828c 5938 default: abort();
9ee6e8bb
PB
5939 }
5940 break;
600b828c 5941 case NEON_2RM_VREV16:
dd8fbd78 5942 gen_rev16(tmp);
9ee6e8bb 5943 break;
600b828c 5944 case NEON_2RM_VCLS:
9ee6e8bb 5945 switch (size) {
dd8fbd78
FN
5946 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5947 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5948 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5949 default: abort();
9ee6e8bb
PB
5950 }
5951 break;
600b828c 5952 case NEON_2RM_VCLZ:
9ee6e8bb 5953 switch (size) {
dd8fbd78
FN
5954 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5955 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5956 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5957 default: abort();
9ee6e8bb
PB
5958 }
5959 break;
600b828c 5960 case NEON_2RM_VCNT:
dd8fbd78 5961 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5962 break;
600b828c 5963 case NEON_2RM_VMVN:
dd8fbd78 5964 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5965 break;
600b828c 5966 case NEON_2RM_VQABS:
9ee6e8bb 5967 switch (size) {
2a3f75b4
PM
5968 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5969 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5970 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
600b828c 5971 default: abort();
9ee6e8bb
PB
5972 }
5973 break;
600b828c 5974 case NEON_2RM_VQNEG:
9ee6e8bb 5975 switch (size) {
2a3f75b4
PM
5976 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5977 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5978 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
600b828c 5979 default: abort();
9ee6e8bb
PB
5980 }
5981 break;
600b828c 5982 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5983 tmp2 = tcg_const_i32(0);
9ee6e8bb 5984 switch(size) {
dd8fbd78
FN
5985 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5986 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5987 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5988 default: abort();
9ee6e8bb 5989 }
dd8fbd78 5990 tcg_temp_free(tmp2);
600b828c 5991 if (op == NEON_2RM_VCLE0) {
dd8fbd78 5992 tcg_gen_not_i32(tmp, tmp);
600b828c 5993 }
9ee6e8bb 5994 break;
600b828c 5995 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 5996 tmp2 = tcg_const_i32(0);
9ee6e8bb 5997 switch(size) {
dd8fbd78
FN
5998 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5999 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6000 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6001 default: abort();
9ee6e8bb 6002 }
dd8fbd78 6003 tcg_temp_free(tmp2);
600b828c 6004 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6005 tcg_gen_not_i32(tmp, tmp);
600b828c 6006 }
9ee6e8bb 6007 break;
600b828c 6008 case NEON_2RM_VCEQ0:
dd8fbd78 6009 tmp2 = tcg_const_i32(0);
9ee6e8bb 6010 switch(size) {
dd8fbd78
FN
6011 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6012 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6013 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6014 default: abort();
9ee6e8bb 6015 }
dd8fbd78 6016 tcg_temp_free(tmp2);
9ee6e8bb 6017 break;
600b828c 6018 case NEON_2RM_VABS:
9ee6e8bb 6019 switch(size) {
dd8fbd78
FN
6020 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6021 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6022 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6023 default: abort();
9ee6e8bb
PB
6024 }
6025 break;
600b828c 6026 case NEON_2RM_VNEG:
dd8fbd78
FN
6027 tmp2 = tcg_const_i32(0);
6028 gen_neon_rsb(size, tmp, tmp2);
6029 tcg_temp_free(tmp2);
9ee6e8bb 6030 break;
600b828c 6031 case NEON_2RM_VCGT0_F:
dd8fbd78
FN
6032 tmp2 = tcg_const_i32(0);
6033 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
6034 tcg_temp_free(tmp2);
9ee6e8bb 6035 break;
600b828c 6036 case NEON_2RM_VCGE0_F:
dd8fbd78
FN
6037 tmp2 = tcg_const_i32(0);
6038 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
6039 tcg_temp_free(tmp2);
9ee6e8bb 6040 break;
600b828c 6041 case NEON_2RM_VCEQ0_F:
dd8fbd78
FN
6042 tmp2 = tcg_const_i32(0);
6043 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
6044 tcg_temp_free(tmp2);
9ee6e8bb 6045 break;
600b828c 6046 case NEON_2RM_VCLE0_F:
0e326109
PM
6047 tmp2 = tcg_const_i32(0);
6048 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
6049 tcg_temp_free(tmp2);
6050 break;
600b828c 6051 case NEON_2RM_VCLT0_F:
0e326109
PM
6052 tmp2 = tcg_const_i32(0);
6053 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
6054 tcg_temp_free(tmp2);
6055 break;
600b828c 6056 case NEON_2RM_VABS_F:
4373f3ce 6057 gen_vfp_abs(0);
9ee6e8bb 6058 break;
600b828c 6059 case NEON_2RM_VNEG_F:
4373f3ce 6060 gen_vfp_neg(0);
9ee6e8bb 6061 break;
600b828c 6062 case NEON_2RM_VSWP:
dd8fbd78
FN
6063 tmp2 = neon_load_reg(rd, pass);
6064 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6065 break;
600b828c 6066 case NEON_2RM_VTRN:
dd8fbd78 6067 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6068 switch (size) {
dd8fbd78
FN
6069 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6070 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6071 default: abort();
9ee6e8bb 6072 }
dd8fbd78 6073 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6074 break;
600b828c 6075 case NEON_2RM_VRECPE:
dd8fbd78 6076 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6077 break;
600b828c 6078 case NEON_2RM_VRSQRTE:
dd8fbd78 6079 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6080 break;
600b828c 6081 case NEON_2RM_VRECPE_F:
4373f3ce 6082 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6083 break;
600b828c 6084 case NEON_2RM_VRSQRTE_F:
4373f3ce 6085 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6086 break;
600b828c 6087 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6088 gen_vfp_sito(0, 1);
9ee6e8bb 6089 break;
600b828c 6090 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6091 gen_vfp_uito(0, 1);
9ee6e8bb 6092 break;
600b828c 6093 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6094 gen_vfp_tosiz(0, 1);
9ee6e8bb 6095 break;
600b828c 6096 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6097 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6098 break;
6099 default:
600b828c
PM
6100 /* Reserved op values were caught by the
6101 * neon_2rm_sizes[] check earlier.
6102 */
6103 abort();
9ee6e8bb 6104 }
600b828c 6105 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6106 tcg_gen_st_f32(cpu_F0s, cpu_env,
6107 neon_reg_offset(rd, pass));
9ee6e8bb 6108 } else {
dd8fbd78 6109 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6110 }
6111 }
6112 break;
6113 }
6114 } else if ((insn & (1 << 10)) == 0) {
6115 /* VTBL, VTBX. */
56907d77
PM
6116 int n = ((insn >> 8) & 3) + 1;
6117 if ((rn + n) > 32) {
6118 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6119 * helper function running off the end of the register file.
6120 */
6121 return 1;
6122 }
6123 n <<= 3;
9ee6e8bb 6124 if (insn & (1 << 6)) {
8f8e3aa4 6125 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6126 } else {
7d1b0095 6127 tmp = tcg_temp_new_i32();
8f8e3aa4 6128 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6129 }
8f8e3aa4 6130 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6131 tmp4 = tcg_const_i32(rn);
6132 tmp5 = tcg_const_i32(n);
6133 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
7d1b0095 6134 tcg_temp_free_i32(tmp);
9ee6e8bb 6135 if (insn & (1 << 6)) {
8f8e3aa4 6136 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6137 } else {
7d1b0095 6138 tmp = tcg_temp_new_i32();
8f8e3aa4 6139 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6140 }
8f8e3aa4 6141 tmp3 = neon_load_reg(rm, 1);
b75263d6 6142 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6143 tcg_temp_free_i32(tmp5);
6144 tcg_temp_free_i32(tmp4);
8f8e3aa4 6145 neon_store_reg(rd, 0, tmp2);
3018f259 6146 neon_store_reg(rd, 1, tmp3);
7d1b0095 6147 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6148 } else if ((insn & 0x380) == 0) {
6149 /* VDUP */
133da6aa
JR
6150 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6151 return 1;
6152 }
9ee6e8bb 6153 if (insn & (1 << 19)) {
dd8fbd78 6154 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6155 } else {
dd8fbd78 6156 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6157 }
6158 if (insn & (1 << 16)) {
dd8fbd78 6159 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6160 } else if (insn & (1 << 17)) {
6161 if ((insn >> 18) & 1)
dd8fbd78 6162 gen_neon_dup_high16(tmp);
9ee6e8bb 6163 else
dd8fbd78 6164 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6165 }
6166 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6167 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6168 tcg_gen_mov_i32(tmp2, tmp);
6169 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6170 }
7d1b0095 6171 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6172 } else {
6173 return 1;
6174 }
6175 }
6176 }
6177 return 0;
6178}
6179
fe1479c3
PB
6180static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6181{
6182 int crn = (insn >> 16) & 0xf;
6183 int crm = insn & 0xf;
6184 int op1 = (insn >> 21) & 7;
6185 int op2 = (insn >> 5) & 7;
6186 int rt = (insn >> 12) & 0xf;
6187 TCGv tmp;
6188
ca27c052
PM
6189 /* Minimal set of debug registers, since we don't support debug */
6190 if (op1 == 0 && crn == 0 && op2 == 0) {
6191 switch (crm) {
6192 case 0:
6193 /* DBGDIDR: just RAZ. In particular this means the
6194 * "debug architecture version" bits will read as
6195 * a reserved value, which should cause Linux to
6196 * not try to use the debug hardware.
6197 */
6198 tmp = tcg_const_i32(0);
6199 store_reg(s, rt, tmp);
6200 return 0;
6201 case 1:
6202 case 2:
6203 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6204 * don't implement memory mapped debug components
6205 */
6206 if (ENABLE_ARCH_7) {
6207 tmp = tcg_const_i32(0);
6208 store_reg(s, rt, tmp);
6209 return 0;
6210 }
6211 break;
6212 default:
6213 break;
6214 }
6215 }
6216
fe1479c3
PB
6217 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6218 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6219 /* TEECR */
6220 if (IS_USER(s))
6221 return 1;
6222 tmp = load_cpu_field(teecr);
6223 store_reg(s, rt, tmp);
6224 return 0;
6225 }
6226 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6227 /* TEEHBR */
6228 if (IS_USER(s) && (env->teecr & 1))
6229 return 1;
6230 tmp = load_cpu_field(teehbr);
6231 store_reg(s, rt, tmp);
6232 return 0;
6233 }
6234 }
6235 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6236 op1, crn, crm, op2);
6237 return 1;
6238}
6239
6240static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6241{
6242 int crn = (insn >> 16) & 0xf;
6243 int crm = insn & 0xf;
6244 int op1 = (insn >> 21) & 7;
6245 int op2 = (insn >> 5) & 7;
6246 int rt = (insn >> 12) & 0xf;
6247 TCGv tmp;
6248
6249 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6250 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6251 /* TEECR */
6252 if (IS_USER(s))
6253 return 1;
6254 tmp = load_reg(s, rt);
6255 gen_helper_set_teecr(cpu_env, tmp);
7d1b0095 6256 tcg_temp_free_i32(tmp);
fe1479c3
PB
6257 return 0;
6258 }
6259 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6260 /* TEEHBR */
6261 if (IS_USER(s) && (env->teecr & 1))
6262 return 1;
6263 tmp = load_reg(s, rt);
6264 store_cpu_field(tmp, teehbr);
6265 return 0;
6266 }
6267 }
6268 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6269 op1, crn, crm, op2);
6270 return 1;
6271}
6272
9ee6e8bb
PB
/* Dispatch a coprocessor instruction to the handler for the coprocessor
 * number it addresses (bits [11:8] of the instruction).  Returns 0 on
 * success, nonzero if the instruction should UNDEF.
 */
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    /* On XScale, c15_cpar gates access to coprocessors 0-13: a clear
     * bit for this coprocessor number means the access must UNDEF.
     */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
	return 1;

    switch (cpnum) {
      case 0:
      case 1:
        /* cp0/cp1 are iwMMXt (or XScale DSP) if present, else UNDEF.  */
	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
	    return disas_iwmmxt_insn(env, s, insn);
	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
	    return disas_dsp_insn(env, s, insn);
	}
	return 1;
    case 10:
    case 11:
        /* VFP occupies cp10 and cp11.  */
	return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this. */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
	return disas_cp15_insn (env, s, insn);
    default:
    board:
	/* Unknown coprocessor.  See if the board has hooked it.  */
	return disas_cp_insn (env, s, insn);
    }
}
6311
5e3f878a
PB
6312
6313/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6314static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6315{
6316 TCGv tmp;
7d1b0095 6317 tmp = tcg_temp_new_i32();
5e3f878a
PB
6318 tcg_gen_trunc_i64_i32(tmp, val);
6319 store_reg(s, rlow, tmp);
7d1b0095 6320 tmp = tcg_temp_new_i32();
5e3f878a
PB
6321 tcg_gen_shri_i64(val, val, 32);
6322 tcg_gen_trunc_i64_i32(tmp, val);
6323 store_reg(s, rhigh, tmp);
6324}
6325
6326/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6327static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6328{
a7812ae4 6329 TCGv_i64 tmp;
5e3f878a
PB
6330 TCGv tmp2;
6331
36aa55dc 6332 /* Load value and extend to 64 bits. */
a7812ae4 6333 tmp = tcg_temp_new_i64();
5e3f878a
PB
6334 tmp2 = load_reg(s, rlow);
6335 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6336 tcg_temp_free_i32(tmp2);
5e3f878a 6337 tcg_gen_add_i64(val, val, tmp);
b75263d6 6338 tcg_temp_free_i64(tmp);
5e3f878a
PB
6339}
6340
6341/* load and add a 64-bit value from a register pair. */
a7812ae4 6342static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6343{
a7812ae4 6344 TCGv_i64 tmp;
36aa55dc
PB
6345 TCGv tmpl;
6346 TCGv tmph;
5e3f878a
PB
6347
6348 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6349 tmpl = load_reg(s, rlow);
6350 tmph = load_reg(s, rhigh);
a7812ae4 6351 tmp = tcg_temp_new_i64();
36aa55dc 6352 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6353 tcg_temp_free_i32(tmpl);
6354 tcg_temp_free_i32(tmph);
5e3f878a 6355 tcg_gen_add_i64(val, val, tmp);
b75263d6 6356 tcg_temp_free_i64(tmp);
5e3f878a
PB
6357}
6358
6359/* Set N and Z flags from a 64-bit value. */
a7812ae4 6360static void gen_logicq_cc(TCGv_i64 val)
5e3f878a 6361{
7d1b0095 6362 TCGv tmp = tcg_temp_new_i32();
5e3f878a 6363 gen_helper_logicq_cc(tmp, val);
6fbe23d5 6364 gen_logic_CC(tmp);
7d1b0095 6365 tcg_temp_free_i32(tmp);
5e3f878a
PB
6366}
6367
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    /* size is log2 of the access width; 2 and 3 both begin with a
     * 32-bit load (the high word of an LDREXD is loaded below).
     */
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Record the loaded value so the matching store-exclusive can check
     * that memory still holds it.
     */
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        /* LDREXD: load the second word into rt2 and record it too.  */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    /* Recording the address arms the exclusive monitor.  */
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
6408
static void gen_clrex(DisasContext *s)
{
    /* CLREX: disarm the exclusive monitor by setting the recorded
     * address to a value no subsequent store-exclusive should match.
     */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6413
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    /* User emulation: record the operands and raise EXCP_STREX so the
     * cpu loop can perform the whole compare-and-store atomically.
     */
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail unless the monitor was armed for exactly this address.  */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Fail if memory no longer holds the value load-exclusive saw.  */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* STREXD: also check the second word against exclusive_high.  */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* Checks passed: perform the store(s) and report success in Rd.  */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        /* STREXD: store the second word of the pair.  */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the exclusive monitor is now disarmed.  */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
6492
9ee6e8bb
PB
6493static void disas_arm_insn(CPUState * env, DisasContext *s)
6494{
6495 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6496 TCGv tmp;
3670669c 6497 TCGv tmp2;
6ddbc6e4 6498 TCGv tmp3;
b0109805 6499 TCGv addr;
a7812ae4 6500 TCGv_i64 tmp64;
9ee6e8bb
PB
6501
6502 insn = ldl_code(s->pc);
6503 s->pc += 4;
6504
6505 /* M variants do not implement ARM mode. */
6506 if (IS_M(env))
6507 goto illegal_op;
6508 cond = insn >> 28;
6509 if (cond == 0xf){
be5e7a76
DES
6510 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6511 * choose to UNDEF. In ARMv5 and above the space is used
6512 * for miscellaneous unconditional instructions.
6513 */
6514 ARCH(5);
6515
9ee6e8bb
PB
6516 /* Unconditional instructions. */
6517 if (((insn >> 25) & 7) == 1) {
6518 /* NEON Data processing. */
6519 if (!arm_feature(env, ARM_FEATURE_NEON))
6520 goto illegal_op;
6521
6522 if (disas_neon_data_insn(env, s, insn))
6523 goto illegal_op;
6524 return;
6525 }
6526 if ((insn & 0x0f100000) == 0x04000000) {
6527 /* NEON load/store. */
6528 if (!arm_feature(env, ARM_FEATURE_NEON))
6529 goto illegal_op;
6530
6531 if (disas_neon_ls_insn(env, s, insn))
6532 goto illegal_op;
6533 return;
6534 }
3d185e5d
PM
6535 if (((insn & 0x0f30f000) == 0x0510f000) ||
6536 ((insn & 0x0f30f010) == 0x0710f000)) {
6537 if ((insn & (1 << 22)) == 0) {
6538 /* PLDW; v7MP */
6539 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6540 goto illegal_op;
6541 }
6542 }
6543 /* Otherwise PLD; v5TE+ */
be5e7a76 6544 ARCH(5TE);
3d185e5d
PM
6545 return;
6546 }
6547 if (((insn & 0x0f70f000) == 0x0450f000) ||
6548 ((insn & 0x0f70f010) == 0x0650f000)) {
6549 ARCH(7);
6550 return; /* PLI; V7 */
6551 }
6552 if (((insn & 0x0f700000) == 0x04100000) ||
6553 ((insn & 0x0f700010) == 0x06100000)) {
6554 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6555 goto illegal_op;
6556 }
6557 return; /* v7MP: Unallocated memory hint: must NOP */
6558 }
6559
6560 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6561 ARCH(6);
6562 /* setend */
6563 if (insn & (1 << 9)) {
6564 /* BE8 mode not implemented. */
6565 goto illegal_op;
6566 }
6567 return;
6568 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6569 switch ((insn >> 4) & 0xf) {
6570 case 1: /* clrex */
6571 ARCH(6K);
426f5abc 6572 gen_clrex(s);
9ee6e8bb
PB
6573 return;
6574 case 4: /* dsb */
6575 case 5: /* dmb */
6576 case 6: /* isb */
6577 ARCH(7);
6578 /* We don't emulate caches so these are a no-op. */
6579 return;
6580 default:
6581 goto illegal_op;
6582 }
6583 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6584 /* srs */
c67b6b71 6585 int32_t offset;
9ee6e8bb
PB
6586 if (IS_USER(s))
6587 goto illegal_op;
6588 ARCH(6);
6589 op1 = (insn & 0x1f);
7d1b0095 6590 addr = tcg_temp_new_i32();
39ea3d4e
PM
6591 tmp = tcg_const_i32(op1);
6592 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6593 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6594 i = (insn >> 23) & 3;
6595 switch (i) {
6596 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6597 case 1: offset = 0; break; /* IA */
6598 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6599 case 3: offset = 4; break; /* IB */
6600 default: abort();
6601 }
6602 if (offset)
b0109805
PB
6603 tcg_gen_addi_i32(addr, addr, offset);
6604 tmp = load_reg(s, 14);
6605 gen_st32(tmp, addr, 0);
c67b6b71 6606 tmp = load_cpu_field(spsr);
b0109805
PB
6607 tcg_gen_addi_i32(addr, addr, 4);
6608 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6609 if (insn & (1 << 21)) {
6610 /* Base writeback. */
6611 switch (i) {
6612 case 0: offset = -8; break;
c67b6b71
FN
6613 case 1: offset = 4; break;
6614 case 2: offset = -4; break;
9ee6e8bb
PB
6615 case 3: offset = 0; break;
6616 default: abort();
6617 }
6618 if (offset)
c67b6b71 6619 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6620 tmp = tcg_const_i32(op1);
6621 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6622 tcg_temp_free_i32(tmp);
7d1b0095 6623 tcg_temp_free_i32(addr);
b0109805 6624 } else {
7d1b0095 6625 tcg_temp_free_i32(addr);
9ee6e8bb 6626 }
a990f58f 6627 return;
ea825eee 6628 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6629 /* rfe */
c67b6b71 6630 int32_t offset;
9ee6e8bb
PB
6631 if (IS_USER(s))
6632 goto illegal_op;
6633 ARCH(6);
6634 rn = (insn >> 16) & 0xf;
b0109805 6635 addr = load_reg(s, rn);
9ee6e8bb
PB
6636 i = (insn >> 23) & 3;
6637 switch (i) {
b0109805 6638 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6639 case 1: offset = 0; break; /* IA */
6640 case 2: offset = -8; break; /* DB */
b0109805 6641 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6642 default: abort();
6643 }
6644 if (offset)
b0109805
PB
6645 tcg_gen_addi_i32(addr, addr, offset);
6646 /* Load PC into tmp and CPSR into tmp2. */
6647 tmp = gen_ld32(addr, 0);
6648 tcg_gen_addi_i32(addr, addr, 4);
6649 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6650 if (insn & (1 << 21)) {
6651 /* Base writeback. */
6652 switch (i) {
b0109805 6653 case 0: offset = -8; break;
c67b6b71
FN
6654 case 1: offset = 4; break;
6655 case 2: offset = -4; break;
b0109805 6656 case 3: offset = 0; break;
9ee6e8bb
PB
6657 default: abort();
6658 }
6659 if (offset)
b0109805
PB
6660 tcg_gen_addi_i32(addr, addr, offset);
6661 store_reg(s, rn, addr);
6662 } else {
7d1b0095 6663 tcg_temp_free_i32(addr);
9ee6e8bb 6664 }
b0109805 6665 gen_rfe(s, tmp, tmp2);
c67b6b71 6666 return;
9ee6e8bb
PB
6667 } else if ((insn & 0x0e000000) == 0x0a000000) {
6668 /* branch link and change to thumb (blx <offset>) */
6669 int32_t offset;
6670
6671 val = (uint32_t)s->pc;
7d1b0095 6672 tmp = tcg_temp_new_i32();
d9ba4830
PB
6673 tcg_gen_movi_i32(tmp, val);
6674 store_reg(s, 14, tmp);
9ee6e8bb
PB
6675 /* Sign-extend the 24-bit offset */
6676 offset = (((int32_t)insn) << 8) >> 8;
6677 /* offset * 4 + bit24 * 2 + (thumb bit) */
6678 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6679 /* pipeline offset */
6680 val += 4;
be5e7a76 6681 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6682 gen_bx_im(s, val);
9ee6e8bb
PB
6683 return;
6684 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6685 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6686 /* iWMMXt register transfer. */
6687 if (env->cp15.c15_cpar & (1 << 1))
6688 if (!disas_iwmmxt_insn(env, s, insn))
6689 return;
6690 }
6691 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6692 /* Coprocessor double register transfer. */
be5e7a76 6693 ARCH(5TE);
9ee6e8bb
PB
6694 } else if ((insn & 0x0f000010) == 0x0e000010) {
6695 /* Additional coprocessor register transfer. */
7997d92f 6696 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6697 uint32_t mask;
6698 uint32_t val;
6699 /* cps (privileged) */
6700 if (IS_USER(s))
6701 return;
6702 mask = val = 0;
6703 if (insn & (1 << 19)) {
6704 if (insn & (1 << 8))
6705 mask |= CPSR_A;
6706 if (insn & (1 << 7))
6707 mask |= CPSR_I;
6708 if (insn & (1 << 6))
6709 mask |= CPSR_F;
6710 if (insn & (1 << 18))
6711 val |= mask;
6712 }
7997d92f 6713 if (insn & (1 << 17)) {
9ee6e8bb
PB
6714 mask |= CPSR_M;
6715 val |= (insn & 0x1f);
6716 }
6717 if (mask) {
2fbac54b 6718 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6719 }
6720 return;
6721 }
6722 goto illegal_op;
6723 }
6724 if (cond != 0xe) {
6725 /* if not always execute, we generate a conditional jump to
6726 next instruction */
6727 s->condlabel = gen_new_label();
d9ba4830 6728 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6729 s->condjmp = 1;
6730 }
6731 if ((insn & 0x0f900000) == 0x03000000) {
6732 if ((insn & (1 << 21)) == 0) {
6733 ARCH(6T2);
6734 rd = (insn >> 12) & 0xf;
6735 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6736 if ((insn & (1 << 22)) == 0) {
6737 /* MOVW */
7d1b0095 6738 tmp = tcg_temp_new_i32();
5e3f878a 6739 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6740 } else {
6741 /* MOVT */
5e3f878a 6742 tmp = load_reg(s, rd);
86831435 6743 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6744 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6745 }
5e3f878a 6746 store_reg(s, rd, tmp);
9ee6e8bb
PB
6747 } else {
6748 if (((insn >> 12) & 0xf) != 0xf)
6749 goto illegal_op;
6750 if (((insn >> 16) & 0xf) == 0) {
6751 gen_nop_hint(s, insn & 0xff);
6752 } else {
6753 /* CPSR = immediate */
6754 val = insn & 0xff;
6755 shift = ((insn >> 8) & 0xf) * 2;
6756 if (shift)
6757 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6758 i = ((insn & (1 << 22)) != 0);
2fbac54b 6759 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6760 goto illegal_op;
6761 }
6762 }
6763 } else if ((insn & 0x0f900000) == 0x01000000
6764 && (insn & 0x00000090) != 0x00000090) {
6765 /* miscellaneous instructions */
6766 op1 = (insn >> 21) & 3;
6767 sh = (insn >> 4) & 0xf;
6768 rm = insn & 0xf;
6769 switch (sh) {
6770 case 0x0: /* move program status register */
6771 if (op1 & 1) {
6772 /* PSR = reg */
2fbac54b 6773 tmp = load_reg(s, rm);
9ee6e8bb 6774 i = ((op1 & 2) != 0);
2fbac54b 6775 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6776 goto illegal_op;
6777 } else {
6778 /* reg = PSR */
6779 rd = (insn >> 12) & 0xf;
6780 if (op1 & 2) {
6781 if (IS_USER(s))
6782 goto illegal_op;
d9ba4830 6783 tmp = load_cpu_field(spsr);
9ee6e8bb 6784 } else {
7d1b0095 6785 tmp = tcg_temp_new_i32();
d9ba4830 6786 gen_helper_cpsr_read(tmp);
9ee6e8bb 6787 }
d9ba4830 6788 store_reg(s, rd, tmp);
9ee6e8bb
PB
6789 }
6790 break;
6791 case 0x1:
6792 if (op1 == 1) {
6793 /* branch/exchange thumb (bx). */
be5e7a76 6794 ARCH(4T);
d9ba4830
PB
6795 tmp = load_reg(s, rm);
6796 gen_bx(s, tmp);
9ee6e8bb
PB
6797 } else if (op1 == 3) {
6798 /* clz */
be5e7a76 6799 ARCH(5);
9ee6e8bb 6800 rd = (insn >> 12) & 0xf;
1497c961
PB
6801 tmp = load_reg(s, rm);
6802 gen_helper_clz(tmp, tmp);
6803 store_reg(s, rd, tmp);
9ee6e8bb
PB
6804 } else {
6805 goto illegal_op;
6806 }
6807 break;
6808 case 0x2:
6809 if (op1 == 1) {
6810 ARCH(5J); /* bxj */
6811 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6812 tmp = load_reg(s, rm);
6813 gen_bx(s, tmp);
9ee6e8bb
PB
6814 } else {
6815 goto illegal_op;
6816 }
6817 break;
6818 case 0x3:
6819 if (op1 != 1)
6820 goto illegal_op;
6821
be5e7a76 6822 ARCH(5);
9ee6e8bb 6823 /* branch link/exchange thumb (blx) */
d9ba4830 6824 tmp = load_reg(s, rm);
7d1b0095 6825 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6826 tcg_gen_movi_i32(tmp2, s->pc);
6827 store_reg(s, 14, tmp2);
6828 gen_bx(s, tmp);
9ee6e8bb
PB
6829 break;
6830 case 0x5: /* saturating add/subtract */
be5e7a76 6831 ARCH(5TE);
9ee6e8bb
PB
6832 rd = (insn >> 12) & 0xf;
6833 rn = (insn >> 16) & 0xf;
b40d0353 6834 tmp = load_reg(s, rm);
5e3f878a 6835 tmp2 = load_reg(s, rn);
9ee6e8bb 6836 if (op1 & 2)
5e3f878a 6837 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6838 if (op1 & 1)
5e3f878a 6839 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6840 else
5e3f878a 6841 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 6842 tcg_temp_free_i32(tmp2);
5e3f878a 6843 store_reg(s, rd, tmp);
9ee6e8bb 6844 break;
49e14940
AL
6845 case 7:
6846 /* SMC instruction (op1 == 3)
6847 and undefined instructions (op1 == 0 || op1 == 2)
6848 will trap */
6849 if (op1 != 1) {
6850 goto illegal_op;
6851 }
6852 /* bkpt */
be5e7a76 6853 ARCH(5);
bc4a0de0 6854 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6855 break;
6856 case 0x8: /* signed multiply */
6857 case 0xa:
6858 case 0xc:
6859 case 0xe:
be5e7a76 6860 ARCH(5TE);
9ee6e8bb
PB
6861 rs = (insn >> 8) & 0xf;
6862 rn = (insn >> 12) & 0xf;
6863 rd = (insn >> 16) & 0xf;
6864 if (op1 == 1) {
6865 /* (32 * 16) >> 16 */
5e3f878a
PB
6866 tmp = load_reg(s, rm);
6867 tmp2 = load_reg(s, rs);
9ee6e8bb 6868 if (sh & 4)
5e3f878a 6869 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6870 else
5e3f878a 6871 gen_sxth(tmp2);
a7812ae4
PB
6872 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6873 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6874 tmp = tcg_temp_new_i32();
a7812ae4 6875 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6876 tcg_temp_free_i64(tmp64);
9ee6e8bb 6877 if ((sh & 2) == 0) {
5e3f878a
PB
6878 tmp2 = load_reg(s, rn);
6879 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6880 tcg_temp_free_i32(tmp2);
9ee6e8bb 6881 }
5e3f878a 6882 store_reg(s, rd, tmp);
9ee6e8bb
PB
6883 } else {
6884 /* 16 * 16 */
5e3f878a
PB
6885 tmp = load_reg(s, rm);
6886 tmp2 = load_reg(s, rs);
6887 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6888 tcg_temp_free_i32(tmp2);
9ee6e8bb 6889 if (op1 == 2) {
a7812ae4
PB
6890 tmp64 = tcg_temp_new_i64();
6891 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 6892 tcg_temp_free_i32(tmp);
a7812ae4
PB
6893 gen_addq(s, tmp64, rn, rd);
6894 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 6895 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
6896 } else {
6897 if (op1 == 0) {
5e3f878a
PB
6898 tmp2 = load_reg(s, rn);
6899 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 6900 tcg_temp_free_i32(tmp2);
9ee6e8bb 6901 }
5e3f878a 6902 store_reg(s, rd, tmp);
9ee6e8bb
PB
6903 }
6904 }
6905 break;
6906 default:
6907 goto illegal_op;
6908 }
6909 } else if (((insn & 0x0e000000) == 0 &&
6910 (insn & 0x00000090) != 0x90) ||
6911 ((insn & 0x0e000000) == (1 << 25))) {
6912 int set_cc, logic_cc, shiftop;
6913
6914 op1 = (insn >> 21) & 0xf;
6915 set_cc = (insn >> 20) & 1;
6916 logic_cc = table_logic_cc[op1] & set_cc;
6917
6918 /* data processing instruction */
6919 if (insn & (1 << 25)) {
6920 /* immediate operand */
6921 val = insn & 0xff;
6922 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6923 if (shift) {
9ee6e8bb 6924 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 6925 }
7d1b0095 6926 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
6927 tcg_gen_movi_i32(tmp2, val);
6928 if (logic_cc && shift) {
6929 gen_set_CF_bit31(tmp2);
6930 }
9ee6e8bb
PB
6931 } else {
6932 /* register */
6933 rm = (insn) & 0xf;
e9bb4aa9 6934 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6935 shiftop = (insn >> 5) & 3;
6936 if (!(insn & (1 << 4))) {
6937 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6938 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6939 } else {
6940 rs = (insn >> 8) & 0xf;
8984bd2e 6941 tmp = load_reg(s, rs);
e9bb4aa9 6942 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6943 }
6944 }
6945 if (op1 != 0x0f && op1 != 0x0d) {
6946 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6947 tmp = load_reg(s, rn);
6948 } else {
6949 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6950 }
6951 rd = (insn >> 12) & 0xf;
6952 switch(op1) {
6953 case 0x00:
e9bb4aa9
JR
6954 tcg_gen_and_i32(tmp, tmp, tmp2);
6955 if (logic_cc) {
6956 gen_logic_CC(tmp);
6957 }
21aeb343 6958 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6959 break;
6960 case 0x01:
e9bb4aa9
JR
6961 tcg_gen_xor_i32(tmp, tmp, tmp2);
6962 if (logic_cc) {
6963 gen_logic_CC(tmp);
6964 }
21aeb343 6965 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6966 break;
6967 case 0x02:
6968 if (set_cc && rd == 15) {
6969 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6970 if (IS_USER(s)) {
9ee6e8bb 6971 goto illegal_op;
e9bb4aa9
JR
6972 }
6973 gen_helper_sub_cc(tmp, tmp, tmp2);
6974 gen_exception_return(s, tmp);
9ee6e8bb 6975 } else {
e9bb4aa9
JR
6976 if (set_cc) {
6977 gen_helper_sub_cc(tmp, tmp, tmp2);
6978 } else {
6979 tcg_gen_sub_i32(tmp, tmp, tmp2);
6980 }
21aeb343 6981 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6982 }
6983 break;
6984 case 0x03:
e9bb4aa9
JR
6985 if (set_cc) {
6986 gen_helper_sub_cc(tmp, tmp2, tmp);
6987 } else {
6988 tcg_gen_sub_i32(tmp, tmp2, tmp);
6989 }
21aeb343 6990 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6991 break;
6992 case 0x04:
e9bb4aa9
JR
6993 if (set_cc) {
6994 gen_helper_add_cc(tmp, tmp, tmp2);
6995 } else {
6996 tcg_gen_add_i32(tmp, tmp, tmp2);
6997 }
21aeb343 6998 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6999 break;
7000 case 0x05:
e9bb4aa9
JR
7001 if (set_cc) {
7002 gen_helper_adc_cc(tmp, tmp, tmp2);
7003 } else {
7004 gen_add_carry(tmp, tmp, tmp2);
7005 }
21aeb343 7006 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7007 break;
7008 case 0x06:
e9bb4aa9
JR
7009 if (set_cc) {
7010 gen_helper_sbc_cc(tmp, tmp, tmp2);
7011 } else {
7012 gen_sub_carry(tmp, tmp, tmp2);
7013 }
21aeb343 7014 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7015 break;
7016 case 0x07:
e9bb4aa9
JR
7017 if (set_cc) {
7018 gen_helper_sbc_cc(tmp, tmp2, tmp);
7019 } else {
7020 gen_sub_carry(tmp, tmp2, tmp);
7021 }
21aeb343 7022 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7023 break;
7024 case 0x08:
7025 if (set_cc) {
e9bb4aa9
JR
7026 tcg_gen_and_i32(tmp, tmp, tmp2);
7027 gen_logic_CC(tmp);
9ee6e8bb 7028 }
7d1b0095 7029 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7030 break;
7031 case 0x09:
7032 if (set_cc) {
e9bb4aa9
JR
7033 tcg_gen_xor_i32(tmp, tmp, tmp2);
7034 gen_logic_CC(tmp);
9ee6e8bb 7035 }
7d1b0095 7036 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7037 break;
7038 case 0x0a:
7039 if (set_cc) {
e9bb4aa9 7040 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 7041 }
7d1b0095 7042 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7043 break;
7044 case 0x0b:
7045 if (set_cc) {
e9bb4aa9 7046 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 7047 }
7d1b0095 7048 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7049 break;
7050 case 0x0c:
e9bb4aa9
JR
7051 tcg_gen_or_i32(tmp, tmp, tmp2);
7052 if (logic_cc) {
7053 gen_logic_CC(tmp);
7054 }
21aeb343 7055 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7056 break;
7057 case 0x0d:
7058 if (logic_cc && rd == 15) {
7059 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7060 if (IS_USER(s)) {
9ee6e8bb 7061 goto illegal_op;
e9bb4aa9
JR
7062 }
7063 gen_exception_return(s, tmp2);
9ee6e8bb 7064 } else {
e9bb4aa9
JR
7065 if (logic_cc) {
7066 gen_logic_CC(tmp2);
7067 }
21aeb343 7068 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7069 }
7070 break;
7071 case 0x0e:
f669df27 7072 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7073 if (logic_cc) {
7074 gen_logic_CC(tmp);
7075 }
21aeb343 7076 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7077 break;
7078 default:
7079 case 0x0f:
e9bb4aa9
JR
7080 tcg_gen_not_i32(tmp2, tmp2);
7081 if (logic_cc) {
7082 gen_logic_CC(tmp2);
7083 }
21aeb343 7084 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7085 break;
7086 }
e9bb4aa9 7087 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7088 tcg_temp_free_i32(tmp2);
e9bb4aa9 7089 }
9ee6e8bb
PB
7090 } else {
7091 /* other instructions */
7092 op1 = (insn >> 24) & 0xf;
7093 switch(op1) {
7094 case 0x0:
7095 case 0x1:
7096 /* multiplies, extra load/stores */
7097 sh = (insn >> 5) & 3;
7098 if (sh == 0) {
7099 if (op1 == 0x0) {
7100 rd = (insn >> 16) & 0xf;
7101 rn = (insn >> 12) & 0xf;
7102 rs = (insn >> 8) & 0xf;
7103 rm = (insn) & 0xf;
7104 op1 = (insn >> 20) & 0xf;
7105 switch (op1) {
7106 case 0: case 1: case 2: case 3: case 6:
7107 /* 32 bit mul */
5e3f878a
PB
7108 tmp = load_reg(s, rs);
7109 tmp2 = load_reg(s, rm);
7110 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7111 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7112 if (insn & (1 << 22)) {
7113 /* Subtract (mls) */
7114 ARCH(6T2);
5e3f878a
PB
7115 tmp2 = load_reg(s, rn);
7116 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7117 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7118 } else if (insn & (1 << 21)) {
7119 /* Add */
5e3f878a
PB
7120 tmp2 = load_reg(s, rn);
7121 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7122 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7123 }
7124 if (insn & (1 << 20))
5e3f878a
PB
7125 gen_logic_CC(tmp);
7126 store_reg(s, rd, tmp);
9ee6e8bb 7127 break;
8aac08b1
AJ
7128 case 4:
7129 /* 64 bit mul double accumulate (UMAAL) */
7130 ARCH(6);
7131 tmp = load_reg(s, rs);
7132 tmp2 = load_reg(s, rm);
7133 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7134 gen_addq_lo(s, tmp64, rn);
7135 gen_addq_lo(s, tmp64, rd);
7136 gen_storeq_reg(s, rn, rd, tmp64);
7137 tcg_temp_free_i64(tmp64);
7138 break;
7139 case 8: case 9: case 10: case 11:
7140 case 12: case 13: case 14: case 15:
7141 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7142 tmp = load_reg(s, rs);
7143 tmp2 = load_reg(s, rm);
8aac08b1 7144 if (insn & (1 << 22)) {
a7812ae4 7145 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8aac08b1 7146 } else {
a7812ae4 7147 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8aac08b1
AJ
7148 }
7149 if (insn & (1 << 21)) { /* mult accumulate */
a7812ae4 7150 gen_addq(s, tmp64, rn, rd);
9ee6e8bb 7151 }
8aac08b1 7152 if (insn & (1 << 20)) {
a7812ae4 7153 gen_logicq_cc(tmp64);
8aac08b1 7154 }
a7812ae4 7155 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7156 tcg_temp_free_i64(tmp64);
9ee6e8bb 7157 break;
8aac08b1
AJ
7158 default:
7159 goto illegal_op;
9ee6e8bb
PB
7160 }
7161 } else {
7162 rn = (insn >> 16) & 0xf;
7163 rd = (insn >> 12) & 0xf;
7164 if (insn & (1 << 23)) {
7165 /* load/store exclusive */
86753403
PB
7166 op1 = (insn >> 21) & 0x3;
7167 if (op1)
a47f43d2 7168 ARCH(6K);
86753403
PB
7169 else
7170 ARCH(6);
3174f8e9 7171 addr = tcg_temp_local_new_i32();
98a46317 7172 load_reg_var(s, addr, rn);
9ee6e8bb 7173 if (insn & (1 << 20)) {
86753403
PB
7174 switch (op1) {
7175 case 0: /* ldrex */
426f5abc 7176 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7177 break;
7178 case 1: /* ldrexd */
426f5abc 7179 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7180 break;
7181 case 2: /* ldrexb */
426f5abc 7182 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7183 break;
7184 case 3: /* ldrexh */
426f5abc 7185 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7186 break;
7187 default:
7188 abort();
7189 }
9ee6e8bb
PB
7190 } else {
7191 rm = insn & 0xf;
86753403
PB
7192 switch (op1) {
7193 case 0: /* strex */
426f5abc 7194 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7195 break;
7196 case 1: /* strexd */
502e64fe 7197 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7198 break;
7199 case 2: /* strexb */
426f5abc 7200 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7201 break;
7202 case 3: /* strexh */
426f5abc 7203 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7204 break;
7205 default:
7206 abort();
7207 }
9ee6e8bb 7208 }
3174f8e9 7209 tcg_temp_free(addr);
9ee6e8bb
PB
7210 } else {
7211 /* SWP instruction */
7212 rm = (insn) & 0xf;
7213
8984bd2e
PB
7214 /* ??? This is not really atomic. However we know
7215 we never have multiple CPUs running in parallel,
7216 so it is good enough. */
7217 addr = load_reg(s, rn);
7218 tmp = load_reg(s, rm);
9ee6e8bb 7219 if (insn & (1 << 22)) {
8984bd2e
PB
7220 tmp2 = gen_ld8u(addr, IS_USER(s));
7221 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7222 } else {
8984bd2e
PB
7223 tmp2 = gen_ld32(addr, IS_USER(s));
7224 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7225 }
7d1b0095 7226 tcg_temp_free_i32(addr);
8984bd2e 7227 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7228 }
7229 }
7230 } else {
7231 int address_offset;
7232 int load;
7233 /* Misc load/store */
7234 rn = (insn >> 16) & 0xf;
7235 rd = (insn >> 12) & 0xf;
b0109805 7236 addr = load_reg(s, rn);
9ee6e8bb 7237 if (insn & (1 << 24))
b0109805 7238 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7239 address_offset = 0;
7240 if (insn & (1 << 20)) {
7241 /* load */
7242 switch(sh) {
7243 case 1:
b0109805 7244 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7245 break;
7246 case 2:
b0109805 7247 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7248 break;
7249 default:
7250 case 3:
b0109805 7251 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7252 break;
7253 }
7254 load = 1;
7255 } else if (sh & 2) {
be5e7a76 7256 ARCH(5TE);
9ee6e8bb
PB
7257 /* doubleword */
7258 if (sh & 1) {
7259 /* store */
b0109805
PB
7260 tmp = load_reg(s, rd);
7261 gen_st32(tmp, addr, IS_USER(s));
7262 tcg_gen_addi_i32(addr, addr, 4);
7263 tmp = load_reg(s, rd + 1);
7264 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7265 load = 0;
7266 } else {
7267 /* load */
b0109805
PB
7268 tmp = gen_ld32(addr, IS_USER(s));
7269 store_reg(s, rd, tmp);
7270 tcg_gen_addi_i32(addr, addr, 4);
7271 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7272 rd++;
7273 load = 1;
7274 }
7275 address_offset = -4;
7276 } else {
7277 /* store */
b0109805
PB
7278 tmp = load_reg(s, rd);
7279 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7280 load = 0;
7281 }
7282 /* Perform base writeback before the loaded value to
7283 ensure correct behavior with overlapping index registers.
7284 ldrd with base writeback is is undefined if the
7285 destination and index registers overlap. */
7286 if (!(insn & (1 << 24))) {
b0109805
PB
7287 gen_add_datah_offset(s, insn, address_offset, addr);
7288 store_reg(s, rn, addr);
9ee6e8bb
PB
7289 } else if (insn & (1 << 21)) {
7290 if (address_offset)
b0109805
PB
7291 tcg_gen_addi_i32(addr, addr, address_offset);
7292 store_reg(s, rn, addr);
7293 } else {
7d1b0095 7294 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7295 }
7296 if (load) {
7297 /* Complete the load. */
b0109805 7298 store_reg(s, rd, tmp);
9ee6e8bb
PB
7299 }
7300 }
7301 break;
7302 case 0x4:
7303 case 0x5:
7304 goto do_ldst;
7305 case 0x6:
7306 case 0x7:
7307 if (insn & (1 << 4)) {
7308 ARCH(6);
7309 /* Armv6 Media instructions. */
7310 rm = insn & 0xf;
7311 rn = (insn >> 16) & 0xf;
2c0262af 7312 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7313 rs = (insn >> 8) & 0xf;
7314 switch ((insn >> 23) & 3) {
7315 case 0: /* Parallel add/subtract. */
7316 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7317 tmp = load_reg(s, rn);
7318 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7319 sh = (insn >> 5) & 7;
7320 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7321 goto illegal_op;
6ddbc6e4 7322 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7323 tcg_temp_free_i32(tmp2);
6ddbc6e4 7324 store_reg(s, rd, tmp);
9ee6e8bb
PB
7325 break;
7326 case 1:
7327 if ((insn & 0x00700020) == 0) {
6c95676b 7328 /* Halfword pack. */
3670669c
PB
7329 tmp = load_reg(s, rn);
7330 tmp2 = load_reg(s, rm);
9ee6e8bb 7331 shift = (insn >> 7) & 0x1f;
3670669c
PB
7332 if (insn & (1 << 6)) {
7333 /* pkhtb */
22478e79
AZ
7334 if (shift == 0)
7335 shift = 31;
7336 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7337 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7338 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7339 } else {
7340 /* pkhbt */
22478e79
AZ
7341 if (shift)
7342 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7343 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7344 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7345 }
7346 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7347 tcg_temp_free_i32(tmp2);
3670669c 7348 store_reg(s, rd, tmp);
9ee6e8bb
PB
7349 } else if ((insn & 0x00200020) == 0x00200000) {
7350 /* [us]sat */
6ddbc6e4 7351 tmp = load_reg(s, rm);
9ee6e8bb
PB
7352 shift = (insn >> 7) & 0x1f;
7353 if (insn & (1 << 6)) {
7354 if (shift == 0)
7355 shift = 31;
6ddbc6e4 7356 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7357 } else {
6ddbc6e4 7358 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7359 }
7360 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7361 tmp2 = tcg_const_i32(sh);
7362 if (insn & (1 << 22))
7363 gen_helper_usat(tmp, tmp, tmp2);
7364 else
7365 gen_helper_ssat(tmp, tmp, tmp2);
7366 tcg_temp_free_i32(tmp2);
6ddbc6e4 7367 store_reg(s, rd, tmp);
9ee6e8bb
PB
7368 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7369 /* [us]sat16 */
6ddbc6e4 7370 tmp = load_reg(s, rm);
9ee6e8bb 7371 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7372 tmp2 = tcg_const_i32(sh);
7373 if (insn & (1 << 22))
7374 gen_helper_usat16(tmp, tmp, tmp2);
7375 else
7376 gen_helper_ssat16(tmp, tmp, tmp2);
7377 tcg_temp_free_i32(tmp2);
6ddbc6e4 7378 store_reg(s, rd, tmp);
9ee6e8bb
PB
7379 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7380 /* Select bytes. */
6ddbc6e4
PB
7381 tmp = load_reg(s, rn);
7382 tmp2 = load_reg(s, rm);
7d1b0095 7383 tmp3 = tcg_temp_new_i32();
6ddbc6e4
PB
7384 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7385 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7386 tcg_temp_free_i32(tmp3);
7387 tcg_temp_free_i32(tmp2);
6ddbc6e4 7388 store_reg(s, rd, tmp);
9ee6e8bb 7389 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7390 tmp = load_reg(s, rm);
9ee6e8bb 7391 shift = (insn >> 10) & 3;
1301f322 7392 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7393 rotate, a shift is sufficient. */
7394 if (shift != 0)
f669df27 7395 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7396 op1 = (insn >> 20) & 7;
7397 switch (op1) {
5e3f878a
PB
7398 case 0: gen_sxtb16(tmp); break;
7399 case 2: gen_sxtb(tmp); break;
7400 case 3: gen_sxth(tmp); break;
7401 case 4: gen_uxtb16(tmp); break;
7402 case 6: gen_uxtb(tmp); break;
7403 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7404 default: goto illegal_op;
7405 }
7406 if (rn != 15) {
5e3f878a 7407 tmp2 = load_reg(s, rn);
9ee6e8bb 7408 if ((op1 & 3) == 0) {
5e3f878a 7409 gen_add16(tmp, tmp2);
9ee6e8bb 7410 } else {
5e3f878a 7411 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7412 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7413 }
7414 }
6c95676b 7415 store_reg(s, rd, tmp);
9ee6e8bb
PB
7416 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7417 /* rev */
b0109805 7418 tmp = load_reg(s, rm);
9ee6e8bb
PB
7419 if (insn & (1 << 22)) {
7420 if (insn & (1 << 7)) {
b0109805 7421 gen_revsh(tmp);
9ee6e8bb
PB
7422 } else {
7423 ARCH(6T2);
b0109805 7424 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7425 }
7426 } else {
7427 if (insn & (1 << 7))
b0109805 7428 gen_rev16(tmp);
9ee6e8bb 7429 else
66896cb8 7430 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7431 }
b0109805 7432 store_reg(s, rd, tmp);
9ee6e8bb
PB
7433 } else {
7434 goto illegal_op;
7435 }
7436 break;
7437 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
7438 tmp = load_reg(s, rm);
7439 tmp2 = load_reg(s, rs);
9ee6e8bb 7440 if (insn & (1 << 20)) {
838fa72d
AJ
7441 /* Signed multiply most significant [accumulate].
7442 (SMMUL, SMMLA, SMMLS) */
a7812ae4 7443 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7444
955a7dd5 7445 if (rd != 15) {
838fa72d 7446 tmp = load_reg(s, rd);
9ee6e8bb 7447 if (insn & (1 << 6)) {
838fa72d 7448 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7449 } else {
838fa72d 7450 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7451 }
7452 }
838fa72d
AJ
7453 if (insn & (1 << 5)) {
7454 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7455 }
7456 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7457 tmp = tcg_temp_new_i32();
838fa72d
AJ
7458 tcg_gen_trunc_i64_i32(tmp, tmp64);
7459 tcg_temp_free_i64(tmp64);
955a7dd5 7460 store_reg(s, rn, tmp);
9ee6e8bb
PB
7461 } else {
7462 if (insn & (1 << 5))
5e3f878a
PB
7463 gen_swap_half(tmp2);
7464 gen_smul_dual(tmp, tmp2);
5e3f878a 7465 if (insn & (1 << 6)) {
e1d177b9 7466 /* This subtraction cannot overflow. */
5e3f878a
PB
7467 tcg_gen_sub_i32(tmp, tmp, tmp2);
7468 } else {
e1d177b9
PM
7469 /* This addition cannot overflow 32 bits;
7470 * however it may overflow considered as a signed
7471 * operation, in which case we must set the Q flag.
7472 */
7473 gen_helper_add_setq(tmp, tmp, tmp2);
5e3f878a 7474 }
7d1b0095 7475 tcg_temp_free_i32(tmp2);
9ee6e8bb 7476 if (insn & (1 << 22)) {
5e3f878a 7477 /* smlald, smlsld */
a7812ae4
PB
7478 tmp64 = tcg_temp_new_i64();
7479 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7480 tcg_temp_free_i32(tmp);
a7812ae4
PB
7481 gen_addq(s, tmp64, rd, rn);
7482 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7483 tcg_temp_free_i64(tmp64);
9ee6e8bb 7484 } else {
5e3f878a 7485 /* smuad, smusd, smlad, smlsd */
22478e79 7486 if (rd != 15)
9ee6e8bb 7487 {
22478e79 7488 tmp2 = load_reg(s, rd);
5e3f878a 7489 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 7490 tcg_temp_free_i32(tmp2);
9ee6e8bb 7491 }
22478e79 7492 store_reg(s, rn, tmp);
9ee6e8bb
PB
7493 }
7494 }
7495 break;
7496 case 3:
7497 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7498 switch (op1) {
7499 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7500 ARCH(6);
7501 tmp = load_reg(s, rm);
7502 tmp2 = load_reg(s, rs);
7503 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7504 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7505 if (rd != 15) {
7506 tmp2 = load_reg(s, rd);
6ddbc6e4 7507 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7508 tcg_temp_free_i32(tmp2);
9ee6e8bb 7509 }
ded9d295 7510 store_reg(s, rn, tmp);
9ee6e8bb
PB
7511 break;
7512 case 0x20: case 0x24: case 0x28: case 0x2c:
7513 /* Bitfield insert/clear. */
7514 ARCH(6T2);
7515 shift = (insn >> 7) & 0x1f;
7516 i = (insn >> 16) & 0x1f;
7517 i = i + 1 - shift;
7518 if (rm == 15) {
7d1b0095 7519 tmp = tcg_temp_new_i32();
5e3f878a 7520 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7521 } else {
5e3f878a 7522 tmp = load_reg(s, rm);
9ee6e8bb
PB
7523 }
7524 if (i != 32) {
5e3f878a 7525 tmp2 = load_reg(s, rd);
8f8e3aa4 7526 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7d1b0095 7527 tcg_temp_free_i32(tmp2);
9ee6e8bb 7528 }
5e3f878a 7529 store_reg(s, rd, tmp);
9ee6e8bb
PB
7530 break;
7531 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7532 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7533 ARCH(6T2);
5e3f878a 7534 tmp = load_reg(s, rm);
9ee6e8bb
PB
7535 shift = (insn >> 7) & 0x1f;
7536 i = ((insn >> 16) & 0x1f) + 1;
7537 if (shift + i > 32)
7538 goto illegal_op;
7539 if (i < 32) {
7540 if (op1 & 0x20) {
5e3f878a 7541 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7542 } else {
5e3f878a 7543 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7544 }
7545 }
5e3f878a 7546 store_reg(s, rd, tmp);
9ee6e8bb
PB
7547 break;
7548 default:
7549 goto illegal_op;
7550 }
7551 break;
7552 }
7553 break;
7554 }
7555 do_ldst:
7556 /* Check for undefined extension instructions
7557 * per the ARM Bible IE:
7558 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7559 */
7560 sh = (0xf << 20) | (0xf << 4);
7561 if (op1 == 0x7 && ((insn & sh) == sh))
7562 {
7563 goto illegal_op;
7564 }
7565 /* load/store byte/word */
7566 rn = (insn >> 16) & 0xf;
7567 rd = (insn >> 12) & 0xf;
b0109805 7568 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7569 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7570 if (insn & (1 << 24))
b0109805 7571 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7572 if (insn & (1 << 20)) {
7573 /* load */
9ee6e8bb 7574 if (insn & (1 << 22)) {
b0109805 7575 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7576 } else {
b0109805 7577 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7578 }
9ee6e8bb
PB
7579 } else {
7580 /* store */
b0109805 7581 tmp = load_reg(s, rd);
9ee6e8bb 7582 if (insn & (1 << 22))
b0109805 7583 gen_st8(tmp, tmp2, i);
9ee6e8bb 7584 else
b0109805 7585 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7586 }
7587 if (!(insn & (1 << 24))) {
b0109805
PB
7588 gen_add_data_offset(s, insn, tmp2);
7589 store_reg(s, rn, tmp2);
7590 } else if (insn & (1 << 21)) {
7591 store_reg(s, rn, tmp2);
7592 } else {
7d1b0095 7593 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7594 }
7595 if (insn & (1 << 20)) {
7596 /* Complete the load. */
be5e7a76 7597 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7598 }
7599 break;
7600 case 0x08:
7601 case 0x09:
7602 {
7603 int j, n, user, loaded_base;
b0109805 7604 TCGv loaded_var;
9ee6e8bb
PB
7605 /* load/store multiple words */
7606 /* XXX: store correct base if write back */
7607 user = 0;
7608 if (insn & (1 << 22)) {
7609 if (IS_USER(s))
7610 goto illegal_op; /* only usable in supervisor mode */
7611
7612 if ((insn & (1 << 15)) == 0)
7613 user = 1;
7614 }
7615 rn = (insn >> 16) & 0xf;
b0109805 7616 addr = load_reg(s, rn);
9ee6e8bb
PB
7617
7618 /* compute total size */
7619 loaded_base = 0;
a50f5b91 7620 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7621 n = 0;
7622 for(i=0;i<16;i++) {
7623 if (insn & (1 << i))
7624 n++;
7625 }
7626 /* XXX: test invalid n == 0 case ? */
7627 if (insn & (1 << 23)) {
7628 if (insn & (1 << 24)) {
7629 /* pre increment */
b0109805 7630 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7631 } else {
7632 /* post increment */
7633 }
7634 } else {
7635 if (insn & (1 << 24)) {
7636 /* pre decrement */
b0109805 7637 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7638 } else {
7639 /* post decrement */
7640 if (n != 1)
b0109805 7641 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7642 }
7643 }
7644 j = 0;
7645 for(i=0;i<16;i++) {
7646 if (insn & (1 << i)) {
7647 if (insn & (1 << 20)) {
7648 /* load */
b0109805 7649 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7650 if (user) {
b75263d6
JR
7651 tmp2 = tcg_const_i32(i);
7652 gen_helper_set_user_reg(tmp2, tmp);
7653 tcg_temp_free_i32(tmp2);
7d1b0095 7654 tcg_temp_free_i32(tmp);
9ee6e8bb 7655 } else if (i == rn) {
b0109805 7656 loaded_var = tmp;
9ee6e8bb
PB
7657 loaded_base = 1;
7658 } else {
be5e7a76 7659 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7660 }
7661 } else {
7662 /* store */
7663 if (i == 15) {
7664 /* special case: r15 = PC + 8 */
7665 val = (long)s->pc + 4;
7d1b0095 7666 tmp = tcg_temp_new_i32();
b0109805 7667 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7668 } else if (user) {
7d1b0095 7669 tmp = tcg_temp_new_i32();
b75263d6
JR
7670 tmp2 = tcg_const_i32(i);
7671 gen_helper_get_user_reg(tmp, tmp2);
7672 tcg_temp_free_i32(tmp2);
9ee6e8bb 7673 } else {
b0109805 7674 tmp = load_reg(s, i);
9ee6e8bb 7675 }
b0109805 7676 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7677 }
7678 j++;
7679 /* no need to add after the last transfer */
7680 if (j != n)
b0109805 7681 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7682 }
7683 }
7684 if (insn & (1 << 21)) {
7685 /* write back */
7686 if (insn & (1 << 23)) {
7687 if (insn & (1 << 24)) {
7688 /* pre increment */
7689 } else {
7690 /* post increment */
b0109805 7691 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7692 }
7693 } else {
7694 if (insn & (1 << 24)) {
7695 /* pre decrement */
7696 if (n != 1)
b0109805 7697 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7698 } else {
7699 /* post decrement */
b0109805 7700 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7701 }
7702 }
b0109805
PB
7703 store_reg(s, rn, addr);
7704 } else {
7d1b0095 7705 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7706 }
7707 if (loaded_base) {
b0109805 7708 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7709 }
7710 if ((insn & (1 << 22)) && !user) {
7711 /* Restore CPSR from SPSR. */
d9ba4830
PB
7712 tmp = load_cpu_field(spsr);
7713 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7714 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7715 s->is_jmp = DISAS_UPDATE;
7716 }
7717 }
7718 break;
7719 case 0xa:
7720 case 0xb:
7721 {
7722 int32_t offset;
7723
7724 /* branch (and link) */
7725 val = (int32_t)s->pc;
7726 if (insn & (1 << 24)) {
7d1b0095 7727 tmp = tcg_temp_new_i32();
5e3f878a
PB
7728 tcg_gen_movi_i32(tmp, val);
7729 store_reg(s, 14, tmp);
9ee6e8bb
PB
7730 }
7731 offset = (((int32_t)insn << 8) >> 8);
7732 val += (offset << 2) + 4;
7733 gen_jmp(s, val);
7734 }
7735 break;
7736 case 0xc:
7737 case 0xd:
7738 case 0xe:
7739 /* Coprocessor. */
7740 if (disas_coproc_insn(env, s, insn))
7741 goto illegal_op;
7742 break;
7743 case 0xf:
7744 /* swi */
5e3f878a 7745 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7746 s->is_jmp = DISAS_SWI;
7747 break;
7748 default:
7749 illegal_op:
bc4a0de0 7750 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7751 break;
7752 }
7753 }
7754}
7755
/* Predicate: does OP encode a Thumb-2 logical data-processing operation?
 * Opcodes 0..7 (and, bic, orr, orn, eor, ...) form the logical group;
 * returns nonzero for those, zero for the arithmetic opcodes (8..15).
 */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
7762
7763/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7764 then set condition code flags based on the result of the operation.
7765 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7766 to the high bit of T1.
7767 Returns zero if the opcode is valid. */
7768
7769static int
396e467c 7770gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7771{
7772 int logic_cc;
7773
7774 logic_cc = 0;
7775 switch (op) {
7776 case 0: /* and */
396e467c 7777 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7778 logic_cc = conds;
7779 break;
7780 case 1: /* bic */
f669df27 7781 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7782 logic_cc = conds;
7783 break;
7784 case 2: /* orr */
396e467c 7785 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7786 logic_cc = conds;
7787 break;
7788 case 3: /* orn */
29501f1b 7789 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7790 logic_cc = conds;
7791 break;
7792 case 4: /* eor */
396e467c 7793 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7794 logic_cc = conds;
7795 break;
7796 case 8: /* add */
7797 if (conds)
396e467c 7798 gen_helper_add_cc(t0, t0, t1);
9ee6e8bb 7799 else
396e467c 7800 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7801 break;
7802 case 10: /* adc */
7803 if (conds)
396e467c 7804 gen_helper_adc_cc(t0, t0, t1);
9ee6e8bb 7805 else
396e467c 7806 gen_adc(t0, t1);
9ee6e8bb
PB
7807 break;
7808 case 11: /* sbc */
7809 if (conds)
396e467c 7810 gen_helper_sbc_cc(t0, t0, t1);
9ee6e8bb 7811 else
396e467c 7812 gen_sub_carry(t0, t0, t1);
9ee6e8bb
PB
7813 break;
7814 case 13: /* sub */
7815 if (conds)
396e467c 7816 gen_helper_sub_cc(t0, t0, t1);
9ee6e8bb 7817 else
396e467c 7818 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7819 break;
7820 case 14: /* rsb */
7821 if (conds)
396e467c 7822 gen_helper_sub_cc(t0, t1, t0);
9ee6e8bb 7823 else
396e467c 7824 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7825 break;
7826 default: /* 5, 6, 7, 9, 12, 15. */
7827 return 1;
7828 }
7829 if (logic_cc) {
396e467c 7830 gen_logic_CC(t0);
9ee6e8bb 7831 if (shifter_out)
396e467c 7832 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7833 }
7834 return 0;
7835}
7836
7837/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7838 is not legal. */
7839static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7840{
b0109805 7841 uint32_t insn, imm, shift, offset;
9ee6e8bb 7842 uint32_t rd, rn, rm, rs;
b26eefb6 7843 TCGv tmp;
6ddbc6e4
PB
7844 TCGv tmp2;
7845 TCGv tmp3;
b0109805 7846 TCGv addr;
a7812ae4 7847 TCGv_i64 tmp64;
9ee6e8bb
PB
7848 int op;
7849 int shiftop;
7850 int conds;
7851 int logic_cc;
7852
7853 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7854 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7855 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7856 16-bit instructions to get correct prefetch abort behavior. */
7857 insn = insn_hw1;
7858 if ((insn & (1 << 12)) == 0) {
be5e7a76 7859 ARCH(5);
9ee6e8bb
PB
7860 /* Second half of blx. */
7861 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7862 tmp = load_reg(s, 14);
7863 tcg_gen_addi_i32(tmp, tmp, offset);
7864 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7865
7d1b0095 7866 tmp2 = tcg_temp_new_i32();
b0109805 7867 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7868 store_reg(s, 14, tmp2);
7869 gen_bx(s, tmp);
9ee6e8bb
PB
7870 return 0;
7871 }
7872 if (insn & (1 << 11)) {
7873 /* Second half of bl. */
7874 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7875 tmp = load_reg(s, 14);
6a0d8a1d 7876 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7877
7d1b0095 7878 tmp2 = tcg_temp_new_i32();
b0109805 7879 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7880 store_reg(s, 14, tmp2);
7881 gen_bx(s, tmp);
9ee6e8bb
PB
7882 return 0;
7883 }
7884 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7885 /* Instruction spans a page boundary. Implement it as two
7886 16-bit instructions in case the second half causes an
7887 prefetch abort. */
7888 offset = ((int32_t)insn << 21) >> 9;
396e467c 7889 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
7890 return 0;
7891 }
7892 /* Fall through to 32-bit decode. */
7893 }
7894
7895 insn = lduw_code(s->pc);
7896 s->pc += 2;
7897 insn |= (uint32_t)insn_hw1 << 16;
7898
7899 if ((insn & 0xf800e800) != 0xf000e800) {
7900 ARCH(6T2);
7901 }
7902
7903 rn = (insn >> 16) & 0xf;
7904 rs = (insn >> 12) & 0xf;
7905 rd = (insn >> 8) & 0xf;
7906 rm = insn & 0xf;
7907 switch ((insn >> 25) & 0xf) {
7908 case 0: case 1: case 2: case 3:
7909 /* 16-bit instructions. Should never happen. */
7910 abort();
7911 case 4:
7912 if (insn & (1 << 22)) {
7913 /* Other load/store, table branch. */
7914 if (insn & 0x01200000) {
7915 /* Load/store doubleword. */
7916 if (rn == 15) {
7d1b0095 7917 addr = tcg_temp_new_i32();
b0109805 7918 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7919 } else {
b0109805 7920 addr = load_reg(s, rn);
9ee6e8bb
PB
7921 }
7922 offset = (insn & 0xff) * 4;
7923 if ((insn & (1 << 23)) == 0)
7924 offset = -offset;
7925 if (insn & (1 << 24)) {
b0109805 7926 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7927 offset = 0;
7928 }
7929 if (insn & (1 << 20)) {
7930 /* ldrd */
b0109805
PB
7931 tmp = gen_ld32(addr, IS_USER(s));
7932 store_reg(s, rs, tmp);
7933 tcg_gen_addi_i32(addr, addr, 4);
7934 tmp = gen_ld32(addr, IS_USER(s));
7935 store_reg(s, rd, tmp);
9ee6e8bb
PB
7936 } else {
7937 /* strd */
b0109805
PB
7938 tmp = load_reg(s, rs);
7939 gen_st32(tmp, addr, IS_USER(s));
7940 tcg_gen_addi_i32(addr, addr, 4);
7941 tmp = load_reg(s, rd);
7942 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7943 }
7944 if (insn & (1 << 21)) {
7945 /* Base writeback. */
7946 if (rn == 15)
7947 goto illegal_op;
b0109805
PB
7948 tcg_gen_addi_i32(addr, addr, offset - 4);
7949 store_reg(s, rn, addr);
7950 } else {
7d1b0095 7951 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7952 }
7953 } else if ((insn & (1 << 23)) == 0) {
7954 /* Load/store exclusive word. */
3174f8e9 7955 addr = tcg_temp_local_new();
98a46317 7956 load_reg_var(s, addr, rn);
426f5abc 7957 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 7958 if (insn & (1 << 20)) {
426f5abc 7959 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 7960 } else {
426f5abc 7961 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 7962 }
3174f8e9 7963 tcg_temp_free(addr);
9ee6e8bb
PB
7964 } else if ((insn & (1 << 6)) == 0) {
7965 /* Table Branch. */
7966 if (rn == 15) {
7d1b0095 7967 addr = tcg_temp_new_i32();
b0109805 7968 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7969 } else {
b0109805 7970 addr = load_reg(s, rn);
9ee6e8bb 7971 }
b26eefb6 7972 tmp = load_reg(s, rm);
b0109805 7973 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7974 if (insn & (1 << 4)) {
7975 /* tbh */
b0109805 7976 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 7977 tcg_temp_free_i32(tmp);
b0109805 7978 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7979 } else { /* tbb */
7d1b0095 7980 tcg_temp_free_i32(tmp);
b0109805 7981 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7982 }
7d1b0095 7983 tcg_temp_free_i32(addr);
b0109805
PB
7984 tcg_gen_shli_i32(tmp, tmp, 1);
7985 tcg_gen_addi_i32(tmp, tmp, s->pc);
7986 store_reg(s, 15, tmp);
9ee6e8bb
PB
7987 } else {
7988 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 7989 ARCH(7);
9ee6e8bb 7990 op = (insn >> 4) & 0x3;
426f5abc
PB
7991 if (op == 2) {
7992 goto illegal_op;
7993 }
3174f8e9 7994 addr = tcg_temp_local_new();
98a46317 7995 load_reg_var(s, addr, rn);
9ee6e8bb 7996 if (insn & (1 << 20)) {
426f5abc 7997 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 7998 } else {
426f5abc 7999 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8000 }
3174f8e9 8001 tcg_temp_free(addr);
9ee6e8bb
PB
8002 }
8003 } else {
8004 /* Load/store multiple, RFE, SRS. */
8005 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8006 /* Not available in user mode. */
b0109805 8007 if (IS_USER(s))
9ee6e8bb
PB
8008 goto illegal_op;
8009 if (insn & (1 << 20)) {
8010 /* rfe */
b0109805
PB
8011 addr = load_reg(s, rn);
8012 if ((insn & (1 << 24)) == 0)
8013 tcg_gen_addi_i32(addr, addr, -8);
8014 /* Load PC into tmp and CPSR into tmp2. */
8015 tmp = gen_ld32(addr, 0);
8016 tcg_gen_addi_i32(addr, addr, 4);
8017 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8018 if (insn & (1 << 21)) {
8019 /* Base writeback. */
b0109805
PB
8020 if (insn & (1 << 24)) {
8021 tcg_gen_addi_i32(addr, addr, 4);
8022 } else {
8023 tcg_gen_addi_i32(addr, addr, -4);
8024 }
8025 store_reg(s, rn, addr);
8026 } else {
7d1b0095 8027 tcg_temp_free_i32(addr);
9ee6e8bb 8028 }
b0109805 8029 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8030 } else {
8031 /* srs */
8032 op = (insn & 0x1f);
7d1b0095 8033 addr = tcg_temp_new_i32();
39ea3d4e
PM
8034 tmp = tcg_const_i32(op);
8035 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8036 tcg_temp_free_i32(tmp);
9ee6e8bb 8037 if ((insn & (1 << 24)) == 0) {
b0109805 8038 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8039 }
b0109805
PB
8040 tmp = load_reg(s, 14);
8041 gen_st32(tmp, addr, 0);
8042 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8043 tmp = tcg_temp_new_i32();
b0109805
PB
8044 gen_helper_cpsr_read(tmp);
8045 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8046 if (insn & (1 << 21)) {
8047 if ((insn & (1 << 24)) == 0) {
b0109805 8048 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8049 } else {
b0109805 8050 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8051 }
39ea3d4e
PM
8052 tmp = tcg_const_i32(op);
8053 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8054 tcg_temp_free_i32(tmp);
b0109805 8055 } else {
7d1b0095 8056 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8057 }
8058 }
8059 } else {
5856d44e
YO
8060 int i, loaded_base = 0;
8061 TCGv loaded_var;
9ee6e8bb 8062 /* Load/store multiple. */
b0109805 8063 addr = load_reg(s, rn);
9ee6e8bb
PB
8064 offset = 0;
8065 for (i = 0; i < 16; i++) {
8066 if (insn & (1 << i))
8067 offset += 4;
8068 }
8069 if (insn & (1 << 24)) {
b0109805 8070 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8071 }
8072
5856d44e 8073 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8074 for (i = 0; i < 16; i++) {
8075 if ((insn & (1 << i)) == 0)
8076 continue;
8077 if (insn & (1 << 20)) {
8078 /* Load. */
b0109805 8079 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8080 if (i == 15) {
b0109805 8081 gen_bx(s, tmp);
5856d44e
YO
8082 } else if (i == rn) {
8083 loaded_var = tmp;
8084 loaded_base = 1;
9ee6e8bb 8085 } else {
b0109805 8086 store_reg(s, i, tmp);
9ee6e8bb
PB
8087 }
8088 } else {
8089 /* Store. */
b0109805
PB
8090 tmp = load_reg(s, i);
8091 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8092 }
b0109805 8093 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8094 }
5856d44e
YO
8095 if (loaded_base) {
8096 store_reg(s, rn, loaded_var);
8097 }
9ee6e8bb
PB
8098 if (insn & (1 << 21)) {
8099 /* Base register writeback. */
8100 if (insn & (1 << 24)) {
b0109805 8101 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8102 }
8103 /* Fault if writeback register is in register list. */
8104 if (insn & (1 << rn))
8105 goto illegal_op;
b0109805
PB
8106 store_reg(s, rn, addr);
8107 } else {
7d1b0095 8108 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8109 }
8110 }
8111 }
8112 break;
2af9ab77
JB
8113 case 5:
8114
9ee6e8bb 8115 op = (insn >> 21) & 0xf;
2af9ab77
JB
8116 if (op == 6) {
8117 /* Halfword pack. */
8118 tmp = load_reg(s, rn);
8119 tmp2 = load_reg(s, rm);
8120 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8121 if (insn & (1 << 5)) {
8122 /* pkhtb */
8123 if (shift == 0)
8124 shift = 31;
8125 tcg_gen_sari_i32(tmp2, tmp2, shift);
8126 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8127 tcg_gen_ext16u_i32(tmp2, tmp2);
8128 } else {
8129 /* pkhbt */
8130 if (shift)
8131 tcg_gen_shli_i32(tmp2, tmp2, shift);
8132 tcg_gen_ext16u_i32(tmp, tmp);
8133 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8134 }
8135 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8136 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8137 store_reg(s, rd, tmp);
8138 } else {
2af9ab77
JB
8139 /* Data processing register constant shift. */
8140 if (rn == 15) {
7d1b0095 8141 tmp = tcg_temp_new_i32();
2af9ab77
JB
8142 tcg_gen_movi_i32(tmp, 0);
8143 } else {
8144 tmp = load_reg(s, rn);
8145 }
8146 tmp2 = load_reg(s, rm);
8147
8148 shiftop = (insn >> 4) & 3;
8149 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8150 conds = (insn & (1 << 20)) != 0;
8151 logic_cc = (conds && thumb2_logic_op(op));
8152 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8153 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8154 goto illegal_op;
7d1b0095 8155 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8156 if (rd != 15) {
8157 store_reg(s, rd, tmp);
8158 } else {
7d1b0095 8159 tcg_temp_free_i32(tmp);
2af9ab77 8160 }
3174f8e9 8161 }
9ee6e8bb
PB
8162 break;
8163 case 13: /* Misc data processing. */
8164 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8165 if (op < 4 && (insn & 0xf000) != 0xf000)
8166 goto illegal_op;
8167 switch (op) {
8168 case 0: /* Register controlled shift. */
8984bd2e
PB
8169 tmp = load_reg(s, rn);
8170 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8171 if ((insn & 0x70) != 0)
8172 goto illegal_op;
8173 op = (insn >> 21) & 3;
8984bd2e
PB
8174 logic_cc = (insn & (1 << 20)) != 0;
8175 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8176 if (logic_cc)
8177 gen_logic_CC(tmp);
21aeb343 8178 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8179 break;
8180 case 1: /* Sign/zero extend. */
5e3f878a 8181 tmp = load_reg(s, rm);
9ee6e8bb 8182 shift = (insn >> 4) & 3;
1301f322 8183 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8184 rotate, a shift is sufficient. */
8185 if (shift != 0)
f669df27 8186 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8187 op = (insn >> 20) & 7;
8188 switch (op) {
5e3f878a
PB
8189 case 0: gen_sxth(tmp); break;
8190 case 1: gen_uxth(tmp); break;
8191 case 2: gen_sxtb16(tmp); break;
8192 case 3: gen_uxtb16(tmp); break;
8193 case 4: gen_sxtb(tmp); break;
8194 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8195 default: goto illegal_op;
8196 }
8197 if (rn != 15) {
5e3f878a 8198 tmp2 = load_reg(s, rn);
9ee6e8bb 8199 if ((op >> 1) == 1) {
5e3f878a 8200 gen_add16(tmp, tmp2);
9ee6e8bb 8201 } else {
5e3f878a 8202 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8203 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8204 }
8205 }
5e3f878a 8206 store_reg(s, rd, tmp);
9ee6e8bb
PB
8207 break;
8208 case 2: /* SIMD add/subtract. */
8209 op = (insn >> 20) & 7;
8210 shift = (insn >> 4) & 7;
8211 if ((op & 3) == 3 || (shift & 3) == 3)
8212 goto illegal_op;
6ddbc6e4
PB
8213 tmp = load_reg(s, rn);
8214 tmp2 = load_reg(s, rm);
8215 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8216 tcg_temp_free_i32(tmp2);
6ddbc6e4 8217 store_reg(s, rd, tmp);
9ee6e8bb
PB
8218 break;
8219 case 3: /* Other data processing. */
8220 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8221 if (op < 4) {
8222 /* Saturating add/subtract. */
d9ba4830
PB
8223 tmp = load_reg(s, rn);
8224 tmp2 = load_reg(s, rm);
9ee6e8bb 8225 if (op & 1)
4809c612
JB
8226 gen_helper_double_saturate(tmp, tmp);
8227 if (op & 2)
d9ba4830 8228 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 8229 else
d9ba4830 8230 gen_helper_add_saturate(tmp, tmp, tmp2);
7d1b0095 8231 tcg_temp_free_i32(tmp2);
9ee6e8bb 8232 } else {
d9ba4830 8233 tmp = load_reg(s, rn);
9ee6e8bb
PB
8234 switch (op) {
8235 case 0x0a: /* rbit */
d9ba4830 8236 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8237 break;
8238 case 0x08: /* rev */
66896cb8 8239 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8240 break;
8241 case 0x09: /* rev16 */
d9ba4830 8242 gen_rev16(tmp);
9ee6e8bb
PB
8243 break;
8244 case 0x0b: /* revsh */
d9ba4830 8245 gen_revsh(tmp);
9ee6e8bb
PB
8246 break;
8247 case 0x10: /* sel */
d9ba4830 8248 tmp2 = load_reg(s, rm);
7d1b0095 8249 tmp3 = tcg_temp_new_i32();
6ddbc6e4 8250 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 8251 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8252 tcg_temp_free_i32(tmp3);
8253 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8254 break;
8255 case 0x18: /* clz */
d9ba4830 8256 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8257 break;
8258 default:
8259 goto illegal_op;
8260 }
8261 }
d9ba4830 8262 store_reg(s, rd, tmp);
9ee6e8bb
PB
8263 break;
8264 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8265 op = (insn >> 4) & 0xf;
d9ba4830
PB
8266 tmp = load_reg(s, rn);
8267 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8268 switch ((insn >> 20) & 7) {
8269 case 0: /* 32 x 32 -> 32 */
d9ba4830 8270 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8271 tcg_temp_free_i32(tmp2);
9ee6e8bb 8272 if (rs != 15) {
d9ba4830 8273 tmp2 = load_reg(s, rs);
9ee6e8bb 8274 if (op)
d9ba4830 8275 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8276 else
d9ba4830 8277 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8278 tcg_temp_free_i32(tmp2);
9ee6e8bb 8279 }
9ee6e8bb
PB
8280 break;
8281 case 1: /* 16 x 16 -> 32 */
d9ba4830 8282 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8283 tcg_temp_free_i32(tmp2);
9ee6e8bb 8284 if (rs != 15) {
d9ba4830
PB
8285 tmp2 = load_reg(s, rs);
8286 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8287 tcg_temp_free_i32(tmp2);
9ee6e8bb 8288 }
9ee6e8bb
PB
8289 break;
8290 case 2: /* Dual multiply add. */
8291 case 4: /* Dual multiply subtract. */
8292 if (op)
d9ba4830
PB
8293 gen_swap_half(tmp2);
8294 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8295 if (insn & (1 << 22)) {
e1d177b9 8296 /* This subtraction cannot overflow. */
d9ba4830 8297 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8298 } else {
e1d177b9
PM
8299 /* This addition cannot overflow 32 bits;
8300 * however it may overflow considered as a signed
8301 * operation, in which case we must set the Q flag.
8302 */
8303 gen_helper_add_setq(tmp, tmp, tmp2);
9ee6e8bb 8304 }
7d1b0095 8305 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8306 if (rs != 15)
8307 {
d9ba4830
PB
8308 tmp2 = load_reg(s, rs);
8309 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8310 tcg_temp_free_i32(tmp2);
9ee6e8bb 8311 }
9ee6e8bb
PB
8312 break;
8313 case 3: /* 32 * 16 -> 32msb */
8314 if (op)
d9ba4830 8315 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8316 else
d9ba4830 8317 gen_sxth(tmp2);
a7812ae4
PB
8318 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8319 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8320 tmp = tcg_temp_new_i32();
a7812ae4 8321 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8322 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8323 if (rs != 15)
8324 {
d9ba4830
PB
8325 tmp2 = load_reg(s, rs);
8326 gen_helper_add_setq(tmp, tmp, tmp2);
7d1b0095 8327 tcg_temp_free_i32(tmp2);
9ee6e8bb 8328 }
9ee6e8bb 8329 break;
838fa72d
AJ
8330 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8331 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8332 if (rs != 15) {
838fa72d
AJ
8333 tmp = load_reg(s, rs);
8334 if (insn & (1 << 20)) {
8335 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8336 } else {
838fa72d 8337 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8338 }
2c0262af 8339 }
838fa72d
AJ
8340 if (insn & (1 << 4)) {
8341 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8342 }
8343 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8344 tmp = tcg_temp_new_i32();
838fa72d
AJ
8345 tcg_gen_trunc_i64_i32(tmp, tmp64);
8346 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8347 break;
8348 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8349 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8350 tcg_temp_free_i32(tmp2);
9ee6e8bb 8351 if (rs != 15) {
d9ba4830
PB
8352 tmp2 = load_reg(s, rs);
8353 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8354 tcg_temp_free_i32(tmp2);
5fd46862 8355 }
9ee6e8bb 8356 break;
2c0262af 8357 }
d9ba4830 8358 store_reg(s, rd, tmp);
2c0262af 8359 break;
9ee6e8bb
PB
8360 case 6: case 7: /* 64-bit multiply, Divide. */
8361 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8362 tmp = load_reg(s, rn);
8363 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8364 if ((op & 0x50) == 0x10) {
8365 /* sdiv, udiv */
8366 if (!arm_feature(env, ARM_FEATURE_DIV))
8367 goto illegal_op;
8368 if (op & 0x20)
5e3f878a 8369 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8370 else
5e3f878a 8371 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8372 tcg_temp_free_i32(tmp2);
5e3f878a 8373 store_reg(s, rd, tmp);
9ee6e8bb
PB
8374 } else if ((op & 0xe) == 0xc) {
8375 /* Dual multiply accumulate long. */
8376 if (op & 1)
5e3f878a
PB
8377 gen_swap_half(tmp2);
8378 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8379 if (op & 0x10) {
5e3f878a 8380 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8381 } else {
5e3f878a 8382 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8383 }
7d1b0095 8384 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8385 /* BUGFIX */
8386 tmp64 = tcg_temp_new_i64();
8387 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8388 tcg_temp_free_i32(tmp);
a7812ae4
PB
8389 gen_addq(s, tmp64, rs, rd);
8390 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8391 tcg_temp_free_i64(tmp64);
2c0262af 8392 } else {
9ee6e8bb
PB
8393 if (op & 0x20) {
8394 /* Unsigned 64-bit multiply */
a7812ae4 8395 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8396 } else {
9ee6e8bb
PB
8397 if (op & 8) {
8398 /* smlalxy */
5e3f878a 8399 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8400 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8401 tmp64 = tcg_temp_new_i64();
8402 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8403 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8404 } else {
8405 /* Signed 64-bit multiply */
a7812ae4 8406 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8407 }
b5ff1b31 8408 }
9ee6e8bb
PB
8409 if (op & 4) {
8410 /* umaal */
a7812ae4
PB
8411 gen_addq_lo(s, tmp64, rs);
8412 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8413 } else if (op & 0x40) {
8414 /* 64-bit accumulate. */
a7812ae4 8415 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8416 }
a7812ae4 8417 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8418 tcg_temp_free_i64(tmp64);
5fd46862 8419 }
2c0262af 8420 break;
9ee6e8bb
PB
8421 }
8422 break;
8423 case 6: case 7: case 14: case 15:
8424 /* Coprocessor. */
8425 if (((insn >> 24) & 3) == 3) {
8426 /* Translate into the equivalent ARM encoding. */
f06053e3 8427 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8428 if (disas_neon_data_insn(env, s, insn))
8429 goto illegal_op;
8430 } else {
8431 if (insn & (1 << 28))
8432 goto illegal_op;
8433 if (disas_coproc_insn (env, s, insn))
8434 goto illegal_op;
8435 }
8436 break;
8437 case 8: case 9: case 10: case 11:
8438 if (insn & (1 << 15)) {
8439 /* Branches, misc control. */
8440 if (insn & 0x5000) {
8441 /* Unconditional branch. */
8442 /* signextend(hw1[10:0]) -> offset[:12]. */
8443 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8444 /* hw1[10:0] -> offset[11:1]. */
8445 offset |= (insn & 0x7ff) << 1;
8446 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8447 offset[24:22] already have the same value because of the
8448 sign extension above. */
8449 offset ^= ((~insn) & (1 << 13)) << 10;
8450 offset ^= ((~insn) & (1 << 11)) << 11;
8451
9ee6e8bb
PB
8452 if (insn & (1 << 14)) {
8453 /* Branch and link. */
3174f8e9 8454 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8455 }
3b46e624 8456
b0109805 8457 offset += s->pc;
9ee6e8bb
PB
8458 if (insn & (1 << 12)) {
8459 /* b/bl */
b0109805 8460 gen_jmp(s, offset);
9ee6e8bb
PB
8461 } else {
8462 /* blx */
b0109805 8463 offset &= ~(uint32_t)2;
be5e7a76 8464 /* thumb2 bx, no need to check */
b0109805 8465 gen_bx_im(s, offset);
2c0262af 8466 }
9ee6e8bb
PB
8467 } else if (((insn >> 23) & 7) == 7) {
8468 /* Misc control */
8469 if (insn & (1 << 13))
8470 goto illegal_op;
8471
8472 if (insn & (1 << 26)) {
8473 /* Secure monitor call (v6Z) */
8474 goto illegal_op; /* not implemented. */
2c0262af 8475 } else {
9ee6e8bb
PB
8476 op = (insn >> 20) & 7;
8477 switch (op) {
8478 case 0: /* msr cpsr. */
8479 if (IS_M(env)) {
8984bd2e
PB
8480 tmp = load_reg(s, rn);
8481 addr = tcg_const_i32(insn & 0xff);
8482 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8483 tcg_temp_free_i32(addr);
7d1b0095 8484 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8485 gen_lookup_tb(s);
8486 break;
8487 }
8488 /* fall through */
8489 case 1: /* msr spsr. */
8490 if (IS_M(env))
8491 goto illegal_op;
2fbac54b
FN
8492 tmp = load_reg(s, rn);
8493 if (gen_set_psr(s,
9ee6e8bb 8494 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8495 op == 1, tmp))
9ee6e8bb
PB
8496 goto illegal_op;
8497 break;
8498 case 2: /* cps, nop-hint. */
8499 if (((insn >> 8) & 7) == 0) {
8500 gen_nop_hint(s, insn & 0xff);
8501 }
8502 /* Implemented as NOP in user mode. */
8503 if (IS_USER(s))
8504 break;
8505 offset = 0;
8506 imm = 0;
8507 if (insn & (1 << 10)) {
8508 if (insn & (1 << 7))
8509 offset |= CPSR_A;
8510 if (insn & (1 << 6))
8511 offset |= CPSR_I;
8512 if (insn & (1 << 5))
8513 offset |= CPSR_F;
8514 if (insn & (1 << 9))
8515 imm = CPSR_A | CPSR_I | CPSR_F;
8516 }
8517 if (insn & (1 << 8)) {
8518 offset |= 0x1f;
8519 imm |= (insn & 0x1f);
8520 }
8521 if (offset) {
2fbac54b 8522 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8523 }
8524 break;
8525 case 3: /* Special control operations. */
426f5abc 8526 ARCH(7);
9ee6e8bb
PB
8527 op = (insn >> 4) & 0xf;
8528 switch (op) {
8529 case 2: /* clrex */
426f5abc 8530 gen_clrex(s);
9ee6e8bb
PB
8531 break;
8532 case 4: /* dsb */
8533 case 5: /* dmb */
8534 case 6: /* isb */
8535 /* These execute as NOPs. */
9ee6e8bb
PB
8536 break;
8537 default:
8538 goto illegal_op;
8539 }
8540 break;
8541 case 4: /* bxj */
8542 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8543 tmp = load_reg(s, rn);
8544 gen_bx(s, tmp);
9ee6e8bb
PB
8545 break;
8546 case 5: /* Exception return. */
b8b45b68
RV
8547 if (IS_USER(s)) {
8548 goto illegal_op;
8549 }
8550 if (rn != 14 || rd != 15) {
8551 goto illegal_op;
8552 }
8553 tmp = load_reg(s, rn);
8554 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8555 gen_exception_return(s, tmp);
8556 break;
9ee6e8bb 8557 case 6: /* mrs cpsr. */
7d1b0095 8558 tmp = tcg_temp_new_i32();
9ee6e8bb 8559 if (IS_M(env)) {
8984bd2e
PB
8560 addr = tcg_const_i32(insn & 0xff);
8561 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8562 tcg_temp_free_i32(addr);
9ee6e8bb 8563 } else {
8984bd2e 8564 gen_helper_cpsr_read(tmp);
9ee6e8bb 8565 }
8984bd2e 8566 store_reg(s, rd, tmp);
9ee6e8bb
PB
8567 break;
8568 case 7: /* mrs spsr. */
8569 /* Not accessible in user mode. */
8570 if (IS_USER(s) || IS_M(env))
8571 goto illegal_op;
d9ba4830
PB
8572 tmp = load_cpu_field(spsr);
8573 store_reg(s, rd, tmp);
9ee6e8bb 8574 break;
2c0262af
FB
8575 }
8576 }
9ee6e8bb
PB
8577 } else {
8578 /* Conditional branch. */
8579 op = (insn >> 22) & 0xf;
8580 /* Generate a conditional jump to next instruction. */
8581 s->condlabel = gen_new_label();
d9ba4830 8582 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8583 s->condjmp = 1;
8584
8585 /* offset[11:1] = insn[10:0] */
8586 offset = (insn & 0x7ff) << 1;
8587 /* offset[17:12] = insn[21:16]. */
8588 offset |= (insn & 0x003f0000) >> 4;
8589 /* offset[31:20] = insn[26]. */
8590 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8591 /* offset[18] = insn[13]. */
8592 offset |= (insn & (1 << 13)) << 5;
8593 /* offset[19] = insn[11]. */
8594 offset |= (insn & (1 << 11)) << 8;
8595
8596 /* jump to the offset */
b0109805 8597 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8598 }
8599 } else {
8600 /* Data processing immediate. */
8601 if (insn & (1 << 25)) {
8602 if (insn & (1 << 24)) {
8603 if (insn & (1 << 20))
8604 goto illegal_op;
8605 /* Bitfield/Saturate. */
8606 op = (insn >> 21) & 7;
8607 imm = insn & 0x1f;
8608 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8609 if (rn == 15) {
7d1b0095 8610 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8611 tcg_gen_movi_i32(tmp, 0);
8612 } else {
8613 tmp = load_reg(s, rn);
8614 }
9ee6e8bb
PB
8615 switch (op) {
8616 case 2: /* Signed bitfield extract. */
8617 imm++;
8618 if (shift + imm > 32)
8619 goto illegal_op;
8620 if (imm < 32)
6ddbc6e4 8621 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8622 break;
8623 case 6: /* Unsigned bitfield extract. */
8624 imm++;
8625 if (shift + imm > 32)
8626 goto illegal_op;
8627 if (imm < 32)
6ddbc6e4 8628 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8629 break;
8630 case 3: /* Bitfield insert/clear. */
8631 if (imm < shift)
8632 goto illegal_op;
8633 imm = imm + 1 - shift;
8634 if (imm != 32) {
6ddbc6e4 8635 tmp2 = load_reg(s, rd);
8f8e3aa4 8636 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7d1b0095 8637 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8638 }
8639 break;
8640 case 7:
8641 goto illegal_op;
8642 default: /* Saturate. */
9ee6e8bb
PB
8643 if (shift) {
8644 if (op & 1)
6ddbc6e4 8645 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8646 else
6ddbc6e4 8647 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8648 }
6ddbc6e4 8649 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8650 if (op & 4) {
8651 /* Unsigned. */
9ee6e8bb 8652 if ((op & 1) && shift == 0)
6ddbc6e4 8653 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 8654 else
6ddbc6e4 8655 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 8656 } else {
9ee6e8bb 8657 /* Signed. */
9ee6e8bb 8658 if ((op & 1) && shift == 0)
6ddbc6e4 8659 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 8660 else
6ddbc6e4 8661 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 8662 }
b75263d6 8663 tcg_temp_free_i32(tmp2);
9ee6e8bb 8664 break;
2c0262af 8665 }
6ddbc6e4 8666 store_reg(s, rd, tmp);
9ee6e8bb
PB
8667 } else {
8668 imm = ((insn & 0x04000000) >> 15)
8669 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8670 if (insn & (1 << 22)) {
8671 /* 16-bit immediate. */
8672 imm |= (insn >> 4) & 0xf000;
8673 if (insn & (1 << 23)) {
8674 /* movt */
5e3f878a 8675 tmp = load_reg(s, rd);
86831435 8676 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8677 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8678 } else {
9ee6e8bb 8679 /* movw */
7d1b0095 8680 tmp = tcg_temp_new_i32();
5e3f878a 8681 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8682 }
8683 } else {
9ee6e8bb
PB
8684 /* Add/sub 12-bit immediate. */
8685 if (rn == 15) {
b0109805 8686 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8687 if (insn & (1 << 23))
b0109805 8688 offset -= imm;
9ee6e8bb 8689 else
b0109805 8690 offset += imm;
7d1b0095 8691 tmp = tcg_temp_new_i32();
5e3f878a 8692 tcg_gen_movi_i32(tmp, offset);
2c0262af 8693 } else {
5e3f878a 8694 tmp = load_reg(s, rn);
9ee6e8bb 8695 if (insn & (1 << 23))
5e3f878a 8696 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8697 else
5e3f878a 8698 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8699 }
9ee6e8bb 8700 }
5e3f878a 8701 store_reg(s, rd, tmp);
191abaa2 8702 }
9ee6e8bb
PB
8703 } else {
8704 int shifter_out = 0;
8705 /* modified 12-bit immediate. */
8706 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8707 imm = (insn & 0xff);
8708 switch (shift) {
8709 case 0: /* XY */
8710 /* Nothing to do. */
8711 break;
8712 case 1: /* 00XY00XY */
8713 imm |= imm << 16;
8714 break;
8715 case 2: /* XY00XY00 */
8716 imm |= imm << 16;
8717 imm <<= 8;
8718 break;
8719 case 3: /* XYXYXYXY */
8720 imm |= imm << 16;
8721 imm |= imm << 8;
8722 break;
8723 default: /* Rotated constant. */
8724 shift = (shift << 1) | (imm >> 7);
8725 imm |= 0x80;
8726 imm = imm << (32 - shift);
8727 shifter_out = 1;
8728 break;
b5ff1b31 8729 }
7d1b0095 8730 tmp2 = tcg_temp_new_i32();
3174f8e9 8731 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8732 rn = (insn >> 16) & 0xf;
3174f8e9 8733 if (rn == 15) {
7d1b0095 8734 tmp = tcg_temp_new_i32();
3174f8e9
FN
8735 tcg_gen_movi_i32(tmp, 0);
8736 } else {
8737 tmp = load_reg(s, rn);
8738 }
9ee6e8bb
PB
8739 op = (insn >> 21) & 0xf;
8740 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8741 shifter_out, tmp, tmp2))
9ee6e8bb 8742 goto illegal_op;
7d1b0095 8743 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8744 rd = (insn >> 8) & 0xf;
8745 if (rd != 15) {
3174f8e9
FN
8746 store_reg(s, rd, tmp);
8747 } else {
7d1b0095 8748 tcg_temp_free_i32(tmp);
2c0262af 8749 }
2c0262af 8750 }
9ee6e8bb
PB
8751 }
8752 break;
8753 case 12: /* Load/store single data item. */
8754 {
8755 int postinc = 0;
8756 int writeback = 0;
b0109805 8757 int user;
9ee6e8bb
PB
8758 if ((insn & 0x01100000) == 0x01000000) {
8759 if (disas_neon_ls_insn(env, s, insn))
c1713132 8760 goto illegal_op;
9ee6e8bb
PB
8761 break;
8762 }
a2fdc890
PM
8763 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8764 if (rs == 15) {
8765 if (!(insn & (1 << 20))) {
8766 goto illegal_op;
8767 }
8768 if (op != 2) {
8769 /* Byte or halfword load space with dest == r15 : memory hints.
8770 * Catch them early so we don't emit pointless addressing code.
8771 * This space is a mix of:
8772 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8773 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8774 * cores)
8775 * unallocated hints, which must be treated as NOPs
8776 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8777 * which is easiest for the decoding logic
8778 * Some space which must UNDEF
8779 */
8780 int op1 = (insn >> 23) & 3;
8781 int op2 = (insn >> 6) & 0x3f;
8782 if (op & 2) {
8783 goto illegal_op;
8784 }
8785 if (rn == 15) {
8786 /* UNPREDICTABLE or unallocated hint */
8787 return 0;
8788 }
8789 if (op1 & 1) {
8790 return 0; /* PLD* or unallocated hint */
8791 }
8792 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8793 return 0; /* PLD* or unallocated hint */
8794 }
8795 /* UNDEF space, or an UNPREDICTABLE */
8796 return 1;
8797 }
8798 }
b0109805 8799 user = IS_USER(s);
9ee6e8bb 8800 if (rn == 15) {
7d1b0095 8801 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8802 /* PC relative. */
8803 /* s->pc has already been incremented by 4. */
8804 imm = s->pc & 0xfffffffc;
8805 if (insn & (1 << 23))
8806 imm += insn & 0xfff;
8807 else
8808 imm -= insn & 0xfff;
b0109805 8809 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8810 } else {
b0109805 8811 addr = load_reg(s, rn);
9ee6e8bb
PB
8812 if (insn & (1 << 23)) {
8813 /* Positive offset. */
8814 imm = insn & 0xfff;
b0109805 8815 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8816 } else {
9ee6e8bb 8817 imm = insn & 0xff;
2a0308c5
PM
8818 switch ((insn >> 8) & 0xf) {
8819 case 0x0: /* Shifted Register. */
9ee6e8bb 8820 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8821 if (shift > 3) {
8822 tcg_temp_free_i32(addr);
18c9b560 8823 goto illegal_op;
2a0308c5 8824 }
b26eefb6 8825 tmp = load_reg(s, rm);
9ee6e8bb 8826 if (shift)
b26eefb6 8827 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8828 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8829 tcg_temp_free_i32(tmp);
9ee6e8bb 8830 break;
2a0308c5 8831 case 0xc: /* Negative offset. */
b0109805 8832 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8833 break;
2a0308c5 8834 case 0xe: /* User privilege. */
b0109805
PB
8835 tcg_gen_addi_i32(addr, addr, imm);
8836 user = 1;
9ee6e8bb 8837 break;
2a0308c5 8838 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8839 imm = -imm;
8840 /* Fall through. */
2a0308c5 8841 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8842 postinc = 1;
8843 writeback = 1;
8844 break;
2a0308c5 8845 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
8846 imm = -imm;
8847 /* Fall through. */
2a0308c5 8848 case 0xf: /* Pre-increment. */
b0109805 8849 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
8850 writeback = 1;
8851 break;
8852 default:
2a0308c5 8853 tcg_temp_free_i32(addr);
b7bcbe95 8854 goto illegal_op;
9ee6e8bb
PB
8855 }
8856 }
8857 }
9ee6e8bb
PB
8858 if (insn & (1 << 20)) {
8859 /* Load. */
a2fdc890
PM
8860 switch (op) {
8861 case 0: tmp = gen_ld8u(addr, user); break;
8862 case 4: tmp = gen_ld8s(addr, user); break;
8863 case 1: tmp = gen_ld16u(addr, user); break;
8864 case 5: tmp = gen_ld16s(addr, user); break;
8865 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
8866 default:
8867 tcg_temp_free_i32(addr);
8868 goto illegal_op;
a2fdc890
PM
8869 }
8870 if (rs == 15) {
8871 gen_bx(s, tmp);
9ee6e8bb 8872 } else {
a2fdc890 8873 store_reg(s, rs, tmp);
9ee6e8bb
PB
8874 }
8875 } else {
8876 /* Store. */
b0109805 8877 tmp = load_reg(s, rs);
9ee6e8bb 8878 switch (op) {
b0109805
PB
8879 case 0: gen_st8(tmp, addr, user); break;
8880 case 1: gen_st16(tmp, addr, user); break;
8881 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
8882 default:
8883 tcg_temp_free_i32(addr);
8884 goto illegal_op;
b7bcbe95 8885 }
2c0262af 8886 }
9ee6e8bb 8887 if (postinc)
b0109805
PB
8888 tcg_gen_addi_i32(addr, addr, imm);
8889 if (writeback) {
8890 store_reg(s, rn, addr);
8891 } else {
7d1b0095 8892 tcg_temp_free_i32(addr);
b0109805 8893 }
9ee6e8bb
PB
8894 }
8895 break;
8896 default:
8897 goto illegal_op;
2c0262af 8898 }
9ee6e8bb
PB
8899 return 0;
8900illegal_op:
8901 return 1;
2c0262af
FB
8902}
8903
9ee6e8bb 8904static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8905{
8906 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8907 int32_t offset;
8908 int i;
b26eefb6 8909 TCGv tmp;
d9ba4830 8910 TCGv tmp2;
b0109805 8911 TCGv addr;
99c475ab 8912
9ee6e8bb
PB
8913 if (s->condexec_mask) {
8914 cond = s->condexec_cond;
bedd2912
JB
8915 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8916 s->condlabel = gen_new_label();
8917 gen_test_cc(cond ^ 1, s->condlabel);
8918 s->condjmp = 1;
8919 }
9ee6e8bb
PB
8920 }
8921
b5ff1b31 8922 insn = lduw_code(s->pc);
99c475ab 8923 s->pc += 2;
b5ff1b31 8924
99c475ab
FB
8925 switch (insn >> 12) {
8926 case 0: case 1:
396e467c 8927
99c475ab
FB
8928 rd = insn & 7;
8929 op = (insn >> 11) & 3;
8930 if (op == 3) {
8931 /* add/subtract */
8932 rn = (insn >> 3) & 7;
396e467c 8933 tmp = load_reg(s, rn);
99c475ab
FB
8934 if (insn & (1 << 10)) {
8935 /* immediate */
7d1b0095 8936 tmp2 = tcg_temp_new_i32();
396e467c 8937 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
8938 } else {
8939 /* reg */
8940 rm = (insn >> 6) & 7;
396e467c 8941 tmp2 = load_reg(s, rm);
99c475ab 8942 }
9ee6e8bb
PB
8943 if (insn & (1 << 9)) {
8944 if (s->condexec_mask)
396e467c 8945 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8946 else
396e467c 8947 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb
PB
8948 } else {
8949 if (s->condexec_mask)
396e467c 8950 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 8951 else
396e467c 8952 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 8953 }
7d1b0095 8954 tcg_temp_free_i32(tmp2);
396e467c 8955 store_reg(s, rd, tmp);
99c475ab
FB
8956 } else {
8957 /* shift immediate */
8958 rm = (insn >> 3) & 7;
8959 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8960 tmp = load_reg(s, rm);
8961 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8962 if (!s->condexec_mask)
8963 gen_logic_CC(tmp);
8964 store_reg(s, rd, tmp);
99c475ab
FB
8965 }
8966 break;
8967 case 2: case 3:
8968 /* arithmetic large immediate */
8969 op = (insn >> 11) & 3;
8970 rd = (insn >> 8) & 0x7;
396e467c 8971 if (op == 0) { /* mov */
7d1b0095 8972 tmp = tcg_temp_new_i32();
396e467c 8973 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 8974 if (!s->condexec_mask)
396e467c
FN
8975 gen_logic_CC(tmp);
8976 store_reg(s, rd, tmp);
8977 } else {
8978 tmp = load_reg(s, rd);
7d1b0095 8979 tmp2 = tcg_temp_new_i32();
396e467c
FN
8980 tcg_gen_movi_i32(tmp2, insn & 0xff);
8981 switch (op) {
8982 case 1: /* cmp */
8983 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
8984 tcg_temp_free_i32(tmp);
8985 tcg_temp_free_i32(tmp2);
396e467c
FN
8986 break;
8987 case 2: /* add */
8988 if (s->condexec_mask)
8989 tcg_gen_add_i32(tmp, tmp, tmp2);
8990 else
8991 gen_helper_add_cc(tmp, tmp, tmp2);
7d1b0095 8992 tcg_temp_free_i32(tmp2);
396e467c
FN
8993 store_reg(s, rd, tmp);
8994 break;
8995 case 3: /* sub */
8996 if (s->condexec_mask)
8997 tcg_gen_sub_i32(tmp, tmp, tmp2);
8998 else
8999 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095 9000 tcg_temp_free_i32(tmp2);
396e467c
FN
9001 store_reg(s, rd, tmp);
9002 break;
9003 }
99c475ab 9004 }
99c475ab
FB
9005 break;
9006 case 4:
9007 if (insn & (1 << 11)) {
9008 rd = (insn >> 8) & 7;
5899f386
FB
9009 /* load pc-relative. Bit 1 of PC is ignored. */
9010 val = s->pc + 2 + ((insn & 0xff) * 4);
9011 val &= ~(uint32_t)2;
7d1b0095 9012 addr = tcg_temp_new_i32();
b0109805
PB
9013 tcg_gen_movi_i32(addr, val);
9014 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9015 tcg_temp_free_i32(addr);
b0109805 9016 store_reg(s, rd, tmp);
99c475ab
FB
9017 break;
9018 }
9019 if (insn & (1 << 10)) {
9020 /* data processing extended or blx */
9021 rd = (insn & 7) | ((insn >> 4) & 8);
9022 rm = (insn >> 3) & 0xf;
9023 op = (insn >> 8) & 3;
9024 switch (op) {
9025 case 0: /* add */
396e467c
FN
9026 tmp = load_reg(s, rd);
9027 tmp2 = load_reg(s, rm);
9028 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9029 tcg_temp_free_i32(tmp2);
396e467c 9030 store_reg(s, rd, tmp);
99c475ab
FB
9031 break;
9032 case 1: /* cmp */
396e467c
FN
9033 tmp = load_reg(s, rd);
9034 tmp2 = load_reg(s, rm);
9035 gen_helper_sub_cc(tmp, tmp, tmp2);
7d1b0095
PM
9036 tcg_temp_free_i32(tmp2);
9037 tcg_temp_free_i32(tmp);
99c475ab
FB
9038 break;
9039 case 2: /* mov/cpy */
396e467c
FN
9040 tmp = load_reg(s, rm);
9041 store_reg(s, rd, tmp);
99c475ab
FB
9042 break;
9043 case 3:/* branch [and link] exchange thumb register */
b0109805 9044 tmp = load_reg(s, rm);
99c475ab 9045 if (insn & (1 << 7)) {
be5e7a76 9046 ARCH(5);
99c475ab 9047 val = (uint32_t)s->pc | 1;
7d1b0095 9048 tmp2 = tcg_temp_new_i32();
b0109805
PB
9049 tcg_gen_movi_i32(tmp2, val);
9050 store_reg(s, 14, tmp2);
99c475ab 9051 }
be5e7a76 9052 /* already thumb, no need to check */
d9ba4830 9053 gen_bx(s, tmp);
99c475ab
FB
9054 break;
9055 }
9056 break;
9057 }
9058
9059 /* data processing register */
9060 rd = insn & 7;
9061 rm = (insn >> 3) & 7;
9062 op = (insn >> 6) & 0xf;
9063 if (op == 2 || op == 3 || op == 4 || op == 7) {
9064 /* the shift/rotate ops want the operands backwards */
9065 val = rm;
9066 rm = rd;
9067 rd = val;
9068 val = 1;
9069 } else {
9070 val = 0;
9071 }
9072
396e467c 9073 if (op == 9) { /* neg */
7d1b0095 9074 tmp = tcg_temp_new_i32();
396e467c
FN
9075 tcg_gen_movi_i32(tmp, 0);
9076 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9077 tmp = load_reg(s, rd);
9078 } else {
9079 TCGV_UNUSED(tmp);
9080 }
99c475ab 9081
396e467c 9082 tmp2 = load_reg(s, rm);
5899f386 9083 switch (op) {
99c475ab 9084 case 0x0: /* and */
396e467c 9085 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9086 if (!s->condexec_mask)
396e467c 9087 gen_logic_CC(tmp);
99c475ab
FB
9088 break;
9089 case 0x1: /* eor */
396e467c 9090 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9091 if (!s->condexec_mask)
396e467c 9092 gen_logic_CC(tmp);
99c475ab
FB
9093 break;
9094 case 0x2: /* lsl */
9ee6e8bb 9095 if (s->condexec_mask) {
396e467c 9096 gen_helper_shl(tmp2, tmp2, tmp);
9ee6e8bb 9097 } else {
396e467c
FN
9098 gen_helper_shl_cc(tmp2, tmp2, tmp);
9099 gen_logic_CC(tmp2);
9ee6e8bb 9100 }
99c475ab
FB
9101 break;
9102 case 0x3: /* lsr */
9ee6e8bb 9103 if (s->condexec_mask) {
396e467c 9104 gen_helper_shr(tmp2, tmp2, tmp);
9ee6e8bb 9105 } else {
396e467c
FN
9106 gen_helper_shr_cc(tmp2, tmp2, tmp);
9107 gen_logic_CC(tmp2);
9ee6e8bb 9108 }
99c475ab
FB
9109 break;
9110 case 0x4: /* asr */
9ee6e8bb 9111 if (s->condexec_mask) {
396e467c 9112 gen_helper_sar(tmp2, tmp2, tmp);
9ee6e8bb 9113 } else {
396e467c
FN
9114 gen_helper_sar_cc(tmp2, tmp2, tmp);
9115 gen_logic_CC(tmp2);
9ee6e8bb 9116 }
99c475ab
FB
9117 break;
9118 case 0x5: /* adc */
9ee6e8bb 9119 if (s->condexec_mask)
396e467c 9120 gen_adc(tmp, tmp2);
9ee6e8bb 9121 else
396e467c 9122 gen_helper_adc_cc(tmp, tmp, tmp2);
99c475ab
FB
9123 break;
9124 case 0x6: /* sbc */
9ee6e8bb 9125 if (s->condexec_mask)
396e467c 9126 gen_sub_carry(tmp, tmp, tmp2);
9ee6e8bb 9127 else
396e467c 9128 gen_helper_sbc_cc(tmp, tmp, tmp2);
99c475ab
FB
9129 break;
9130 case 0x7: /* ror */
9ee6e8bb 9131 if (s->condexec_mask) {
f669df27
AJ
9132 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9133 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9134 } else {
396e467c
FN
9135 gen_helper_ror_cc(tmp2, tmp2, tmp);
9136 gen_logic_CC(tmp2);
9ee6e8bb 9137 }
99c475ab
FB
9138 break;
9139 case 0x8: /* tst */
396e467c
FN
9140 tcg_gen_and_i32(tmp, tmp, tmp2);
9141 gen_logic_CC(tmp);
99c475ab 9142 rd = 16;
5899f386 9143 break;
99c475ab 9144 case 0x9: /* neg */
9ee6e8bb 9145 if (s->condexec_mask)
396e467c 9146 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9147 else
396e467c 9148 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9149 break;
9150 case 0xa: /* cmp */
396e467c 9151 gen_helper_sub_cc(tmp, tmp, tmp2);
99c475ab
FB
9152 rd = 16;
9153 break;
9154 case 0xb: /* cmn */
396e467c 9155 gen_helper_add_cc(tmp, tmp, tmp2);
99c475ab
FB
9156 rd = 16;
9157 break;
9158 case 0xc: /* orr */
396e467c 9159 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9160 if (!s->condexec_mask)
396e467c 9161 gen_logic_CC(tmp);
99c475ab
FB
9162 break;
9163 case 0xd: /* mul */
7b2919a0 9164 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9165 if (!s->condexec_mask)
396e467c 9166 gen_logic_CC(tmp);
99c475ab
FB
9167 break;
9168 case 0xe: /* bic */
f669df27 9169 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9170 if (!s->condexec_mask)
396e467c 9171 gen_logic_CC(tmp);
99c475ab
FB
9172 break;
9173 case 0xf: /* mvn */
396e467c 9174 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9175 if (!s->condexec_mask)
396e467c 9176 gen_logic_CC(tmp2);
99c475ab 9177 val = 1;
5899f386 9178 rm = rd;
99c475ab
FB
9179 break;
9180 }
9181 if (rd != 16) {
396e467c
FN
9182 if (val) {
9183 store_reg(s, rm, tmp2);
9184 if (op != 0xf)
7d1b0095 9185 tcg_temp_free_i32(tmp);
396e467c
FN
9186 } else {
9187 store_reg(s, rd, tmp);
7d1b0095 9188 tcg_temp_free_i32(tmp2);
396e467c
FN
9189 }
9190 } else {
7d1b0095
PM
9191 tcg_temp_free_i32(tmp);
9192 tcg_temp_free_i32(tmp2);
99c475ab
FB
9193 }
9194 break;
9195
9196 case 5:
9197 /* load/store register offset. */
9198 rd = insn & 7;
9199 rn = (insn >> 3) & 7;
9200 rm = (insn >> 6) & 7;
9201 op = (insn >> 9) & 7;
b0109805 9202 addr = load_reg(s, rn);
b26eefb6 9203 tmp = load_reg(s, rm);
b0109805 9204 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9205 tcg_temp_free_i32(tmp);
99c475ab
FB
9206
9207 if (op < 3) /* store */
b0109805 9208 tmp = load_reg(s, rd);
99c475ab
FB
9209
9210 switch (op) {
9211 case 0: /* str */
b0109805 9212 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9213 break;
9214 case 1: /* strh */
b0109805 9215 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9216 break;
9217 case 2: /* strb */
b0109805 9218 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9219 break;
9220 case 3: /* ldrsb */
b0109805 9221 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9222 break;
9223 case 4: /* ldr */
b0109805 9224 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9225 break;
9226 case 5: /* ldrh */
b0109805 9227 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9228 break;
9229 case 6: /* ldrb */
b0109805 9230 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9231 break;
9232 case 7: /* ldrsh */
b0109805 9233 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9234 break;
9235 }
9236 if (op >= 3) /* load */
b0109805 9237 store_reg(s, rd, tmp);
7d1b0095 9238 tcg_temp_free_i32(addr);
99c475ab
FB
9239 break;
9240
9241 case 6:
9242 /* load/store word immediate offset */
9243 rd = insn & 7;
9244 rn = (insn >> 3) & 7;
b0109805 9245 addr = load_reg(s, rn);
99c475ab 9246 val = (insn >> 4) & 0x7c;
b0109805 9247 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9248
9249 if (insn & (1 << 11)) {
9250 /* load */
b0109805
PB
9251 tmp = gen_ld32(addr, IS_USER(s));
9252 store_reg(s, rd, tmp);
99c475ab
FB
9253 } else {
9254 /* store */
b0109805
PB
9255 tmp = load_reg(s, rd);
9256 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9257 }
7d1b0095 9258 tcg_temp_free_i32(addr);
99c475ab
FB
9259 break;
9260
9261 case 7:
9262 /* load/store byte immediate offset */
9263 rd = insn & 7;
9264 rn = (insn >> 3) & 7;
b0109805 9265 addr = load_reg(s, rn);
99c475ab 9266 val = (insn >> 6) & 0x1f;
b0109805 9267 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9268
9269 if (insn & (1 << 11)) {
9270 /* load */
b0109805
PB
9271 tmp = gen_ld8u(addr, IS_USER(s));
9272 store_reg(s, rd, tmp);
99c475ab
FB
9273 } else {
9274 /* store */
b0109805
PB
9275 tmp = load_reg(s, rd);
9276 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9277 }
7d1b0095 9278 tcg_temp_free_i32(addr);
99c475ab
FB
9279 break;
9280
9281 case 8:
9282 /* load/store halfword immediate offset */
9283 rd = insn & 7;
9284 rn = (insn >> 3) & 7;
b0109805 9285 addr = load_reg(s, rn);
99c475ab 9286 val = (insn >> 5) & 0x3e;
b0109805 9287 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9288
9289 if (insn & (1 << 11)) {
9290 /* load */
b0109805
PB
9291 tmp = gen_ld16u(addr, IS_USER(s));
9292 store_reg(s, rd, tmp);
99c475ab
FB
9293 } else {
9294 /* store */
b0109805
PB
9295 tmp = load_reg(s, rd);
9296 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9297 }
7d1b0095 9298 tcg_temp_free_i32(addr);
99c475ab
FB
9299 break;
9300
9301 case 9:
9302 /* load/store from stack */
9303 rd = (insn >> 8) & 7;
b0109805 9304 addr = load_reg(s, 13);
99c475ab 9305 val = (insn & 0xff) * 4;
b0109805 9306 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9307
9308 if (insn & (1 << 11)) {
9309 /* load */
b0109805
PB
9310 tmp = gen_ld32(addr, IS_USER(s));
9311 store_reg(s, rd, tmp);
99c475ab
FB
9312 } else {
9313 /* store */
b0109805
PB
9314 tmp = load_reg(s, rd);
9315 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9316 }
7d1b0095 9317 tcg_temp_free_i32(addr);
99c475ab
FB
9318 break;
9319
9320 case 10:
9321 /* add to high reg */
9322 rd = (insn >> 8) & 7;
5899f386
FB
9323 if (insn & (1 << 11)) {
9324 /* SP */
5e3f878a 9325 tmp = load_reg(s, 13);
5899f386
FB
9326 } else {
9327 /* PC. bit 1 is ignored. */
7d1b0095 9328 tmp = tcg_temp_new_i32();
5e3f878a 9329 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9330 }
99c475ab 9331 val = (insn & 0xff) * 4;
5e3f878a
PB
9332 tcg_gen_addi_i32(tmp, tmp, val);
9333 store_reg(s, rd, tmp);
99c475ab
FB
9334 break;
9335
9336 case 11:
9337 /* misc */
9338 op = (insn >> 8) & 0xf;
9339 switch (op) {
9340 case 0:
9341 /* adjust stack pointer */
b26eefb6 9342 tmp = load_reg(s, 13);
99c475ab
FB
9343 val = (insn & 0x7f) * 4;
9344 if (insn & (1 << 7))
6a0d8a1d 9345 val = -(int32_t)val;
b26eefb6
PB
9346 tcg_gen_addi_i32(tmp, tmp, val);
9347 store_reg(s, 13, tmp);
99c475ab
FB
9348 break;
9349
9ee6e8bb
PB
9350 case 2: /* sign/zero extend. */
9351 ARCH(6);
9352 rd = insn & 7;
9353 rm = (insn >> 3) & 7;
b0109805 9354 tmp = load_reg(s, rm);
9ee6e8bb 9355 switch ((insn >> 6) & 3) {
b0109805
PB
9356 case 0: gen_sxth(tmp); break;
9357 case 1: gen_sxtb(tmp); break;
9358 case 2: gen_uxth(tmp); break;
9359 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9360 }
b0109805 9361 store_reg(s, rd, tmp);
9ee6e8bb 9362 break;
99c475ab
FB
9363 case 4: case 5: case 0xc: case 0xd:
9364 /* push/pop */
b0109805 9365 addr = load_reg(s, 13);
5899f386
FB
9366 if (insn & (1 << 8))
9367 offset = 4;
99c475ab 9368 else
5899f386
FB
9369 offset = 0;
9370 for (i = 0; i < 8; i++) {
9371 if (insn & (1 << i))
9372 offset += 4;
9373 }
9374 if ((insn & (1 << 11)) == 0) {
b0109805 9375 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9376 }
99c475ab
FB
9377 for (i = 0; i < 8; i++) {
9378 if (insn & (1 << i)) {
9379 if (insn & (1 << 11)) {
9380 /* pop */
b0109805
PB
9381 tmp = gen_ld32(addr, IS_USER(s));
9382 store_reg(s, i, tmp);
99c475ab
FB
9383 } else {
9384 /* push */
b0109805
PB
9385 tmp = load_reg(s, i);
9386 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9387 }
5899f386 9388 /* advance to the next address. */
b0109805 9389 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9390 }
9391 }
a50f5b91 9392 TCGV_UNUSED(tmp);
99c475ab
FB
9393 if (insn & (1 << 8)) {
9394 if (insn & (1 << 11)) {
9395 /* pop pc */
b0109805 9396 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9397 /* don't set the pc until the rest of the instruction
9398 has completed */
9399 } else {
9400 /* push lr */
b0109805
PB
9401 tmp = load_reg(s, 14);
9402 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9403 }
b0109805 9404 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9405 }
5899f386 9406 if ((insn & (1 << 11)) == 0) {
b0109805 9407 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9408 }
99c475ab 9409 /* write back the new stack pointer */
b0109805 9410 store_reg(s, 13, addr);
99c475ab 9411 /* set the new PC value */
be5e7a76
DES
9412 if ((insn & 0x0900) == 0x0900) {
9413 store_reg_from_load(env, s, 15, tmp);
9414 }
99c475ab
FB
9415 break;
9416
9ee6e8bb
PB
9417 case 1: case 3: case 9: case 11: /* czb */
9418 rm = insn & 7;
d9ba4830 9419 tmp = load_reg(s, rm);
9ee6e8bb
PB
9420 s->condlabel = gen_new_label();
9421 s->condjmp = 1;
9422 if (insn & (1 << 11))
cb63669a 9423 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9424 else
cb63669a 9425 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9426 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9427 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9428 val = (uint32_t)s->pc + 2;
9429 val += offset;
9430 gen_jmp(s, val);
9431 break;
9432
9433 case 15: /* IT, nop-hint. */
9434 if ((insn & 0xf) == 0) {
9435 gen_nop_hint(s, (insn >> 4) & 0xf);
9436 break;
9437 }
9438 /* If Then. */
9439 s->condexec_cond = (insn >> 4) & 0xe;
9440 s->condexec_mask = insn & 0x1f;
9441 /* No actual code generated for this insn, just setup state. */
9442 break;
9443
06c949e6 9444 case 0xe: /* bkpt */
be5e7a76 9445 ARCH(5);
bc4a0de0 9446 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9447 break;
9448
9ee6e8bb
PB
9449 case 0xa: /* rev */
9450 ARCH(6);
9451 rn = (insn >> 3) & 0x7;
9452 rd = insn & 0x7;
b0109805 9453 tmp = load_reg(s, rn);
9ee6e8bb 9454 switch ((insn >> 6) & 3) {
66896cb8 9455 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9456 case 1: gen_rev16(tmp); break;
9457 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9458 default: goto illegal_op;
9459 }
b0109805 9460 store_reg(s, rd, tmp);
9ee6e8bb
PB
9461 break;
9462
9463 case 6: /* cps */
9464 ARCH(6);
9465 if (IS_USER(s))
9466 break;
9467 if (IS_M(env)) {
8984bd2e 9468 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 9469 /* PRIMASK */
8984bd2e
PB
9470 if (insn & 1) {
9471 addr = tcg_const_i32(16);
9472 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9473 tcg_temp_free_i32(addr);
8984bd2e 9474 }
9ee6e8bb 9475 /* FAULTMASK */
8984bd2e
PB
9476 if (insn & 2) {
9477 addr = tcg_const_i32(17);
9478 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9479 tcg_temp_free_i32(addr);
8984bd2e 9480 }
b75263d6 9481 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9482 gen_lookup_tb(s);
9483 } else {
9484 if (insn & (1 << 4))
9485 shift = CPSR_A | CPSR_I | CPSR_F;
9486 else
9487 shift = 0;
fa26df03 9488 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9ee6e8bb
PB
9489 }
9490 break;
9491
99c475ab
FB
9492 default:
9493 goto undef;
9494 }
9495 break;
9496
9497 case 12:
a7d3970d 9498 {
99c475ab 9499 /* load/store multiple */
a7d3970d
PM
9500 TCGv loaded_var;
9501 TCGV_UNUSED(loaded_var);
99c475ab 9502 rn = (insn >> 8) & 0x7;
b0109805 9503 addr = load_reg(s, rn);
99c475ab
FB
9504 for (i = 0; i < 8; i++) {
9505 if (insn & (1 << i)) {
99c475ab
FB
9506 if (insn & (1 << 11)) {
9507 /* load */
b0109805 9508 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9509 if (i == rn) {
9510 loaded_var = tmp;
9511 } else {
9512 store_reg(s, i, tmp);
9513 }
99c475ab
FB
9514 } else {
9515 /* store */
b0109805
PB
9516 tmp = load_reg(s, i);
9517 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9518 }
5899f386 9519 /* advance to the next address */
b0109805 9520 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9521 }
9522 }
b0109805 9523 if ((insn & (1 << rn)) == 0) {
a7d3970d 9524 /* base reg not in list: base register writeback */
b0109805
PB
9525 store_reg(s, rn, addr);
9526 } else {
a7d3970d
PM
9527 /* base reg in list: if load, complete it now */
9528 if (insn & (1 << 11)) {
9529 store_reg(s, rn, loaded_var);
9530 }
7d1b0095 9531 tcg_temp_free_i32(addr);
b0109805 9532 }
99c475ab 9533 break;
a7d3970d 9534 }
99c475ab
FB
9535 case 13:
9536 /* conditional branch or swi */
9537 cond = (insn >> 8) & 0xf;
9538 if (cond == 0xe)
9539 goto undef;
9540
9541 if (cond == 0xf) {
9542 /* swi */
422ebf69 9543 gen_set_pc_im(s->pc);
9ee6e8bb 9544 s->is_jmp = DISAS_SWI;
99c475ab
FB
9545 break;
9546 }
9547 /* generate a conditional jump to next instruction */
e50e6a20 9548 s->condlabel = gen_new_label();
d9ba4830 9549 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9550 s->condjmp = 1;
99c475ab
FB
9551
9552 /* jump to the offset */
5899f386 9553 val = (uint32_t)s->pc + 2;
99c475ab 9554 offset = ((int32_t)insn << 24) >> 24;
5899f386 9555 val += offset << 1;
8aaca4c0 9556 gen_jmp(s, val);
99c475ab
FB
9557 break;
9558
9559 case 14:
358bf29e 9560 if (insn & (1 << 11)) {
9ee6e8bb
PB
9561 if (disas_thumb2_insn(env, s, insn))
9562 goto undef32;
358bf29e
PB
9563 break;
9564 }
9ee6e8bb 9565 /* unconditional branch */
99c475ab
FB
9566 val = (uint32_t)s->pc;
9567 offset = ((int32_t)insn << 21) >> 21;
9568 val += (offset << 1) + 2;
8aaca4c0 9569 gen_jmp(s, val);
99c475ab
FB
9570 break;
9571
9572 case 15:
9ee6e8bb 9573 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9574 goto undef32;
9ee6e8bb 9575 break;
99c475ab
FB
9576 }
9577 return;
9ee6e8bb 9578undef32:
bc4a0de0 9579 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9580 return;
9581illegal_op:
99c475ab 9582undef:
bc4a0de0 9583 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9584}
9585
2c0262af
FB
9586/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9587 basic block 'tb'. If search_pc is TRUE, also generate PC
9588 information for each intermediate instruction. */
2cfc5f17
TS
9589static inline void gen_intermediate_code_internal(CPUState *env,
9590 TranslationBlock *tb,
9591 int search_pc)
2c0262af
FB
9592{
9593 DisasContext dc1, *dc = &dc1;
a1d1bb31 9594 CPUBreakpoint *bp;
2c0262af
FB
9595 uint16_t *gen_opc_end;
9596 int j, lj;
0fa85d43 9597 target_ulong pc_start;
b5ff1b31 9598 uint32_t next_page_start;
2e70f6ef
PB
9599 int num_insns;
9600 int max_insns;
3b46e624 9601
2c0262af 9602 /* generate intermediate code */
0fa85d43 9603 pc_start = tb->pc;
3b46e624 9604
2c0262af
FB
9605 dc->tb = tb;
9606
2c0262af 9607 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9608
9609 dc->is_jmp = DISAS_NEXT;
9610 dc->pc = pc_start;
8aaca4c0 9611 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9612 dc->condjmp = 0;
7204ab88 9613 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
98eac7ca
PM
9614 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9615 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9616#if !defined(CONFIG_USER_ONLY)
61f74d6a 9617 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9618#endif
5df8bac1 9619 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9620 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9621 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9622 cpu_F0s = tcg_temp_new_i32();
9623 cpu_F1s = tcg_temp_new_i32();
9624 cpu_F0d = tcg_temp_new_i64();
9625 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9626 cpu_V0 = cpu_F0d;
9627 cpu_V1 = cpu_F1d;
e677137d 9628 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9629 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9630 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9631 lj = -1;
2e70f6ef
PB
9632 num_insns = 0;
9633 max_insns = tb->cflags & CF_COUNT_MASK;
9634 if (max_insns == 0)
9635 max_insns = CF_COUNT_MASK;
9636
9637 gen_icount_start();
e12ce78d 9638
3849902c
PM
9639 tcg_clear_temp_count();
9640
e12ce78d
PM
9641 /* A note on handling of the condexec (IT) bits:
9642 *
9643 * We want to avoid the overhead of having to write the updated condexec
9644 * bits back to the CPUState for every instruction in an IT block. So:
9645 * (1) if the condexec bits are not already zero then we write
9646 * zero back into the CPUState now. This avoids complications trying
9647 * to do it at the end of the block. (For example if we don't do this
9648 * it's hard to identify whether we can safely skip writing condexec
9649 * at the end of the TB, which we definitely want to do for the case
9650 * where a TB doesn't do anything with the IT state at all.)
9651 * (2) if we are going to leave the TB then we call gen_set_condexec()
9652 * which will write the correct value into CPUState if zero is wrong.
9653 * This is done both for leaving the TB at the end, and for leaving
9654 * it because of an exception we know will happen, which is done in
9655 * gen_exception_insn(). The latter is necessary because we need to
9656 * leave the TB with the PC/IT state just prior to execution of the
9657 * instruction which caused the exception.
9658 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9659 * then the CPUState will be wrong and we need to reset it.
9660 * This is handled in the same way as restoration of the
9661 * PC in these situations: we will be called again with search_pc=1
9662 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9663 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9664 * this to restore the condexec bits.
e12ce78d
PM
9665 *
9666 * Note that there are no instructions which can read the condexec
9667 * bits, and none which can write non-static values to them, so
9668 * we don't need to care about whether CPUState is correct in the
9669 * middle of a TB.
9670 */
9671
9ee6e8bb
PB
9672 /* Reset the conditional execution bits immediately. This avoids
9673 complications trying to do it at the end of the block. */
98eac7ca 9674 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9675 {
7d1b0095 9676 TCGv tmp = tcg_temp_new_i32();
8f01245e 9677 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9678 store_cpu_field(tmp, condexec_bits);
8f01245e 9679 }
2c0262af 9680 do {
fbb4a2e3
PB
9681#ifdef CONFIG_USER_ONLY
9682 /* Intercept jump to the magic kernel page. */
9683 if (dc->pc >= 0xffff0000) {
9684 /* We always get here via a jump, so know we are not in a
9685 conditional execution block. */
9686 gen_exception(EXCP_KERNEL_TRAP);
9687 dc->is_jmp = DISAS_UPDATE;
9688 break;
9689 }
9690#else
9ee6e8bb
PB
9691 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9692 /* We always get here via a jump, so know we are not in a
9693 conditional execution block. */
d9ba4830 9694 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9695 dc->is_jmp = DISAS_UPDATE;
9696 break;
9ee6e8bb
PB
9697 }
9698#endif
9699
72cf2d4f
BS
9700 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9701 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9702 if (bp->pc == dc->pc) {
bc4a0de0 9703 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9704 /* Advance PC so that clearing the breakpoint will
9705 invalidate this TB. */
9706 dc->pc += 2;
9707 goto done_generating;
1fddef4b
FB
9708 break;
9709 }
9710 }
9711 }
2c0262af
FB
9712 if (search_pc) {
9713 j = gen_opc_ptr - gen_opc_buf;
9714 if (lj < j) {
9715 lj++;
9716 while (lj < j)
9717 gen_opc_instr_start[lj++] = 0;
9718 }
0fa85d43 9719 gen_opc_pc[lj] = dc->pc;
e12ce78d 9720 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
2c0262af 9721 gen_opc_instr_start[lj] = 1;
2e70f6ef 9722 gen_opc_icount[lj] = num_insns;
2c0262af 9723 }
e50e6a20 9724
2e70f6ef
PB
9725 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9726 gen_io_start();
9727
5642463a
PM
9728 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9729 tcg_gen_debug_insn_start(dc->pc);
9730 }
9731
7204ab88 9732 if (dc->thumb) {
9ee6e8bb
PB
9733 disas_thumb_insn(env, dc);
9734 if (dc->condexec_mask) {
9735 dc->condexec_cond = (dc->condexec_cond & 0xe)
9736 | ((dc->condexec_mask >> 4) & 1);
9737 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9738 if (dc->condexec_mask == 0) {
9739 dc->condexec_cond = 0;
9740 }
9741 }
9742 } else {
9743 disas_arm_insn(env, dc);
9744 }
e50e6a20
FB
9745
9746 if (dc->condjmp && !dc->is_jmp) {
9747 gen_set_label(dc->condlabel);
9748 dc->condjmp = 0;
9749 }
3849902c
PM
9750
9751 if (tcg_check_temp_count()) {
9752 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9753 }
9754
aaf2d97d 9755 /* Translation stops when a conditional branch is encountered.
e50e6a20 9756 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9757 * Also stop translation when a page boundary is reached. This
bf20dc07 9758 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9759 num_insns ++;
1fddef4b
FB
9760 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9761 !env->singlestep_enabled &&
1b530a6d 9762 !singlestep &&
2e70f6ef
PB
9763 dc->pc < next_page_start &&
9764 num_insns < max_insns);
9765
9766 if (tb->cflags & CF_LAST_IO) {
9767 if (dc->condjmp) {
9768 /* FIXME: This can theoretically happen with self-modifying
9769 code. */
9770 cpu_abort(env, "IO on conditional branch instruction");
9771 }
9772 gen_io_end();
9773 }
9ee6e8bb 9774
b5ff1b31 9775 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9776 instruction was a conditional branch or trap, and the PC has
9777 already been written. */
551bd27f 9778 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9779 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9780 if (dc->condjmp) {
9ee6e8bb
PB
9781 gen_set_condexec(dc);
9782 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9783 gen_exception(EXCP_SWI);
9ee6e8bb 9784 } else {
d9ba4830 9785 gen_exception(EXCP_DEBUG);
9ee6e8bb 9786 }
e50e6a20
FB
9787 gen_set_label(dc->condlabel);
9788 }
9789 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9790 gen_set_pc_im(dc->pc);
e50e6a20 9791 dc->condjmp = 0;
8aaca4c0 9792 }
9ee6e8bb
PB
9793 gen_set_condexec(dc);
9794 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9795 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9796 } else {
9797 /* FIXME: Single stepping a WFI insn will not halt
9798 the CPU. */
d9ba4830 9799 gen_exception(EXCP_DEBUG);
9ee6e8bb 9800 }
8aaca4c0 9801 } else {
9ee6e8bb
PB
9802 /* While branches must always occur at the end of an IT block,
9803 there are a few other things that can cause us to terminate
9804 the TB in the middel of an IT block:
9805 - Exception generating instructions (bkpt, swi, undefined).
9806 - Page boundaries.
9807 - Hardware watchpoints.
9808 Hardware breakpoints have already been handled and skip this code.
9809 */
9810 gen_set_condexec(dc);
8aaca4c0 9811 switch(dc->is_jmp) {
8aaca4c0 9812 case DISAS_NEXT:
6e256c93 9813 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9814 break;
9815 default:
9816 case DISAS_JUMP:
9817 case DISAS_UPDATE:
9818 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9819 tcg_gen_exit_tb(0);
8aaca4c0
FB
9820 break;
9821 case DISAS_TB_JUMP:
9822 /* nothing more to generate */
9823 break;
9ee6e8bb 9824 case DISAS_WFI:
d9ba4830 9825 gen_helper_wfi();
9ee6e8bb
PB
9826 break;
9827 case DISAS_SWI:
d9ba4830 9828 gen_exception(EXCP_SWI);
9ee6e8bb 9829 break;
8aaca4c0 9830 }
e50e6a20
FB
9831 if (dc->condjmp) {
9832 gen_set_label(dc->condlabel);
9ee6e8bb 9833 gen_set_condexec(dc);
6e256c93 9834 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
9835 dc->condjmp = 0;
9836 }
2c0262af 9837 }
2e70f6ef 9838
9ee6e8bb 9839done_generating:
2e70f6ef 9840 gen_icount_end(tb, num_insns);
2c0262af
FB
9841 *gen_opc_ptr = INDEX_op_end;
9842
9843#ifdef DEBUG_DISAS
8fec2b8c 9844 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
9845 qemu_log("----------------\n");
9846 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7204ab88 9847 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
93fcfe39 9848 qemu_log("\n");
2c0262af
FB
9849 }
9850#endif
b5ff1b31
FB
9851 if (search_pc) {
9852 j = gen_opc_ptr - gen_opc_buf;
9853 lj++;
9854 while (lj <= j)
9855 gen_opc_instr_start[lj++] = 0;
b5ff1b31 9856 } else {
2c0262af 9857 tb->size = dc->pc - pc_start;
2e70f6ef 9858 tb->icount = num_insns;
b5ff1b31 9859 }
2c0262af
FB
9860}
9861
2cfc5f17 9862void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 9863{
2cfc5f17 9864 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
9865}
9866
2cfc5f17 9867void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 9868{
2cfc5f17 9869 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
9870}
9871
b5ff1b31
FB
9872static const char *cpu_mode_names[16] = {
9873 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9874 "???", "???", "???", "und", "???", "???", "???", "sys"
9875};
9ee6e8bb 9876
9a78eead 9877void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 9878 int flags)
2c0262af
FB
9879{
9880 int i;
06e80fc9 9881#if 0
bc380d17 9882 union {
b7bcbe95
FB
9883 uint32_t i;
9884 float s;
9885 } s0, s1;
9886 CPU_DoubleU d;
a94a6abf
PB
9887 /* ??? This assumes float64 and double have the same layout.
9888 Oh well, it's only debug dumps. */
9889 union {
9890 float64 f64;
9891 double d;
9892 } d0;
06e80fc9 9893#endif
b5ff1b31 9894 uint32_t psr;
2c0262af
FB
9895
9896 for(i=0;i<16;i++) {
7fe48483 9897 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 9898 if ((i % 4) == 3)
7fe48483 9899 cpu_fprintf(f, "\n");
2c0262af 9900 else
7fe48483 9901 cpu_fprintf(f, " ");
2c0262af 9902 }
b5ff1b31 9903 psr = cpsr_read(env);
687fa640
TS
9904 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9905 psr,
b5ff1b31
FB
9906 psr & (1 << 31) ? 'N' : '-',
9907 psr & (1 << 30) ? 'Z' : '-',
9908 psr & (1 << 29) ? 'C' : '-',
9909 psr & (1 << 28) ? 'V' : '-',
5fafdf24 9910 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 9911 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 9912
5e3f878a 9913#if 0
b7bcbe95 9914 for (i = 0; i < 16; i++) {
8e96005d
FB
9915 d.d = env->vfp.regs[i];
9916 s0.i = d.l.lower;
9917 s1.i = d.l.upper;
a94a6abf
PB
9918 d0.f64 = d.d;
9919 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 9920 i * 2, (int)s0.i, s0.s,
a94a6abf 9921 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 9922 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 9923 d0.d);
b7bcbe95 9924 }
40f137e1 9925 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 9926#endif
2c0262af 9927}
a6b025d3 9928
e87b7cb0 9929void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
9930{
9931 env->regs[15] = gen_opc_pc[pc_pos];
e12ce78d 9932 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 9933}